Merge remote-tracking branch 'spacebeaker/upstream' into master

Change-Id: Ieb5c6acc9bbafad3813c83058a9e40089fee2e3c
diff --git a/.bazelrc b/.bazelrc
index 55bf613..af0b2fd 100644
--- a/.bazelrc
+++ b/.bazelrc
@@ -15,6 +15,18 @@
 #
 # TFLM Bazel configuration file.
 
+# The semver-format version label embedded in build outputs when and where
+# stamping is used. Note TFLM does not currently publish semver-versioned
+# releases; however, this value is used where a version label is required, such
+# as in the Python distribution package.
+build --embed_label=0
+
+# Get stamp values from a script's output
+build --workspace_status_command=./tools/workspace_status.sh
+
+# TODO(b/315853820): Needed for Bazel 7.0, until migrated to bzlmod
+build --noenable_bzlmod
+
 # Use the following C++ standard
 build --cxxopt -std=c++17
 
diff --git a/.bazelversion b/.bazelversion
new file mode 100644
index 0000000..66ce77b
--- /dev/null
+++ b/.bazelversion
@@ -0,0 +1 @@
+7.0.0
diff --git a/.github/mergify.yml b/.github/mergify.yml
index 79b9fc9..4d3b6ad 100644
--- a/.github/mergify.yml
+++ b/.github/mergify.yml
@@ -1,6 +1,7 @@
 queue_rules:
   - name: default
     checks_timeout: 2 h
+    branch_protection_injection_mode: queue
     conditions:
       - base=main
       - label=ci:ready_to_merge
@@ -14,7 +15,6 @@
     actions:
       queue:
         name: default
-        require_branch_protection: true
         method: squash
         commit_message_template: |
           {{ title }} (#{{ number }})
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 8728675..1386751 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -35,7 +35,6 @@
         run: |
           sudo ci/install_bazelisk.sh
           pip3 install Pillow
-          pip3 install Wave
           pip3 install numpy
       - name: Test
         run: |
@@ -56,7 +55,6 @@
         run: |
           sudo ci/install_bazelisk.sh
           pip3 install Pillow
-          pip3 install Wave
           pip3 install numpy
       - name: Test
         run: |
@@ -77,8 +75,12 @@
         run: |
           sudo ci/install_bazelisk.sh
           pip3 install Pillow
-          pip3 install Wave
           pip3 install numpy
+      - name: Fix kernel mmap rnd bits
+        # Asan in llvm 14 provided in ubuntu 22.04 is incompatible with
+        # high-entropy ASLR in much newer kernels that GitHub runners are
+        # using leading to random crashes: https://reviews.llvm.org/D148280
+        run: sudo sysctl vm.mmap_rnd_bits=28
       - name: Test
         run: |
           tensorflow/lite/micro/tools/ci_build/test_bazel_msan.sh
@@ -98,8 +100,12 @@
         run: |
           sudo ci/install_bazelisk.sh
           pip3 install Pillow
-          pip3 install Wave
           pip3 install numpy
+      - name: Fix kernel mmap rnd bits
+        # Asan in llvm 14 provided in ubuntu 22.04 is incompatible with
+        # high-entropy ASLR in much newer kernels that GitHub runners are
+        # using leading to random crashes: https://reviews.llvm.org/D148280
+        run: sudo sysctl vm.mmap_rnd_bits=28
       - name: Test
         run: |
           tensorflow/lite/micro/tools/ci_build/test_bazel_asan.sh
@@ -118,7 +124,6 @@
       - name: Install dependencies
         run: |
           pip3 install Pillow
-          pip3 install Wave
           pip3 install numpy
       - name: Test
         run: |
@@ -139,7 +144,6 @@
       - name: Install dependencies
         run: |
           pip3 install Pillow
-          pip3 install Wave
           pip3 install numpy
       - name: Test
         run: |
@@ -160,7 +164,6 @@
       - name: Install dependencies
         run: |
           pip3 install Pillow
-          pip3 install Wave
           pip3 install numpy
       - name: Test
         run: |
@@ -213,7 +216,6 @@
       - name: Install dependencies
         run: |
           pip3 install Pillow
-          pip3 install Wave
           pip3 install numpy
       - name: Test
         run: |
@@ -234,7 +236,6 @@
       - name: Install dependencies
         run: |
           pip3 install Pillow
-          pip3 install Wave
           pip3 install numpy
       - name: Test
         run: |
@@ -256,7 +257,6 @@
       - name: Install dependencies
         run: |
           pip3 install Pillow
-          pip3 install Wave
           pip3 install numpy
       - name: Test
         run: |
@@ -278,7 +278,6 @@
       - name: Install dependencies
         run: |
           pip3 install Pillow
-          pip3 install Wave
           pip3 install numpy
       - name: Test
         run: |
@@ -300,7 +299,6 @@
       - name: Install dependencies
         run: |
           pip3 install Pillow
-          pip3 install Wave
           pip3 install numpy
       - name: Test
         run: |
diff --git a/.github/workflows/cortex_m.yml b/.github/workflows/cortex_m.yml
index 26fe77d..a1e8168 100644
--- a/.github/workflows/cortex_m.yml
+++ b/.github/workflows/cortex_m.yml
@@ -14,24 +14,40 @@
   # Allow manually triggering of the workflow.
   workflow_dispatch: {}
 
+  pull_request_target:
+    types:
+      - closed
+      - labeled
+
+  workflow_call:
+    inputs:
+      trigger-sha:
+        required: true
+        type: string
+    secrets:
+      tflm-bot-token:
+        required: true
+
 jobs:
   cortex_m_generic:
     runs-on: ubuntu-latest
 
     if: |
       github.event_name == 'workflow_dispatch' ||
-      (github.event_name == 'schedule' && github.repository == 'tensorflow/tflite-micro')
+      (github.event_name == 'schedule' &&
+       github.repository == 'tensorflow/tflite-micro') ||
+      (github.event.action == 'labeled' &&
+       github.event.label.name == 'ci:run_full')
 
     name: Cortex-M Generic
     steps:
       - uses: actions/setup-python@v4
-        with: 
+        with:
           python-version: '3.10'
       - uses: actions/checkout@v2
       - name: Install dependencies
         run: |
           pip3 install Pillow
-          pip3 install Wave
           pip3 install numpy
       - name: Test
         run: |
@@ -42,18 +58,20 @@
 
     if: |
       github.event_name == 'workflow_dispatch' ||
-      (github.event_name == 'schedule' && github.repository == 'tensorflow/tflite-micro')
+      (github.event_name == 'schedule' &&
+       github.repository == 'tensorflow/tflite-micro') ||
+      (github.event.action == 'labeled' &&
+       github.event.label.name == 'ci:run_full')
 
     name: Cortex-M Corstone 300 (FVP)
     steps:
       - uses: actions/setup-python@v4
-        with: 
+        with:
           python-version: '3.10'
       - uses: actions/checkout@v2
       - name: Install dependencies
         run: |
           pip3 install Pillow
-          pip3 install Wave
           pip3 install numpy
       - name: Test
         run: |
diff --git a/.github/workflows/generate_integration_tests.yml b/.github/workflows/generate_integration_tests.yml
index 91c8f18..74ed944 100644
--- a/.github/workflows/generate_integration_tests.yml
+++ b/.github/workflows/generate_integration_tests.yml
@@ -30,7 +30,6 @@
       - name: Install dependencies
         run: |
           pip3 install Pillow
-          pip3 install Wave
           pip3 install numpy
       - name: Test
         run: |
diff --git a/.github/workflows/pypi_build.yml b/.github/workflows/pypi_build.yml
new file mode 100644
index 0000000..52a8075
--- /dev/null
+++ b/.github/workflows/pypi_build.yml
@@ -0,0 +1,54 @@
+# YAML schema for GitHub Actions:
+# https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions
+#
+# Helpful YAML parser to clarify YAML syntax:
+# https://yaml-online-parser.appspot.com/
+#
+
+name: PyPI Build
+
+on:
+  schedule:
+    # 1pm UTC is 6am or 7am PT depending on daylight savings.
+    - cron: '0 13 * * *'
+
+  workflow_dispatch:
+    inputs:
+      upload-type:
+        description: 'Upload type'
+        required: true
+        default: 'pypi'
+        type: choice
+        options:
+          - 'pypi'
+          - 'no upload'
+env:
+  TWINE_PASSWORD: ${{ secrets.PYPI_API_KEY }}
+
+jobs:
+  pypi-build:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          token: ${{ secrets.TFLM_BOT_REPO_TOKEN }}
+      - name: Build Wheel 3.10
+        run: |
+          python/tflite_micro/pypi_build.sh cp310
+      - name: Build Wheel 3.11
+        run: |
+          python/tflite_micro/pypi_build.sh cp311
+      - name: Check Directory Output
+        run: |
+          ls -l bazel-pypi-out
+      - name: Install Twine
+        run: |
+          python -m pip install --upgrade pip setuptools wheel
+          pip install twine
+      - name: upload to pypi
+        if: |
+          github.event.inputs.upload-type == 'pypi' ||
+          github.event_name == 'schedule'
+        run: |
+          python/tflite_micro/pypi_upload.sh \
+          bazel-pypi-out/tflite_micro-*.whl
\ No newline at end of file
diff --git a/.github/workflows/sync.yml b/.github/workflows/sync.yml
index d505a9d..0c5434e 100644
--- a/.github/workflows/sync.yml
+++ b/.github/workflows/sync.yml
@@ -62,5 +62,5 @@
           author: TFLM-bot <tflm-github-bot@google.com>
           body: "BUG=automated sync from upstream\nNO_CHECK_TFLITE_FILES=automated sync from upstream"
           labels: bot:sync-tf, ci:run
-          reviewers: advaitjain
+          reviewers: rascani
 
diff --git a/.github/workflows/tests_entry.yml b/.github/workflows/tests_entry.yml
index 07cf617..1ed4de1 100644
--- a/.github/workflows/tests_entry.yml
+++ b/.github/workflows/tests_entry.yml
@@ -28,9 +28,10 @@
     runs-on: ubuntu-latest
     steps:
       - name: fail-without-labels
-        if: ${{ !(contains(github.event.pull_request.labels.*.name, 'ci:run') ||
-          contains(github.event.pull_request.labels.*.name, 'ci:ready_to_merge') ||
-          contains(github.event.pull_request.labels.*.name, 'ci:run_full')) }}
+        if: github.event.action == 'labeled' &&
+          !(github.event.label.name == 'ci:run' ||
+            github.event.label.name == 'ci:ready_to_merge' ||
+            github.event.label.name == 'ci:run_full')
         run: exit 1
 
   ci-ready-to-merge:
@@ -60,7 +61,8 @@
     needs: ci-ready-to-merge
     steps:
       - name: remove-cirun
-        if: ${{ contains(github.event.pull_request.labels.*.name, 'ci:run') }}
+        if: github.event.action == 'labeled' &&
+            github.event.label.name == 'ci:run'
         uses: actions/github-script@v5
         with:
           github-token: ${{ secrets.TFLM_BOT_REPO_TOKEN }}
@@ -78,7 +80,8 @@
     needs: ci-run
     steps:
       - name: remove-cirun-full
-        if: ${{ contains(github.event.pull_request.labels.*.name, 'ci:run_full') }}
+        if: github.event.action == 'labeled' &&
+            github.event.label.name == 'ci:run_full'
         uses: actions/github-script@v5
         with:
           github-token: ${{ secrets.TFLM_BOT_REPO_TOKEN }}
@@ -102,7 +105,7 @@
           echo "PR description requires a BUG= line with issue number."
           echo "See https://testing.googleblog.com/2017/09/code-health-providing-context-with.html for additional context"
           exit 1
-  
+
   call-ci:
     needs: ci-run
     uses: ./.github/workflows/ci.yml
@@ -128,7 +131,7 @@
   call-check-tflite-files:
     needs: ci-run
     uses: ./.github/workflows/check_tflite_files.yml
-    with: 
+    with:
       trigger-sha: ${{ github.event.pull_request.head.sha }}
       pr-number: ${{ github.event.pull_request.number }}
       pr-body: ${{ github.event.pull_request.body }}
diff --git a/.github/workflows/tests_post.yml b/.github/workflows/tests_post.yml
index 719adaa..d15b092 100644
--- a/.github/workflows/tests_post.yml
+++ b/.github/workflows/tests_post.yml
@@ -11,46 +11,12 @@
       - labeled
 
 jobs:
-  riscv_postmerge:
-    if: ${{ github.event.pull_request.merged == true ||
-      contains(github.event.pull_request.labels.*.name, 'ci:run_full') }}
-    uses: ./.github/workflows/riscv_postmerge.yml
-    with:
-      trigger-sha: ${{ github.event.pull_request.head.sha }}
-    secrets:
-      tflm-bot-token: ${{ secrets.TFLM_BOT_PACKAGE_READ_TOKEN }}
-
-  xtensa_postmerge:
-    if: ${{ github.event.pull_request.merged == true ||
-            contains(github.event.pull_request.labels.*.name, 'ci:run_full') }}
-    uses: ./.github/workflows/xtensa_postmerge.yml
-    with:
-      trigger-sha: ${{ github.event.pull_request.head.sha }}
-    secrets:
-      tflm-bot-token: ${{ secrets.TFLM_BOT_PACKAGE_READ_TOKEN }}
-
-  issue_on_error:
-    needs: [riscv_postmerge,xtensa_postmerge]
-    if: ${{ always() && contains(needs.*.result, 'failure') &&
-            !contains(github.event.pull_request.labels.*.name, 'ci:run_full') }}
-    uses: ./.github/workflows/issue_on_error.yml
-    with:
-      repo: ${{ github.repository}}
-      workflow: ${{ github.workflow }}
-      run_number: ${{ github.run_number }}
-      run_id: ${{ github.run_id }}
-      flag_label: ci:bot_issue
-      pr_number: ${{ github.event.number }}
-      pr_link: ${{ github.event.pull_request._links.html.href }}
-    secrets:
-      token: ${{ secrets.GITHUB_TOKEN }}
-
   ci_run_full:
-    needs: [issue_on_error]
     runs-on: ubuntu-latest
     steps:
       - name: remove-cirun-full
-        if: ${{ contains(github.event.pull_request.labels.*.name, 'ci:run_full') }}
+        if: github.event.action == 'labeled' &&
+            github.event.label.name == 'ci:run_full'
         uses: actions/github-script@v5
         with:
           github-token: ${{ secrets.TFLM_BOT_REPO_TOKEN }}
@@ -63,3 +29,48 @@
             })
         continue-on-error: true
 
+  riscv_postmerge:
+    needs: [ci_run_full]
+    if: always() && github.event.pull_request.merged == true ||
+        (github.event.action == 'labeled' &&
+         github.event.label.name == 'ci:run_full')
+    uses: ./.github/workflows/riscv_postmerge.yml
+    with:
+      trigger-sha: ${{ github.event.pull_request.head.sha }}
+    secrets:
+      tflm-bot-token: ${{ secrets.TFLM_BOT_PACKAGE_READ_TOKEN }}
+
+  xtensa_postmerge:
+    needs: [ci_run_full]
+    if: always() && github.event.pull_request.merged == true ||
+        (github.event.action == 'labeled' &&
+         github.event.label.name == 'ci:run_full')
+    uses: ./.github/workflows/xtensa_postmerge.yml
+    with:
+      trigger-sha: ${{ github.event.pull_request.head.sha }}
+    secrets:
+      tflm-bot-token: ${{ secrets.TFLM_BOT_PACKAGE_READ_TOKEN }}
+
+  cortex_m_ci_full:
+    needs: [ci_run_full]
+    uses: ./.github/workflows/cortex_m.yml
+    with:
+      trigger-sha: ${{ github.event.pull_request.head.sha }}
+    secrets:
+      tflm-bot-token: ${{ secrets.TFLM_BOT_PACKAGE_READ_TOKEN }}
+
+  issue_on_error:
+    needs: [riscv_postmerge, xtensa_postmerge, cortex_m_ci_full]
+    if: ${{ always() && contains(needs.*.result, 'failure') &&
+            github.event.pull_request.merged == true }}
+    uses: ./.github/workflows/issue_on_error.yml
+    with:
+      repo: ${{ github.repository}}
+      workflow: ${{ github.workflow }}
+      run_number: ${{ github.run_number }}
+      run_id: ${{ github.run_id }}
+      flag_label: ci:bot_issue
+      pr_number: ${{ github.event.number }}
+      pr_link: ${{ github.event.pull_request._links.html.href }}
+    secrets:
+      token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/xtensa_postmerge.yml b/.github/workflows/xtensa_postmerge.yml
index 8e1188e..d91cc94 100644
--- a/.github/workflows/xtensa_postmerge.yml
+++ b/.github/workflows/xtensa_postmerge.yml
@@ -31,7 +31,7 @@
       - run: |
           rm -rf .git
           echo ${{ secrets.tflm-bot-token }} | docker login ghcr.io -u tflm-bot --password-stdin
-          docker run --env XTENSA_TOOLS_VERSION=RI-2020.4-linux --rm -v `pwd`:/opt/tflite-micro ghcr.io/tflm-bot/xtensa_xplorer_13:0.1 \
+          docker run --env XTENSA_TOOLS_VERSION=RI-2020.4-linux --rm -v `pwd`:/opt/tflite-micro ghcr.io/tflm-bot/xtensa_xplorer_13:0.3 \
           /bin/bash -c \
           "cd /opt && tflite-micro/tensorflow/lite/micro/tools/ci_build/test_xtensa_fusion_f1.sh EXTERNAL tflite-micro/"
 
@@ -46,7 +46,7 @@
       - run: |
           rm -rf .git
           echo ${{ secrets.tflm-bot-token }} | docker login ghcr.io -u tflm-bot --password-stdin
-          docker run --env XTENSA_TOOLS_VERSION=RI-2020.4-linux --rm -v `pwd`:/opt/tflite-micro ghcr.io/tflm-bot/xtensa_xplorer_13:0.1 \
+          docker run --env XTENSA_TOOLS_VERSION=RI-2020.4-linux --rm -v `pwd`:/opt/tflite-micro ghcr.io/tflm-bot/xtensa_xplorer_13:0.3 \
           /bin/bash -c \
           "cd /opt && tflite-micro/tensorflow/lite/micro/tools/ci_build/test_xtensa_vision_p6.sh RUN_TESTS tflite-micro/"
 
@@ -61,6 +61,6 @@
       - run: |
           rm -rf .git
           echo ${{ secrets.tflm-bot-token }} | docker login ghcr.io -u tflm-bot --password-stdin
-          docker run --env XTENSA_TOOLS_VERSION=RI-2019.2-linux --rm -v `pwd`:/opt/tflite-micro ghcr.io/tflm-bot/xtensa_xplorer_11:0.1 \
+          docker run --env XTENSA_TOOLS_VERSION=RI-2019.2-linux --rm -v `pwd`:/opt/tflite-micro ghcr.io/tflm-bot/xtensa_xplorer_11:0.2 \
           /bin/bash -c \
           "cd /opt && tflite-micro/tensorflow/lite/micro/tools/ci_build/test_xtensa_hifimini.sh tflite-micro/"
diff --git a/.github/workflows/xtensa_presubmit.yml b/.github/workflows/xtensa_presubmit.yml
index 58c4258..519aff9 100644
--- a/.github/workflows/xtensa_presubmit.yml
+++ b/.github/workflows/xtensa_presubmit.yml
@@ -32,7 +32,7 @@
       - run: |
           rm -rf .git
           echo ${{ secrets.tflm-bot-token }} | docker login ghcr.io -u tflm-bot --password-stdin
-          docker run --env XTENSA_TOOLS_VERSION=RI-2020.4-linux --rm -v `pwd`:/opt/tflite-micro ghcr.io/tflm-bot/xtensa_xplorer_13:0.1 \
+          docker run --env XTENSA_TOOLS_VERSION=RI-2020.4-linux --rm -v `pwd`:/opt/tflite-micro ghcr.io/tflm-bot/xtensa_xplorer_13:0.3 \
           /bin/bash -c \
           "cd /opt && tflite-micro/tensorflow/lite/micro/tools/ci_build/test_xtensa_vision_p6.sh RUN_NO_TESTS tflite-micro/"
 
@@ -47,7 +47,7 @@
       - run: |
           rm -rf .git
           echo ${{ secrets.tflm-bot-token }} | docker login ghcr.io -u tflm-bot --password-stdin
-          docker run --env XTENSA_TOOLS_VERSION=RI-2022.9-linux --rm -v `pwd`:/opt/tflite-micro ghcr.io/tflm-bot/xtensa_hifi5:0.1 \
+          docker run --env XTENSA_TOOLS_VERSION=RI-2022.9-linux --rm -v `pwd`:/opt/tflite-micro ghcr.io/tflm-bot/xtensa_xplorer_hifi5:0.2 \
           /bin/bash -c \
           "cd /opt && tflite-micro/tensorflow/lite/micro/tools/ci_build/test_xtensa_hifi5.sh tflite-micro/"
 
@@ -62,6 +62,6 @@
       - run: |
           rm -rf .git
           echo ${{ secrets.tflm-bot-token }} | docker login ghcr.io -u tflm-bot --password-stdin
-          docker run --env XTENSA_TOOLS_VERSION=RI-2020.4-linux --rm -v `pwd`:/opt/tflite-micro ghcr.io/tflm-bot/xtensa_xplorer_13:0.1 \
+          docker run --env XTENSA_TOOLS_VERSION=RI-2020.4-linux --rm -v `pwd`:/opt/tflite-micro ghcr.io/tflm-bot/xtensa_xplorer_13:0.3 \
           /bin/bash -c \
           "cd /opt && tflite-micro/tensorflow/lite/micro/tools/ci_build/test_xtensa_hifi3z.sh EXTERNAL tflite-micro/"
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index a442570..e1410af 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -4,25 +4,27 @@
 -->
 
 <!--ts-->
-   * [How to Contribute](#how-to-contribute)
-      * [Contributor License Agreement](#contributor-license-agreement)
-      * [Community Guidelines](#community-guidelines)
-   * [Code Contribution Guidelines](#code-contribution-guidelines)
-      * [General Pull Request Guidelines](#general-pull-request-guidelines)
-      * [Guidelines for Specific Contribution Categories](#guidelines-for-specific-contribution-categories)
-         * [Bug Fixes](#bug-fixes)
-         * [Reference Kernel Implementations](#reference-kernel-implementations)
-         * [Optimized Kernel Implementations](#optimized-kernel-implementations)
-         * [New Target / Platform / IDE / Examples](#new-target--platform--ide--examples)
-   * [Development Workflow Notes](#development-workflow-notes)
-      * [Initial Setup](#initial-setup)
-      * [Before submitting your PR](#before-submitting-your-pr)
-      * [During the PR review](#during-the-pr-review)
-      * [Reviewer notes](#reviewer-notes)
-      * [Python notes](#python-notes)
-   * [Continuous Integration System](#continuous-integration-system)
+* [How to Contribute](#how-to-contribute)
+   * [Contributor License Agreement](#contributor-license-agreement)
+   * [Community Guidelines](#community-guidelines)
+* [Code Contribution Guidelines](#code-contribution-guidelines)
+   * [General Pull Request Guidelines](#general-pull-request-guidelines)
+   * [Guidelines for Specific Contribution Categories](#guidelines-for-specific-contribution-categories)
+      * [Bug Fixes](#bug-fixes)
+      * [Reference Kernel Implementations](#reference-kernel-implementations)
+      * [Optimized Kernel Implementations](#optimized-kernel-implementations)
+      * [New Target / Platform / IDE / Examples](#new-target--platform--ide--examples)
+* [Development Environment](#development-environment)
+   * [Prerequisites](#prerequisites)
+   * [Recommendations](#recommendations)
+* [Development Workflow Notes](#development-workflow-notes)
+   * [Before submitting your PR](#before-submitting-your-pr)
+   * [During the PR review](#during-the-pr-review)
+   * [Reviewer notes](#reviewer-notes)
+   * [Python notes](#python-notes)
+* [Continuous Integration System](#continuous-integration-system)
 
-<!-- Added by: advaitjain, at: Thu 16 Sep 2021 11:43:42 AM PDT -->
+<!-- Added by: rkuester, at: Fri Dec 15 04:25:41 PM CST 2023 -->
 
 <!--te-->
 
@@ -143,10 +145,20 @@
 Please see the [new platform support guide](tensorflow/lite/micro/docs/new_platform_support.md)
 for documentation on how to add TFLM support for your particular platform.
 
+# Development Environment
 
-# Development Workflow Notes
+We support amd64-architecture development and testing on Ubuntu 22.04, although
+other OSes may work.
 
-## Initial Setup
+## Prerequisites
+
+TFLM's primary build system is [Bazel](https://bazel.build). Add
+[Bazelisk](https://github.com/bazelbuild/bazelisk) as the `bazel` executable in
+your PATH ([e.g., copy it to `/usr/local/bin/bazel`](ci/install_bazelisk.sh)) to
+automatically download and run the correct Bazel version as specified in
+`//.bazelversion`.
+
+## Recommendations
 
 Below are some tips that might be useful and improve the development experience.
 
@@ -156,7 +168,9 @@
 * Code search the [TfLite Micro codebase](https://sourcegraph.com/github.com/tensorflow/tflite-micro@main)
   on Sourcegraph. And optionally install the [plugin that enables GitHub integration](https://docs.sourcegraph.com/integration/github#github-integration-with-sourcegraph).
 
-* Install [bazel](ci/install_bazelisk.sh) and [buildifier](ci/install_buildifier.sh).
+* Install
+  [Buildifier](https://github.com/bazelbuild/buildtools/blob/master/buildifier/README.md)
+  ([e.g.](ci/install_buildifier.sh)) to format Bazel BUILD and .bzl files.
 
 * Install the latest clang and clang-format. For example, [here](ci/Dockerfile.micro)
   is the what we do for the TFLM continuous integration Docker container.
@@ -164,8 +178,8 @@
 * Get a copy of [cpplint](https://github.com/google/styleguide/tree/gh-pages/cpplint)
   or install it:
 
-* Install Pillow and Wave.  For example, [here](ci/Dockerfile.micro) is what we
-  do for the TFLM continuous integration Docker container.
+* Install Pillow.  For example, [here](ci/Dockerfile.micro) is what we do for
+  the TFLM continuous integration Docker container.
 
   ```
   pip install cpplint
@@ -184,6 +198,8 @@
   cp tensorflow/lite/micro/tools/dev_setup/pre-push.tflm .git/hooks/pre-push
   ```
 
+# Development Workflow Notes
+
 ## Before submitting your PR
 
 1.  Run in-place clang-format on all the files that are modified in your git
diff --git a/WORKSPACE b/WORKSPACE
index f881df9..48202f5 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -15,54 +15,72 @@
 
 workspace(name = "tflite_micro")
 
+load("//tensorflow:workspace.bzl", "workspace")
+
+workspace()
+
 load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
 
 # compile_commands.json generator
 http_archive(
     name = "hedron_compile_commands",
-    url = "https://github.com/hedronvision/bazel-compile-commands-extractor/archive/1266d6a25314d165ca78d0061d3399e909b7920e.tar.gz",
-    strip_prefix = "bazel-compile-commands-extractor-1266d6a25314d165ca78d0061d3399e909b7920e",
     sha256 = "bacabfe758676fdc19e4bea7c4a3ac99c7e7378d259a9f1054d341c6a6b44ff6",
+    strip_prefix = "bazel-compile-commands-extractor-1266d6a25314d165ca78d0061d3399e909b7920e",
+    url = "https://github.com/hedronvision/bazel-compile-commands-extractor/archive/1266d6a25314d165ca78d0061d3399e909b7920e.tar.gz",
 )
+
 load("@hedron_compile_commands//:workspace_setup.bzl", "hedron_compile_commands_setup")
+
 hedron_compile_commands_setup()
 
+_rules_python_version = "0.26.0"
+
 http_archive(
     name = "rules_python",
-    sha256 = "497ca47374f48c8b067d786b512ac10a276211810f4a580178ee9b9ad139323a",
-    strip_prefix = "rules_python-0.16.1",
-    url = "https://github.com/bazelbuild/rules_python/archive/refs/tags/0.16.1.tar.gz",
+    sha256 = "9d04041ac92a0985e344235f5d946f71ac543f1b1565f2cdbc9a2aaee8adf55b",
+    strip_prefix = "rules_python-{}".format(_rules_python_version),
+    url = "https://github.com/bazelbuild/rules_python/archive/refs/tags/{}.tar.gz".format(_rules_python_version),
 )
 
+load("@rules_python//python:repositories.bzl", "py_repositories")
+
+py_repositories()
+
+# Read the Python package dependencies of the build environment. To modify
+# them, see //third_party:python_requirements.in.
 load("@rules_python//python:pip.bzl", "pip_parse")
+
 pip_parse(
     name = "tflm_pip_deps",
     requirements_lock = "//third_party:python_requirements.txt",
 )
 
+# Create repositories for each Python package dependency.
 load("@tflm_pip_deps//:requirements.bzl", "install_deps", "requirement")
+
 install_deps()
 
-load("//tensorflow:workspace.bzl", "workspace")
-workspace()
-
 http_archive(
-  name = "pybind11_bazel",
-  strip_prefix = "pybind11_bazel-faf56fb3df11287f26dbc66fdedf60a2fc2c6631",
-  urls = ["https://github.com/pybind/pybind11_bazel/archive/faf56fb3df11287f26dbc66fdedf60a2fc2c6631.zip"],
-  sha256 = "a185aa68c93b9f62c80fcb3aadc3c83c763854750dc3f38be1dadcb7be223837",
+    name = "pybind11_bazel",
+    sha256 = "a185aa68c93b9f62c80fcb3aadc3c83c763854750dc3f38be1dadcb7be223837",
+    strip_prefix = "pybind11_bazel-faf56fb3df11287f26dbc66fdedf60a2fc2c6631",
+    urls = ["https://github.com/pybind/pybind11_bazel/archive/faf56fb3df11287f26dbc66fdedf60a2fc2c6631.zip"],
 )
 
 http_archive(
-  name = "pybind11",
-  build_file = "@pybind11_bazel//:pybind11.BUILD",
-  strip_prefix = "pybind11-2.10.0",
-  urls = ["https://github.com/pybind/pybind11/archive/refs/tags/v2.10.0.tar.gz"],
-  sha256 = "eacf582fa8f696227988d08cfc46121770823839fe9e301a20fbce67e7cd70ec",
+    name = "pybind11",
+    build_file = "@pybind11_bazel//:pybind11.BUILD",
+    sha256 = "eacf582fa8f696227988d08cfc46121770823839fe9e301a20fbce67e7cd70ec",
+    strip_prefix = "pybind11-2.10.0",
+    urls = ["https://github.com/pybind/pybind11/archive/refs/tags/v2.10.0.tar.gz"],
 )
 
 load("@pybind11_bazel//:python_configure.bzl", "python_configure")
-python_configure(name = "local_config_python", python_version = "3")
+
+python_configure(
+    name = "local_config_python",
+    python_version = "3",
+)
 
 load("//python:py_pkg_cc_deps.bzl", "py_pkg_cc_deps")
 
@@ -76,5 +94,13 @@
     name = "tensorflow_cc_deps",
     includes = ["tensorflow/include"],
     libs = ["tensorflow/libtensorflow_framework.so.2"],
-    pkg = requirement("tensorflow-cpu"),
+    pkg = requirement("tensorflow"),
+)
+
+# Optimized kernel deps
+http_archive(
+    name = "nnlib_hifi4",
+    build_file = "@tflite_micro//third_party/xtensa/nnlib_hifi4:nnlib_hifi4.BUILD",
+    strip_prefix = "nnlib-hifi4-34f5f995f28d298ae2b6e2ba6e76c32a5cb34989",
+    urls = ["https://github.com/foss-xtensa/nnlib-hifi4/archive/34f5f995f28d298ae2b6e2ba6e76c32a5cb34989.zip"],
 )
diff --git a/ci/Dockerfile.micro b/ci/Dockerfile.micro
index 6ce775e..0515cca 100644
--- a/ci/Dockerfile.micro
+++ b/ci/Dockerfile.micro
@@ -59,7 +59,6 @@
 # https://github.com/tensorflow/tflite-micro/pull/337
 # https://github.com/tensorflow/tflite-micro/pull/410
 RUN pip install Pillow
-RUN pip install Wave
 
 # necessary bits for create_size_log scripts
 RUN pip install pandas
diff --git a/ci/Dockerfile.xtensa_xplorer_11 b/ci/Dockerfile.xtensa_xplorer_11
new file mode 100644
index 0000000..fa7f316
--- /dev/null
+++ b/ci/Dockerfile.xtensa_xplorer_11
@@ -0,0 +1,49 @@
+FROM python:3.10-bullseye
+ENV DEBIAN_FRONTEND noninterative
+
+RUN \
+  apt update && \
+  apt install -y \
+    automake \
+    build-essential \
+    curl \
+    git \
+    unzip \
+    wget
+
+WORKDIR /opt/xtensa
+
+COPY ./Xplorer-8.0.11-linux-x64-installer.bin .
+COPY ./mini1m1m_RI_2019_2_linux_w_keys.tgz .
+COPY ./XtensaTools_RI_2022_9_linux.tgz .
+COPY ci/install_cores_xplorer_11.sh .
+COPY ci/install_bazelisk.sh .
+
+RUN \
+  pip3 install Pillow
+
+RUN \
+  pip3 install numpy
+
+RUN \
+  chmod +x Xplorer-8.0.11-linux-x64-installer.bin && \
+  ./Xplorer-8.0.11-linux-x64-installer.bin --prefix /opt/xtensa --mode unattended
+
+ENV XTENSA_BASE "/opt/xtensa/XtDevTools/install/"
+
+RUN \
+  chmod +x install_cores_xplorer_11.sh && \
+  ./install_cores_xplorer_11.sh
+
+RUN ./install_bazelisk.sh
+
+RUN \
+  rm Xplorer-8.0.11-linux-x64-installer.bin && \
+  rm mini1m1m_RI_2019_2_linux_w_keys.tgz && \
+  rm XtensaTools_RI_2022_9_linux.tgz && \
+  rm -f install_cores_xplorer_11.sh
+
+ENV LIC_DIR "/opt/xtensa/licenses/RI-2020.4-linux"
+ENV LM_LICENSE_FILE "/opt/xtensa/licenses/RI-2019.2-linux/mini1m1m_RG/misc/license.dat"
+
+CMD /opt/xtensa/XtDevTools/install/tools/RI-2020.4-linux/XtensaTools/bin/xt-clang++ --xtensa-core=
diff --git a/ci/Dockerfile.xtensa_xplorer_13 b/ci/Dockerfile.xtensa_xplorer_13
new file mode 100644
index 0000000..d50be6c
--- /dev/null
+++ b/ci/Dockerfile.xtensa_xplorer_13
@@ -0,0 +1,51 @@
+FROM python:3.10-bullseye
+ENV DEBIAN_FRONTEND noninterative
+
+RUN \
+  apt update && \
+  apt install -y \
+    automake \
+    build-essential \
+    curl \
+    git \
+    unzip \
+    wget
+
+WORKDIR /opt/xtensa
+
+COPY ./Xplorer-8.0.13-linux-x64-installer.bin .
+COPY ./F1_190305_swupgrade_linux.tgz .
+COPY ./P6_200528_linux.tgz .
+COPY ./HIFI_190304_swupgrade_linux.tgz .
+COPY ci/install_cores_xplorer_13.sh .
+COPY ci/install_bazelisk.sh .
+
+RUN \
+  pip3 install Pillow
+
+RUN \
+  pip3 install numpy
+
+RUN \
+  chmod +x Xplorer-8.0.13-linux-x64-installer.bin && \
+  ./Xplorer-8.0.13-linux-x64-installer.bin --prefix /opt/xtensa --mode unattended
+
+ENV XTENSA_BASE "/opt/xtensa/XtDevTools/install/"
+
+RUN \
+  chmod +x install_cores_xplorer_13.sh && \
+  ./install_cores_xplorer_13.sh
+
+RUN ./install_bazelisk.sh
+
+RUN \
+  rm Xplorer-8.0.13-linux-x64-installer.bin && \
+  rm F1_190305_swupgrade_linux.tgz && \
+  rm P6_200528_linux.tgz && \
+  rm HIFI_190304_swupgrade_linux.tgz && \
+  rm -f install_cores_xplorer_13.sh
+
+ENV LIC_DIR "/opt/xtensa/licenses/RI-2020.4-linux"
+ENV LM_LICENSE_FILE "${LIC_DIR}/F1_190305_swupgrade/misc/license.dat:${LIC_DIR}/AE_HiFi5_LE5_AO_FP_XC/misc/license.dat:${LIC_DIR}/P6_200528/misc/license.dat:${LIC_DIR}/HIFI_190304_swupgrade/misc/license.dat"
+
+CMD /opt/xtensa/XtDevTools/install/tools/RI-2020.4-linux/XtensaTools/bin/xt-clang++ --xtensa-core=
diff --git a/ci/Dockerfile.xtensa_xplorer_solo b/ci/Dockerfile.xtensa_xplorer_solo
new file mode 100644
index 0000000..e7fe205
--- /dev/null
+++ b/ci/Dockerfile.xtensa_xplorer_solo
@@ -0,0 +1,49 @@
+FROM python:3.10-bullseye
+ENV DEBIAN_FRONTEND noninteractive
+
+RUN \
+  apt update && \
+  apt install -y \
+    automake \
+    build-essential \
+    curl \
+    git \
+    unzip \
+    wget
+
+WORKDIR /opt/xtensa
+
+COPY ./Xplorer-solo-9.0.19-linux-x64-installer.bin .
+COPY ./PRD_H5_RDO_07_01_2022_linux.tgz .
+COPY ./XtensaTools_RI_2022_9_linux.tgz .
+COPY ci/install_cores_xplorer_solo.sh .
+COPY ci/install_bazelisk.sh .
+
+RUN \
+  pip3 install Pillow
+
+RUN \
+  pip3 install numpy
+
+RUN \
+  chmod +x Xplorer-solo-9.0.19-linux-x64-installer.bin && \
+  ./Xplorer-solo-9.0.19-linux-x64-installer.bin --prefix /opt/xtensa --mode unattended
+
+ENV XTENSA_BASE "/opt/xtensa/XtDevTools/install/"
+
+RUN \
+  chmod +x install_cores_xplorer_solo.sh && \
+  ./install_cores_xplorer_solo.sh
+
+RUN ./install_bazelisk.sh
+
+RUN \
+  rm Xplorer-solo-9.0.19-linux-x64-installer.bin && \
+  rm PRD_H5_RDO_07_01_2022_linux.tgz && \
+  rm XtensaTools_RI_2022_9_linux.tgz && \
+  rm -f install_cores_xplorer_solo.sh
+
+ENV LIC_DIR "/opt/xtensa/licenses/RI-2020.4-linux"
+ENV LM_LICENSE_FILE "/opt/xtensa/licenses/RI-2022.9-linux/PRD_H5_RDO_07_01_2022/misc/license.dat"
+
+CMD /opt/xtensa/XtDevTools/install/tools/RI-2020.4-linux/XtensaTools/bin/xt-clang++ --xtensa-core=
diff --git a/ci/flatbuffers_for_tf_sync/BUILD b/ci/flatbuffers_for_tf_sync/BUILD
deleted file mode 100644
index 82bab3f..0000000
--- a/ci/flatbuffers_for_tf_sync/BUILD
+++ /dev/null
@@ -1 +0,0 @@
-# This empty BUILD file is required to make Bazel treat this directory as a package.
diff --git a/ci/flatbuffers_for_tf_sync/BUILD.system b/ci/flatbuffers_for_tf_sync/BUILD.system
deleted file mode 100644
index 8fe4d7a..0000000
--- a/ci/flatbuffers_for_tf_sync/BUILD.system
+++ /dev/null
@@ -1,43 +0,0 @@
-licenses(["notice"])  # Apache 2.0
-
-filegroup(
-    name = "LICENSE.txt",
-    visibility = ["//visibility:public"],
-)
-
-# Public flatc library to compile flatbuffer files at runtime.
-cc_library(
-    name = "flatbuffers",
-    linkopts = ["-lflatbuffers"],
-    visibility = ["//visibility:public"],
-)
-
-# Public flatc compiler library.
-cc_library(
-    name = "flatc_library",
-    linkopts = ["-lflatbuffers"],
-    visibility = ["//visibility:public"],
-)
-
-genrule(
-    name = "lnflatc",
-    outs = ["flatc.bin"],
-    cmd = "ln -s $$(which flatc) $@",
-)
-
-# Public flatc compiler.
-sh_binary(
-    name = "flatc",
-    srcs = ["flatc.bin"],
-    visibility = ["//visibility:public"],
-)
-
-cc_library(
-    name = "runtime_cc",
-    visibility = ["//visibility:public"],
-)
-
-py_library(
-    name = "runtime_py",
-    visibility = ["//visibility:public"],
-)
diff --git a/ci/flatbuffers_for_tf_sync/build_defs.bzl b/ci/flatbuffers_for_tf_sync/build_defs.bzl
deleted file mode 100644
index eaefb01..0000000
--- a/ci/flatbuffers_for_tf_sync/build_defs.bzl
+++ /dev/null
@@ -1,640 +0,0 @@
-"""BUILD rules for generating flatbuffer files."""
-
-load("@build_bazel_rules_android//android:rules.bzl", "android_library")
-load("@rules_python//python:defs.bzl", "py_library")
-
-flatc_path = "@flatbuffers//:flatc"
-zip_files = "//tensorflow/lite/tools:zip_files"
-
-DEFAULT_INCLUDE_PATHS = [
-    "./",
-    "$(GENDIR)",
-    "$(BINDIR)",
-]
-
-DEFAULT_FLATC_ARGS = [
-    "--no-union-value-namespacing",
-    "--gen-object-api",
-]
-
-def flatbuffer_library_public(
-        name,
-        srcs,
-        outs,
-        language_flag,
-        out_prefix = "",
-        includes = [],
-        include_paths = [],
-        compatible_with = [],
-        flatc_args = DEFAULT_FLATC_ARGS,
-        reflection_name = "",
-        reflection_visibility = None,
-        output_to_bindir = False):
-    """Generates code files for reading/writing the given flatbuffers in the requested language using the public compiler.
-
-    Outs:
-      filegroup(name): all generated source files.
-      Fileset([reflection_name]): (Optional) all generated reflection binaries.
-
-    Args:
-      name: Rule name.
-      srcs: Source .fbs files. Sent in order to the compiler.
-      outs: Output files from flatc.
-      language_flag: Target language flag. One of [-c, -j, -js].
-      out_prefix: Prepend this path to the front of all generated files except on
-          single source targets. Usually is a directory name.
-      includes: Optional, list of filegroups of schemas that the srcs depend on.
-      include_paths: Optional, list of paths the includes files can be found in.
-      compatible_with: Optional, passed to genrule for environments this rule
-          can be built for.
-      flatc_args: Optional, list of additional arguments to pass to flatc.
-      reflection_name: Optional, if set this will generate the flatbuffer
-        reflection binaries for the schemas.
-      reflection_visibility: The visibility of the generated reflection Fileset.
-      output_to_bindir: Passed to genrule for output to bin directory.
-    """
-    include_paths_cmd = ["-I %s" % (s) for s in include_paths]
-
-    # '$(@D)' when given a single source target will give the appropriate
-    # directory. Appending 'out_prefix' is only necessary when given a build
-    # target with multiple sources.
-    output_directory = (
-        ("-o $(@D)/%s" % (out_prefix)) if len(srcs) > 1 else ("-o $(@D)")
-    )
-    genrule_cmd = " ".join([
-        "for f in $(SRCS); do",
-        "$(location %s)" % (flatc_path),
-        " ".join(flatc_args),
-        " ".join(include_paths_cmd),
-        language_flag,
-        output_directory,
-        "$$f;",
-        "done",
-    ])
-    native.genrule(
-        name = name,
-        srcs = srcs,
-        outs = outs,
-        output_to_bindir = output_to_bindir,
-        compatible_with = compatible_with,
-        tools = includes + [flatc_path],
-        cmd = genrule_cmd,
-        message = "Generating flatbuffer files for %s:" % (name),
-    )
-    if reflection_name:
-        reflection_genrule_cmd = " ".join([
-            "for f in $(SRCS); do",
-            "$(location %s)" % (flatc_path),
-            "-b --schema",
-            " ".join(flatc_args),
-            " ".join(include_paths_cmd),
-            language_flag,
-            output_directory,
-            "$$f;",
-            "done",
-        ])
-        reflection_outs = [
-            (out_prefix + "%s.bfbs") % (s.replace(".fbs", "").split("/")[-1])
-            for s in srcs
-        ]
-        native.genrule(
-            name = "%s_srcs" % reflection_name,
-            srcs = srcs,
-            outs = reflection_outs,
-            output_to_bindir = output_to_bindir,
-            compatible_with = compatible_with,
-            tools = includes + [flatc_path],
-            cmd = reflection_genrule_cmd,
-            message = "Generating flatbuffer reflection binary for %s:" % (name),
-        )
-        # TODO(b/114456773): Make bazel rules proper and supported by flatbuffer
-        # Have to comment this since FilesetEntry is not supported in bazel
-        # skylark.
-        # native.Fileset(
-        #     name = reflection_name,
-        #     out = "%s_out" % reflection_name,
-        #     entries = [
-        #         native.FilesetEntry(files = reflection_outs),
-        #     ],
-        #     visibility = reflection_visibility,
-        #     compatible_with = compatible_with,
-        # )
-
-def flatbuffer_cc_library(
-        name,
-        srcs,
-        srcs_filegroup_name = "",
-        out_prefix = "",
-        includes = [],
-        include_paths = [],
-        compatible_with = [],
-        flatc_args = DEFAULT_FLATC_ARGS,
-        visibility = None,
-        srcs_filegroup_visibility = None,
-        gen_reflections = False):
-    '''A cc_library with the generated reader/writers for the given flatbuffer definitions.
-
-    Outs:
-      filegroup([name]_srcs): all generated .h files.
-      filegroup(srcs_filegroup_name if specified, or [name]_includes if not):
-          Other flatbuffer_cc_library's can pass this in for their `includes`
-          parameter, if they depend on the schemas in this library.
-      Fileset([name]_reflection): (Optional) all generated reflection binaries.
-      cc_library([name]): library with sources and flatbuffers deps.
-
-    Remarks:
-      ** Because the genrule used to call flatc does not have any trivial way of
-        computing the output list of files transitively generated by includes and
-        --gen-includes (the default) being defined for flatc, the --gen-includes
-        flag will not work as expected. The way around this is to add a dependency
-        to the flatbuffer_cc_library defined alongside the flatc included Fileset.
-        For example you might define:
-
-        flatbuffer_cc_library(
-            name = "my_fbs",
-            srcs = [ "schemas/foo.fbs" ],
-            includes = [ "//third_party/bazz:bazz_fbs_includes" ],
-        )
-
-        In which foo.fbs includes a few files from the Fileset defined at
-        //third_party/bazz:bazz_fbs_includes. When compiling the library that
-        includes foo_generated.h, and therefore has my_fbs as a dependency, it
-        will fail to find any of the bazz *_generated.h files unless you also
-        add bazz's flatbuffer_cc_library to your own dependency list, e.g.:
-
-        cc_library(
-            name = "my_lib",
-            deps = [
-                ":my_fbs",
-                "//third_party/bazz:bazz_fbs"
-            ],
-        )
-
-        Happy dependent Flatbuffering!
-
-    Args:
-      name: Rule name.
-      srcs: Source .fbs files. Sent in order to the compiler.
-      srcs_filegroup_name: Name of the output filegroup that holds srcs. Pass this
-          filegroup into the `includes` parameter of any other
-          flatbuffer_cc_library that depends on this one's schemas.
-      out_prefix: Prepend this path to the front of all generated files. Usually
-          is a directory name.
-      includes: Optional, list of filegroups of schemas that the srcs depend on.
-          ** SEE REMARKS BELOW **
-      include_paths: Optional, list of paths the includes files can be found in.
-      compatible_with: Optional, passed to genrule for environments this rule
-          can be built for
-      flatc_args: Optional list of additional arguments to pass to flatc
-          (e.g. --gen-mutable).
-      visibility: The visibility of the generated cc_library. By default, use the
-          default visibility of the project.
-      srcs_filegroup_visibility: The visibility of the generated srcs filegroup.
-          By default, use the value of the visibility parameter above.
-      gen_reflections: Optional, if true this will generate the flatbuffer
-        reflection binaries for the schemas.
-    '''
-    output_headers = [
-        (out_prefix + "%s_generated.h") % (s.replace(".fbs", "").split("/")[-1])
-        for s in srcs
-    ]
-    reflection_name = "%s_reflection" % name if gen_reflections else ""
-
-    flatbuffer_library_public(
-        name = "%s_srcs" % (name),
-        srcs = srcs,
-        outs = output_headers,
-        language_flag = "-c",
-        out_prefix = out_prefix,
-        includes = includes,
-        include_paths = include_paths,
-        compatible_with = compatible_with,
-        flatc_args = flatc_args,
-        reflection_name = reflection_name,
-        reflection_visibility = visibility,
-    )
-    native.cc_library(
-        name = name,
-        hdrs = output_headers,
-        srcs = output_headers,
-        features = [
-            "-parse_headers",
-        ],
-        deps = [
-            "@flatbuffers//:runtime_cc",
-        ],
-        includes = ["."],
-        linkstatic = 1,
-        visibility = visibility,
-        compatible_with = compatible_with,
-    )
-
-    # A filegroup for the `srcs`. That is, all the schema files for this
-    # Flatbuffer set.
-    native.filegroup(
-        name = srcs_filegroup_name if srcs_filegroup_name else "%s_includes" % (name),
-        srcs = srcs,
-        visibility = srcs_filegroup_visibility if srcs_filegroup_visibility != None else visibility,
-        compatible_with = compatible_with,
-    )
-
-# Custom provider to track dependencies transitively.
-FlatbufferInfo = provider(
-    fields = {
-        "transitive_srcs": "flatbuffer schema definitions.",
-    },
-)
-
-def _flatbuffer_schemas_aspect_impl(target, ctx):
-    _ignore = [target]
-    transitive_srcs = depset()
-    if hasattr(ctx.rule.attr, "deps"):
-        for dep in ctx.rule.attr.deps:
-            if FlatbufferInfo in dep:
-                transitive_srcs = depset(dep[FlatbufferInfo].transitive_srcs, transitive = [transitive_srcs])
-    if hasattr(ctx.rule.attr, "srcs"):
-        for src in ctx.rule.attr.srcs:
-            if FlatbufferInfo in src:
-                transitive_srcs = depset(src[FlatbufferInfo].transitive_srcs, transitive = [transitive_srcs])
-            for f in src.files:
-                if f.extension == "fbs":
-                    transitive_srcs = depset([f], transitive = [transitive_srcs])
-    return [FlatbufferInfo(transitive_srcs = transitive_srcs)]
-
-# An aspect that runs over all dependencies and transitively collects
-# flatbuffer schema files.
-_flatbuffer_schemas_aspect = aspect(
-    attr_aspects = [
-        "deps",
-        "srcs",
-    ],
-    implementation = _flatbuffer_schemas_aspect_impl,
-)
-
-# Rule to invoke the flatbuffer compiler.
-def _gen_flatbuffer_srcs_impl(ctx):
-    outputs = ctx.attr.outputs
-    include_paths = ctx.attr.include_paths
-    if ctx.attr.no_includes:
-        no_includes_statement = ["--no-includes"]
-    else:
-        no_includes_statement = []
-
-    # Need to generate all files in a directory.
-    if not outputs:
-        outputs = [ctx.actions.declare_directory("{}_all".format(ctx.attr.name))]
-        output_directory = outputs[0].path
-    else:
-        outputs = [ctx.actions.declare_file(output) for output in outputs]
-        output_directory = outputs[0].dirname
-
-    deps = depset(ctx.files.srcs + ctx.files.deps, transitive = [
-        dep[FlatbufferInfo].transitive_srcs
-        for dep in ctx.attr.deps
-        if FlatbufferInfo in dep
-    ])
-
-    include_paths_cmd_line = []
-    for s in include_paths:
-        include_paths_cmd_line.extend(["-I", s])
-
-    for src in ctx.files.srcs:
-        ctx.actions.run(
-            inputs = deps,
-            outputs = outputs,
-            executable = ctx.executable._flatc,
-            arguments = [
-                            ctx.attr.language_flag,
-                            "-o",
-                            output_directory,
-                            # Allow for absolute imports and referencing of generated files.
-                            "-I",
-                            "./",
-                            "-I",
-                            ctx.genfiles_dir.path,
-                            "-I",
-                            ctx.bin_dir.path,
-                        ] + no_includes_statement +
-                        include_paths_cmd_line + [
-                "--no-union-value-namespacing",
-                "--gen-object-api",
-                src.path,
-            ],
-            progress_message = "Generating flatbuffer files for {}:".format(src),
-            use_default_shell_env = True,
-        )
-    return [
-        DefaultInfo(files = depset(outputs)),
-    ]
-
-_gen_flatbuffer_srcs = rule(
-    _gen_flatbuffer_srcs_impl,
-    attrs = {
-        "srcs": attr.label_list(
-            allow_files = [".fbs"],
-            mandatory = True,
-        ),
-        "outputs": attr.string_list(
-            default = [],
-            mandatory = False,
-        ),
-        "deps": attr.label_list(
-            default = [],
-            mandatory = False,
-            aspects = [_flatbuffer_schemas_aspect],
-        ),
-        "include_paths": attr.string_list(
-            default = [],
-            mandatory = False,
-        ),
-        "language_flag": attr.string(
-            mandatory = True,
-        ),
-        "no_includes": attr.bool(
-            default = False,
-            mandatory = False,
-        ),
-        "_flatc": attr.label(
-            default = Label("@flatbuffers//:flatc"),
-            executable = True,
-            cfg = "exec",
-        ),
-    },
-    output_to_genfiles = True,
-)
-
-def flatbuffer_py_strip_prefix_srcs(name, srcs = [], strip_prefix = ""):
-    """Strips path prefix.
-
-    Args:
-      name: Rule name. (required)
-      srcs: Source .py files. (required)
-      strip_prefix: Path that needs to be stripped from the srcs filepaths. (required)
-    """
-    for src in srcs:
-        native.genrule(
-            name = name + "_" + src.replace(".", "_").replace("/", "_"),
-            srcs = [src],
-            outs = [src.replace(strip_prefix, "")],
-            cmd = "cp $< $@",
-        )
-
-def _concat_flatbuffer_py_srcs_impl(ctx):
-    # Merge all generated python files. The files are concatenated and import
-    # statements are removed. Finally we import the flatbuffer runtime library.
-    # IMPORTANT: Our Windows shell does not support "find ... -exec" properly.
-    # If you're changing the commandline below, please build wheels and run smoke
-    # tests on all the three operating systems.
-    command = "echo 'import flatbuffers\n' > %s; "
-    command += "for f in $(find %s -name '*.py' | sort); do cat $f | sed '/import flatbuffers/d' >> %s; done "
-    ctx.actions.run_shell(
-        inputs = ctx.attr.deps[0].files,
-        outputs = [ctx.outputs.out],
-        command = command % (
-            ctx.outputs.out.path,
-            ctx.attr.deps[0].files.to_list()[0].path,
-            ctx.outputs.out.path,
-        ),
-        use_default_shell_env = True,
-    )
-
-_concat_flatbuffer_py_srcs = rule(
-    _concat_flatbuffer_py_srcs_impl,
-    attrs = {
-        "deps": attr.label_list(mandatory = True),
-    },
-    output_to_genfiles = True,
-    outputs = {"out": "%{name}.py"},
-)
-
-def flatbuffer_py_library(
-        name,
-        srcs,
-        deps = [],
-        include_paths = []):
-    """A py_library with the generated reader/writers for the given schema.
-
-    This rule assumes that the schema files define non-conflicting names, so that
-    they can be merged in a single file. This is e.g. the case if only a single
-    namespace is used.
-    The rule call the flatbuffer compiler for all schema files and merges the
-    generated python files into a single file that is wrapped in a py_library.
-
-    Args:
-      name: Rule name. (required)
-      srcs: List of source .fbs files. (required)
-      deps: List of dependencies.
-      include_paths: Optional, list of paths the includes files can be found in.
-    """
-    all_srcs = "{}_srcs".format(name)
-    _gen_flatbuffer_srcs(
-        name = all_srcs,
-        srcs = srcs,
-        language_flag = "--python",
-        deps = deps,
-        include_paths = include_paths,
-    )
-    all_srcs_no_include = "{}_srcs_no_include".format(name)
-    _gen_flatbuffer_srcs(
-        name = all_srcs_no_include,
-        srcs = srcs,
-        language_flag = "--python",
-        deps = deps,
-        no_includes = True,
-        include_paths = include_paths,
-    )
-    concat_py_srcs = "{}_generated".format(name)
-    _concat_flatbuffer_py_srcs(
-        name = concat_py_srcs,
-        deps = [
-            ":{}".format(all_srcs_no_include),
-        ],
-    )
-    py_library(
-        name = name,
-        srcs = [
-            ":{}".format(concat_py_srcs),
-        ],
-        srcs_version = "PY3",
-        deps = deps + [
-            "@flatbuffers//:runtime_py",
-        ],
-    )
-
-def flatbuffer_java_library(
-        name,
-        srcs,
-        custom_package = "",
-        package_prefix = "",
-        include_paths = DEFAULT_INCLUDE_PATHS,
-        flatc_args = DEFAULT_FLATC_ARGS,
-        visibility = None):
-    """A java library with the generated reader/writers for the given flatbuffer definitions.
-
-    Args:
-      name: Rule name. (required)
-      srcs: List of source .fbs files including all includes. (required)
-      custom_package: Package name of generated Java files. If not specified
-          namespace in the schema files will be used. (optional)
-      package_prefix: like custom_package, but prefixes to the existing
-          namespace. (optional)
-      include_paths: List of paths that includes files can be found in. (optional)
-      flatc_args: List of additional arguments to pass to flatc. (optional)
-      visibility: Visibility setting for the java_library rule. (optional)
-    """
-    out_srcjar = "java_%s_all.srcjar" % name
-    flatbuffer_java_srcjar(
-        name = "%s_srcjar" % name,
-        srcs = srcs,
-        out = out_srcjar,
-        custom_package = custom_package,
-        flatc_args = flatc_args,
-        include_paths = include_paths,
-        package_prefix = package_prefix,
-    )
-
-    native.filegroup(
-        name = "%s.srcjar" % name,
-        srcs = [out_srcjar],
-    )
-
-    native.java_library(
-        name = name,
-        srcs = [out_srcjar],
-        javacopts = ["-source 7 -target 7"],
-        deps = [
-            "@flatbuffers//:runtime_java",
-        ],
-        visibility = visibility,
-    )
-
-def flatbuffer_java_srcjar(
-        name,
-        srcs,
-        out,
-        custom_package = "",
-        package_prefix = "",
-        include_paths = DEFAULT_INCLUDE_PATHS,
-        flatc_args = DEFAULT_FLATC_ARGS):
-    """Generate flatbuffer Java source files.
-
-    Args:
-      name: Rule name. (required)
-      srcs: List of source .fbs files including all includes. (required)
-      out: Output file name. (required)
-      custom_package: Package name of generated Java files. If not specified
-          namespace in the schema files will be used. (optional)
-      package_prefix: like custom_package, but prefixes to the existing
-          namespace. (optional)
-      include_paths: List of paths that includes files can be found in. (optional)
-      flatc_args: List of additional arguments to pass to flatc. (optional)
-    """
-    command_fmt = """set -e
-      tmpdir=$(@D)
-      schemas=$$tmpdir/schemas
-      java_root=$$tmpdir/java
-      rm -rf $$schemas
-      rm -rf $$java_root
-      mkdir -p $$schemas
-      mkdir -p $$java_root
-
-      for src in $(SRCS); do
-        dest=$$schemas/$$src
-        rm -rf $$(dirname $$dest)
-        mkdir -p $$(dirname $$dest)
-        if [ -z "{custom_package}" ] && [ -z "{package_prefix}" ]; then
-          cp -f $$src $$dest
-        else
-          if [ -z "{package_prefix}" ]; then
-            sed -e "s/namespace\\s.*/namespace {custom_package};/" $$src > $$dest
-          else
-            sed -e "s/namespace \\([^;]\\+\\);/namespace {package_prefix}.\\1;/" $$src > $$dest
-          fi
-        fi
-      done
-
-      flatc_arg_I="-I $$tmpdir/schemas"
-      for include_path in {include_paths}; do
-        flatc_arg_I="$$flatc_arg_I -I $$schemas/$$include_path"
-      done
-
-      flatc_additional_args=
-      for arg in {flatc_args}; do
-        flatc_additional_args="$$flatc_additional_args $$arg"
-      done
-
-      for src in $(SRCS); do
-        $(location {flatc_path}) $$flatc_arg_I --java $$flatc_additional_args -o $$java_root  $$schemas/$$src
-      done
-
-      $(location {zip_files}) -export_zip_path=$@ -file_directory=$$java_root
-      """
-    genrule_cmd = command_fmt.format(
-        package_name = native.package_name(),
-        custom_package = custom_package,
-        package_prefix = package_prefix,
-        flatc_path = flatc_path,
-        zip_files = zip_files,
-        include_paths = " ".join(include_paths),
-        flatc_args = " ".join(flatc_args),
-    )
-
-    native.genrule(
-        name = name,
-        srcs = srcs,
-        outs = [out],
-        tools = [flatc_path, zip_files],
-        cmd = genrule_cmd,
-    )
-
-def flatbuffer_android_library(
-        name,
-        srcs,
-        custom_package = "",
-        package_prefix = "",
-        include_paths = DEFAULT_INCLUDE_PATHS,
-        flatc_args = DEFAULT_FLATC_ARGS,
-        visibility = None):
-    """An android_library with the generated reader/writers for the given flatbuffer definitions.
-
-    Args:
-      name: Rule name. (required)
-      srcs: List of source .fbs files including all includes. (required)
-      custom_package: Package name of generated Java files. If not specified
-          namespace in the schema files will be used. (optional)
-      package_prefix: like custom_package, but prefixes to the existing
-          namespace. (optional)
-      include_paths: List of paths that includes files can be found in. (optional)
-      flatc_args: List of additional arguments to pass to flatc. (optional)
-      visibility: Visibility setting for the android_library rule. (optional)
-    """
-    out_srcjar = "android_%s_all.srcjar" % name
-    flatbuffer_java_srcjar(
-        name = "%s_srcjar" % name,
-        srcs = srcs,
-        out = out_srcjar,
-        custom_package = custom_package,
-        flatc_args = flatc_args,
-        include_paths = include_paths,
-        package_prefix = package_prefix,
-    )
-
-    native.filegroup(
-        name = "%s.srcjar" % name,
-        srcs = [out_srcjar],
-    )
-
-    # To support org.checkerframework.dataflow.qual.Pure.
-    checkerframework_annotations = [
-        "@org_checkerframework_qual",
-    ] if "--java-checkerframework" in flatc_args else []
-
-    android_library(
-        name = name,
-        srcs = [out_srcjar],
-        javacopts = ["-source 7 -target 7"],
-        visibility = visibility,
-        deps = [
-            "@flatbuffers//:runtime_android",
-        ] + checkerframework_annotations,
-    )
diff --git a/ci/flatbuffers_for_tf_sync/flatbuffers.BUILD b/ci/flatbuffers_for_tf_sync/flatbuffers.BUILD
deleted file mode 100644
index 108c0cd..0000000
--- a/ci/flatbuffers_for_tf_sync/flatbuffers.BUILD
+++ /dev/null
@@ -1,156 +0,0 @@
-load("@build_bazel_rules_android//android:rules.bzl", "android_library")
-load(":build_defs.bzl", "flatbuffer_py_strip_prefix_srcs")
-
-package(default_visibility = ["//visibility:public"])
-
-licenses(["notice"])  # Apache 2.0
-
-exports_files(["LICENSE.txt"])
-
-licenses(["notice"])
-
-config_setting(
-    name = "freebsd",
-    values = {"cpu": "freebsd"},
-)
-
-config_setting(
-    name = "windows",
-    values = {"cpu": "x64_windows"},
-)
-
-load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library")
-
-# Public flatc library to compile flatbuffer files at runtime.
-cc_library(
-    name = "flatbuffers",
-    hdrs = ["//:public_headers"],
-    linkstatic = 1,
-    strip_include_prefix = "/include",
-    visibility = ["//visibility:public"],
-    deps = ["//src:flatbuffers"],
-)
-
-# Public C++ headers for the Flatbuffers library.
-filegroup(
-    name = "public_headers",
-    srcs = [
-        "include/flatbuffers/base.h",
-        "include/flatbuffers/code_generators.h",
-        "include/flatbuffers/flatbuffers.h",
-        "include/flatbuffers/flexbuffers.h",
-        "include/flatbuffers/hash.h",
-        "include/flatbuffers/idl.h",
-        "include/flatbuffers/minireflect.h",
-        "include/flatbuffers/reflection.h",
-        "include/flatbuffers/reflection_generated.h",
-        "include/flatbuffers/registry.h",
-        "include/flatbuffers/stl_emulation.h",
-        "include/flatbuffers/util.h",
-    ],
-    visibility = ["//:__subpackages__"],
-)
-
-# Public flatc compiler library.
-cc_library(
-    name = "flatc_library",
-    linkstatic = 1,
-    visibility = ["//visibility:public"],
-    deps = [
-        "@flatbuffers//src:flatc_library",
-    ],
-)
-
-# Public flatc compiler.
-cc_binary(
-    name = "flatc",
-    linkopts = select({
-        ":freebsd": [
-            "-lm",
-        ],
-        ":windows": [],
-        "//conditions:default": [
-            "-lm",
-            "-ldl",
-        ],
-    }),
-    visibility = ["//visibility:public"],
-    deps = [
-        "@flatbuffers//src:flatc",
-    ],
-)
-
-filegroup(
-    name = "flatc_headers",
-    srcs = [
-        "include/flatbuffers/flatc.h",
-    ],
-    visibility = ["//:__subpackages__"],
-)
-
-# Library used by flatbuffer_cc_library rules.
-cc_library(
-    name = "runtime_cc",
-    hdrs = [
-        "include/flatbuffers/base.h",
-        "include/flatbuffers/flatbuffers.h",
-        "include/flatbuffers/flexbuffers.h",
-        "include/flatbuffers/stl_emulation.h",
-        "include/flatbuffers/util.h",
-    ],
-    linkstatic = 1,
-    strip_include_prefix = "/include",
-    visibility = ["//visibility:public"],
-)
-
-flatbuffer_py_strip_prefix_srcs(
-    name = "flatbuffer_py_strip_prefix",
-    srcs = [
-        "python/flatbuffers/__init__.py",
-        "python/flatbuffers/builder.py",
-        "python/flatbuffers/compat.py",
-        "python/flatbuffers/encode.py",
-        "python/flatbuffers/number_types.py",
-        "python/flatbuffers/packer.py",
-        "python/flatbuffers/table.py",
-        "python/flatbuffers/util.py",
-    ],
-    strip_prefix = "python/flatbuffers/",
-)
-
-filegroup(
-    name = "runtime_py_srcs",
-    srcs = [
-        "__init__.py",
-        "builder.py",
-        "compat.py",
-        "encode.py",
-        "number_types.py",
-        "packer.py",
-        "table.py",
-        "util.py",
-    ],
-)
-
-py_library(
-    name = "runtime_py",
-    srcs = [":runtime_py_srcs"],
-    visibility = ["//visibility:public"],
-)
-
-filegroup(
-    name = "runtime_java_srcs",
-    srcs = glob(["java/com/google/flatbuffers/**/*.java"]),
-)
-
-java_library(
-    name = "runtime_java",
-    srcs = [":runtime_java_srcs"],
-    visibility = ["//visibility:public"],
-)
-
-android_library(
-    name = "runtime_android",
-    srcs = [":runtime_java_srcs"],
-    visibility = ["//visibility:public"],
-)
diff --git a/ci/flatbuffers_for_tf_sync/workspace.bzl b/ci/flatbuffers_for_tf_sync/workspace.bzl
deleted file mode 100644
index 59c1fd9..0000000
--- a/ci/flatbuffers_for_tf_sync/workspace.bzl
+++ /dev/null
@@ -1,16 +0,0 @@
-"""Loads the Flatbuffers library, used by TF Lite."""
-
-load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
-
-def repo():
-    tf_http_archive(
-        name = "flatbuffers",
-        strip_prefix = "flatbuffers-1.12.0",
-        sha256 = "62f2223fb9181d1d6338451375628975775f7522185266cd5296571ac152bc45",
-        urls = tf_mirror_urls("https://github.com/google/flatbuffers/archive/v1.12.0.tar.gz"),
-        build_file = "//third_party/flatbuffers:flatbuffers.BUILD",
-        system_build_file = "//third_party/flatbuffers:BUILD.system",
-        link_files = {
-            "//third_party/flatbuffers:build_defs.bzl": "build_defs.bzl",
-        },
-    )
diff --git a/ci/install_bazelisk.sh b/ci/install_bazelisk.sh
index d2f8a13..db385e6 100755
--- a/ci/install_bazelisk.sh
+++ b/ci/install_bazelisk.sh
@@ -18,5 +18,5 @@
 wget https://github.com/bazelbuild/bazelisk/releases/download/v1.16.0/bazelisk-linux-amd64
 mv bazelisk-linux-amd64 bazel
 chmod +x bazel
-sudo mv bazel /usr/local/bin
+mv bazel /usr/local/bin
 
diff --git a/ci/install_cores_xplorer_11.sh b/ci/install_cores_xplorer_11.sh
new file mode 100755
index 0000000..1df00cb
--- /dev/null
+++ b/ci/install_cores_xplorer_11.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+mkdir /opt/xtensa/licenses
+
+mkdir -p /opt/xtensa/XtDevTools/install/tools/
+tar xvzf XtensaTools_RI_2022_9_linux.tgz --dir /opt/xtensa/XtDevTools/install/tools/
+
+
+###########
+#  Hifimini
+###########
+cd /opt/xtensa/
+tar xvzf mini1m1m_RI_2019_2_linux_w_keys.tgz --dir /opt/xtensa/licenses/
+cd /opt/xtensa/licenses/RI-2019.2-linux/mini1m1m_RG/
+
+./install --xtensa-tools \
+  /opt/xtensa/XtDevTools/install/tools/RI-2019.2-linux/XtensaTools/ \
+  --no-default \
+  --no-replace
diff --git a/ci/install_cores_xplorer_13.sh b/ci/install_cores_xplorer_13.sh
new file mode 100755
index 0000000..a24b1f3
--- /dev/null
+++ b/ci/install_cores_xplorer_13.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+mkdir /opt/xtensa/licenses
+
+##############
+#  Fusion F1
+##############
+cd /opt/xtensa/
+tar xvzf F1_190305_swupgrade_linux.tgz --dir /opt/xtensa/licenses/
+cd /opt/xtensa/licenses/RI-2020.4-linux/F1_190305_swupgrade/
+
+./install --xtensa-tools \
+  /opt/xtensa/XtDevTools/install/tools/RI-2020.4-linux/XtensaTools/ \
+  --no-default \
+  --no-replace
+
+##############
+#  Vision P6
+##############
+cd /opt/xtensa/
+tar xvzf P6_200528_linux.tgz --dir /opt/xtensa/licenses/
+cd /opt/xtensa/licenses/RI-2020.4-linux/P6_200528/
+
+./install --xtensa-tools \
+  /opt/xtensa/XtDevTools/install/tools/RI-2020.4-linux/XtensaTools/ \
+  --no-default \
+  --no-replace
+
+##############
+#  Hifi3Z
+##############
+cd /opt/xtensa/
+tar xvzf HIFI_190304_swupgrade_linux.tgz --dir /opt/xtensa/licenses/
+cd /opt/xtensa/licenses/RI-2020.4-linux/HIFI_190304_swupgrade/
+
+./install --xtensa-tools \
+  /opt/xtensa/XtDevTools/install/tools/RI-2020.4-linux/XtensaTools/ \
+  --no-default \
+  --no-replace
diff --git a/ci/install_cores_xplorer_solo.sh b/ci/install_cores_xplorer_solo.sh
new file mode 100755
index 0000000..559c67a
--- /dev/null
+++ b/ci/install_cores_xplorer_solo.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+mkdir /opt/xtensa/licenses
+
+mkdir -p /opt/xtensa/XtDevTools/install/tools/
+tar xvzf XtensaTools_RI_2022_9_linux.tgz --dir /opt/xtensa/XtDevTools/install/tools/
+
+
+###########
+#  Hifi5
+###########
+cd /opt/xtensa/
+tar xvzf PRD_H5_RDO_07_01_2022_linux.tgz --dir /opt/xtensa/licenses/
+cd /opt/xtensa/licenses/RI-2022.9-linux/PRD_H5_RDO_07_01_2022/
+
+./install --xtensa-tools \
+  /opt/xtensa/XtDevTools/install/tools/RI-2022.9-linux/XtensaTools/ \
+  --no-default \
+  --no-replace
diff --git a/ci/sync_from_upstream_tf.sh b/ci/sync_from_upstream_tf.sh
index 094df65..bb5d098 100755
--- a/ci/sync_from_upstream_tf.sh
+++ b/ci/sync_from_upstream_tf.sh
@@ -47,6 +47,7 @@
 git checkout tensorflow/lite/kernels/internal/optimized/neon_check.h
 # http://b/149862813
 git checkout tensorflow/lite/kernels/internal/runtime_shape.h
+git checkout tensorflow/lite/kernels/internal/runtime_shape.cc
 # http://b/187728891
 git checkout tensorflow/lite/kernels/op_macros.h
 # http://b/242077843
@@ -57,8 +58,8 @@
 bazel build tensorflow/lite/python:schema_py
 /bin/cp bazel-bin/tensorflow/lite/python/schema_py_generated.py tensorflow/lite/python/schema_py_generated.py
 
-bazel build tensorflow/lite/schema:schema_fbs_srcs
-/bin/cp ./bazel-bin/tensorflow/lite/schema/schema_generated.h tensorflow/lite/schema/schema_generated.h
+bazel build tensorflow/compiler/mlir/lite/schema:schema_fbs_srcs
+/bin/cp ./bazel-bin/tensorflow/compiler/mlir/lite/schema/schema_generated.h tensorflow/lite/schema/schema_generated.h
 
 # Must clean the bazel directories out after building as we don't check these in.
 bazel clean
diff --git a/ci/tflite_files.txt b/ci/tflite_files.txt
index 51381ce..6514137 100644
--- a/ci/tflite_files.txt
+++ b/ci/tflite_files.txt
@@ -1,3 +1,6 @@
+tensorflow/compiler/mlir/lite/schema/schema.fbs
+tensorflow/compiler/mlir/lite/schema/schema_utils.h
+tensorflow/compiler/mlir/lite/schema/schema_utils.cc
 tensorflow/lite/array.h
 tensorflow/lite/array.cc
 tensorflow/lite/builtin_op_data.h
@@ -12,7 +15,6 @@
 tensorflow/lite/kernels/internal/tensor_ctypes.cc
 tensorflow/lite/kernels/internal/reference/comparisons.cc
 tensorflow/lite/kernels/kernel_util.cc
-tensorflow/lite/schema/schema_utils.cc
 tensorflow/lite/c/builtin_op_data.h
 tensorflow/lite/c/c_api_types.h
 tensorflow/lite/c/common.h
@@ -108,7 +110,6 @@
 tensorflow/lite/portable_type_to_tflitetype.h
 tensorflow/lite/python/schema_util.py
 tensorflow/lite/schema/schema_utils.h
-tensorflow/lite/schema/schema.fbs
 tensorflow/lite/tools/flatbuffer_utils.py
 tensorflow/lite/tools/flatbuffer_utils_test.py
 tensorflow/lite/tools/randomize_weights.py
diff --git a/codegen/BUILD b/codegen/BUILD
new file mode 100644
index 0000000..ae62c04
--- /dev/null
+++ b/codegen/BUILD
@@ -0,0 +1,71 @@
+load("@rules_python//python:defs.bzl", "py_binary", "py_library")
+load("@tflm_pip_deps//:requirements.bzl", "requirement")
+
+package(
+    default_visibility = ["//:__subpackages__"],
+    licenses = ["notice"],
+)
+
+py_library(
+    name = "graph",
+    srcs = [
+        "graph.py",
+    ],
+    deps = [
+        ":tensor",
+        ":utils",
+        "//codegen/operators:factory",
+        "//codegen/operators:operator",
+        "//tensorflow/lite/python:schema_py",
+        "//tensorflow/lite/tools:visualize",
+    ],
+)
+
+py_library(
+    name = "inference_generator",
+    srcs = [
+        "inference_generator.py",
+    ],
+    data = [
+        "templates/inference.cc.mako",
+        "templates/inference.h.mako",
+    ],
+    deps = [
+        ":graph",
+        requirement("mako"),
+    ],
+)
+
+py_library(
+    name = "tensor",
+    srcs = [
+        "tensor.py",
+    ],
+    deps = [
+        ":utils",
+        "//tensorflow/lite/python:schema_py",
+    ],
+)
+
+py_library(
+    name = "utils",
+    srcs = [
+        "utils.py",
+    ],
+)
+
+py_binary(
+    name = "code_generator",
+    srcs = [
+        "code_generator.py",
+    ],
+    python_version = "PY3",
+    srcs_version = "PY3",
+    deps = [
+        ":graph",
+        ":inference_generator",
+        "//tensorflow/lite/tools:flatbuffer_utils",
+        "@absl_py//absl:app",
+        "@absl_py//absl/flags",
+    ],
+)
diff --git a/codegen/README.md b/codegen/README.md
new file mode 100644
index 0000000..ff7e9d2
--- /dev/null
+++ b/codegen/README.md
@@ -0,0 +1,3 @@
+# TFLM Code Generator
+
+This is a work in progress experiment. It is not ready for use.
diff --git a/codegen/build_def.bzl b/codegen/build_def.bzl
new file mode 100644
index 0000000..28b6232
--- /dev/null
+++ b/codegen/build_def.bzl
@@ -0,0 +1,44 @@
+""" Build rule for generating ML inference code from TFLite model. """
+
+load("//tensorflow/lite/micro:build_def.bzl", "micro_copts")
+
+def tflm_inference_library(
+        name,
+        tflite_model,
+        visibility = None):
+    """Creates a C++ library capable of performing ML inference of the provided
+    model.
+
+    Args:
+      name: Target name.
+      tflite_model: TFLite Model to generate inference from.
+      visibility: Visibility for the C++ library.
+    """
+    generated_target = name + "_gen"
+    native.genrule(
+        name = generated_target,
+        srcs = [tflite_model],
+        outs = [name + ".h", name + ".cc"],
+        tools = ["//codegen:code_generator"],
+        cmd = "$(location //codegen:code_generator) " +
+              "--model=$< --output_dir=$(RULEDIR) --output_name=%s" % name,
+        visibility = ["//visibility:private"],
+    )
+
+    native.cc_library(
+        name = name,
+        hdrs = [name + ".h"],
+        srcs = [name + ".cc"],
+        deps = [
+            generated_target,
+            "//codegen/runtime:micro_codegen_context",
+            "//tensorflow/lite/c:common",
+            "//tensorflow/lite/c:c_api_types",
+            "//tensorflow/lite/kernels/internal:compatibility",
+            "//tensorflow/lite/micro/kernels:micro_ops",
+            "//tensorflow/lite/micro:micro_common",
+            "//tensorflow/lite/micro:micro_context",
+        ],
+        copts = micro_copts(),
+        visibility = visibility,
+    )
diff --git a/codegen/code_generator.py b/codegen/code_generator.py
new file mode 100644
index 0000000..91cab73
--- /dev/null
+++ b/codegen/code_generator.py
@@ -0,0 +1,66 @@
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+""" Generates C/C++ source code capable of performing inference for a model. """
+
+import os
+
+from absl import app
+from absl import flags
+from collections.abc import Sequence
+
+from tflite_micro.codegen import inference_generator
+from tflite_micro.codegen import graph
+from tflite_micro.tensorflow.lite.tools import flatbuffer_utils
+
+# Usage information:
+# Default:
+#   `bazel run codegen:code_generator -- \
+#        --model=</path/to/my_model.tflite>
+# Output will be located at: /path/to/my_model.h|cc
+
+_MODEL_PATH = flags.DEFINE_string(name="model",
+                                  default=None,
+                                  help="Path to the TFLite model file.",
+                                  required=True)
+
+_OUTPUT_DIR = flags.DEFINE_string(
+    name="output_dir",
+    default=None,
+    help="Path to write generated source to. Leave blank to use 'model' path.",
+    required=False)
+
+_OUTPUT_NAME = flags.DEFINE_string(
+    name="output_name",
+    default=None,
+    help=("The output basename for the generated .h/.cc. Leave blank to use "
+          "'model' basename."),
+    required=False)
+
+
+def main(argv: Sequence[str]) -> None:
+  output_dir = _OUTPUT_DIR.value or os.path.dirname(_MODEL_PATH.value)
+  output_name = _OUTPUT_NAME.value or os.path.splitext(
+      os.path.basename(_MODEL_PATH.value))[0]
+
+  model = flatbuffer_utils.read_model(_MODEL_PATH.value)
+
+  print("Generating inference code for model: {}".format(_MODEL_PATH.value))
+
+  inference_generator.generate(output_dir, output_name,
+                               graph.OpCodeTable([model]), graph.Graph(model))
+
+
+if __name__ == "__main__":
+  app.run(main)
diff --git a/codegen/examples/hello_world/BUILD b/codegen/examples/hello_world/BUILD
new file mode 100644
index 0000000..04425cb
--- /dev/null
+++ b/codegen/examples/hello_world/BUILD
@@ -0,0 +1,17 @@
+load("//codegen:build_def.bzl", "tflm_inference_library")
+
+package(default_visibility = ["//visibility:public"])
+
+tflm_inference_library(
+    name = "hello_world_model",
+    tflite_model = "//tensorflow/lite/micro/examples/hello_world/models:hello_world_int8.tflite",
+)
+
+cc_binary(
+    name = "hello_world",
+    srcs = ["hello_world.cc"],
+    deps = [
+        ":hello_world_model",
+        "//tensorflow/lite/c:c_api_types",
+    ],
+)
diff --git a/codegen/examples/hello_world/README.md b/codegen/examples/hello_world/README.md
new file mode 100644
index 0000000..62afee5
--- /dev/null
+++ b/codegen/examples/hello_world/README.md
@@ -0,0 +1,27 @@
+# Codegen Hello World Example
+
+This is a code-generated example of the hello world model. The generated source
+is checked in for now so that it can be reviewed during the prototyping stage.
+
+## Building the example executable
+Please note that this will execute Bazel from make as part of the process.
+
+```
+bazel build //codegen/examples/hello_world:hello_world
+```
+
+## Running the example
+
+TODO(rjascani): The command works, but it'll just crash as we don't have all of
+the data structures fully populated yet.
+
+```
+bazel run //codegen/examples/hello_world:hello_world
+```
+
+## Updating the generated sources
+To update the generated source, you can execute this make target:
+
+```
+./codegen/examples/hello_world/update_example_source.sh
+```
diff --git a/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_model_settings.cc b/codegen/examples/hello_world/hello_world.cc
similarity index 62%
copy from tensorflow/lite/micro/examples/micro_speech/simple_features/simple_model_settings.cc
copy to codegen/examples/hello_world/hello_world.cc
index e2cf661..70d665b 100644
--- a/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_model_settings.cc
+++ b/codegen/examples/hello_world/hello_world.cc
@@ -1,4 +1,4 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -13,11 +13,16 @@
 limitations under the License.
 ==============================================================================*/
 
-#include "tensorflow/lite/micro/examples/micro_speech/simple_features/simple_model_settings.h"
+#include "codegen/examples/hello_world/hello_world_model.h"
+#include "tensorflow/lite/c/c_api_types.h"
 
-const char* kCategoryLabels[kCategoryCount] = {
-    "silence",
-    "unknown",
-    "yes",
-    "no",
-};
+int main(int argc, char** argv) {
+  hello_world_model::Model hello_world{};
+
+  TfLiteStatus status = hello_world.Invoke();
+  if (status != kTfLiteOk) {
+    return -1;
+  }
+
+  return 0;
+}
diff --git a/codegen/examples/hello_world/hello_world_model.cc b/codegen/examples/hello_world/hello_world_model.cc
new file mode 100644
index 0000000..7d8290c
--- /dev/null
+++ b/codegen/examples/hello_world/hello_world_model.cc
@@ -0,0 +1,316 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+/* AUTOMATICALLY GENERATED DO NOT MODIFY */
+
+#include "hello_world_model.h"
+
+#include "codegen/runtime/micro_codegen_context.h"
+#include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/c/c_api_types.h"
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/kernels/internal/compatibility.h"
+#include "tensorflow/lite/micro/kernels/micro_ops.h"
+#include "tensorflow/lite/micro/micro_common.h"
+#include "tensorflow/lite/micro/micro_context.h"
+
+namespace hello_world_model {
+namespace {
+// TODO(rjascani): We should probably split out the OpTable to a separate file
+// once we start generating for multiple models.
+enum OpCode { kFullyConnected, kCount };
+
+TFLMInferenceRegistration op_table[OpCode::kCount] = {
+    tflite::RegisterInference_FULLY_CONNECTED(),
+};
+
+// buffer_1 is located in the arena
+
+alignas(16) uint8_t buffer_2[4] = {
+    0xAD,
+    0x01,
+    0x00,
+    0x00,
+};
+
+alignas(16) uint8_t buffer_3[16] = {
+    0xD9, 0x3B, 0x27, 0x15, 0x1C, 0xE0, 0xDE, 0xDD,
+    0x0F, 0x1B, 0xC5, 0xD7, 0x12, 0xDD, 0xF9, 0x7F,
+};
+
+alignas(16) uint8_t buffer_4[64] = {
+    0x27, 0xFD, 0xFF, 0xFF, 0xA2, 0x07, 0x00, 0x00, 0x62, 0x02, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0xF1, 0x00, 0x00, 0x00, 0x29, 0xFE,
+    0xFF, 0xFF, 0xDD, 0xFF, 0xFF, 0xFF, 0x9D, 0xFC, 0xFF, 0xFF, 0x3B,
+    0x02, 0x00, 0x00, 0x45, 0x02, 0x00, 0x00, 0xA4, 0x10, 0x00, 0x00,
+    0x67, 0x0F, 0x00, 0x00, 0x4F, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x87, 0xFC, 0xFF, 0xFF, 0x11, 0xEC, 0xFF, 0xFF,
+};
+
+alignas(16) uint8_t buffer_5[256] = {
+    0xF4, 0x1A, 0xED, 0x09, 0x19, 0x21, 0xF4, 0x24, 0xE0, 0x21, 0xEF, 0xBC,
+    0xF7, 0xF5, 0xFA, 0x19, 0x03, 0xDC, 0xD2, 0x02, 0x06, 0xF9, 0xF4, 0x02,
+    0xFF, 0xFA, 0xEF, 0xF1, 0xEF, 0xD3, 0x27, 0xE1, 0xFB, 0x27, 0xDD, 0xEB,
+    0xDB, 0xE4, 0x05, 0x1A, 0x17, 0xFC, 0x24, 0x12, 0x15, 0xEF, 0x1E, 0xE4,
+    0x10, 0xFE, 0x14, 0xDA, 0x1C, 0xF8, 0xF3, 0xF1, 0xEF, 0xE2, 0xF3, 0x09,
+    0xE3, 0xE9, 0xED, 0xE3, 0xE4, 0x15, 0x07, 0x0B, 0x04, 0x1B, 0x1A, 0xFE,
+    0xEB, 0x01, 0xDE, 0x21, 0xE6, 0x0B, 0xEC, 0x03, 0x23, 0x0A, 0x22, 0x24,
+    0x1E, 0x27, 0x03, 0xE6, 0x03, 0x24, 0xFF, 0xC0, 0x11, 0xF8, 0xFC, 0xF1,
+    0x11, 0x0C, 0xF5, 0xE0, 0xF3, 0x07, 0x17, 0xE5, 0xE8, 0xED, 0xFA, 0xDC,
+    0xE8, 0x23, 0xFB, 0x07, 0xDD, 0xFB, 0xFD, 0x00, 0x14, 0x26, 0x11, 0x17,
+    0xE7, 0xF1, 0x11, 0xEA, 0x02, 0x26, 0x04, 0x04, 0x25, 0x21, 0x1D, 0x0A,
+    0xDB, 0x1D, 0xDC, 0x20, 0x01, 0xFA, 0xE3, 0x37, 0x0B, 0xF1, 0x1A, 0x16,
+    0xEF, 0x1C, 0xE7, 0x03, 0xE0, 0x16, 0x02, 0x03, 0x21, 0x18, 0x09, 0x2E,
+    0xD9, 0xE5, 0x14, 0x0B, 0xEA, 0x1A, 0xFC, 0xD8, 0x13, 0x00, 0xC4, 0xD8,
+    0xEC, 0xD9, 0xFE, 0x0D, 0x19, 0x20, 0xD8, 0xD6, 0xE2, 0x1F, 0xE9, 0xD7,
+    0xCA, 0xE2, 0xDD, 0xC6, 0x13, 0xE7, 0x04, 0x3E, 0x00, 0x01, 0x14, 0xC7,
+    0xDB, 0xE7, 0x15, 0x15, 0xF5, 0x06, 0xD6, 0x1A, 0xDC, 0x09, 0x22, 0xFE,
+    0x08, 0x02, 0x13, 0xEF, 0x19, 0x1E, 0xE2, 0x09, 0xFD, 0xF3, 0x14, 0xDD,
+    0xDA, 0x20, 0xD9, 0x0F, 0xE3, 0xF9, 0xF7, 0xEE, 0xE9, 0x24, 0xE6, 0x29,
+    0x00, 0x07, 0x16, 0xE2, 0x1E, 0x0D, 0x23, 0xD3, 0xDD, 0xF7, 0x14, 0xFA,
+    0x08, 0x22, 0x26, 0x21, 0x09, 0x08, 0x0F, 0x0B, 0xE0, 0x12, 0xF4, 0x7F,
+    0xDC, 0x58, 0xE5, 0x26,
+};
+
+alignas(16) uint8_t buffer_6[64] = {
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xC2, 0xEA, 0xFF,
+    0xFF, 0x75, 0xEA, 0xFF, 0xFF, 0xB8, 0xFA, 0xFF, 0xFF, 0x24, 0xFA,
+    0xFF, 0xFF, 0xC8, 0xEF, 0xFF, 0xFF, 0xAC, 0xFF, 0xFF, 0xFF, 0x44,
+    0x0D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xBD, 0x07, 0x00, 0x00,
+    0x33, 0xEA, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0xCC, 0xE4, 0xFF,
+    0xFF, 0x4F, 0x0D, 0x00, 0x00, 0xCF, 0xE3, 0xFF, 0xFF,
+};
+
+alignas(16) uint8_t buffer_7[16] = {
+    0xF7, 0xCA, 0x39, 0x47, 0x68, 0x73, 0x62, 0x63,
+    0x40, 0xE6, 0x7F, 0x19, 0xAE, 0x44, 0x5F, 0x56,
+};
+
+// buffer_8 is located in the arena
+
+// buffer_9 is located in the arena
+
+// buffer_10 is located in the arena
+
+constexpr size_t kSubgraph0Inputs[1] = {0};
+
+constexpr size_t kSubgraph0Outputs[1] = {9};
+
+struct Node0_0 {
+  struct Inputs {
+    int size = 3;
+    int data[3] = {0, 6, 5};
+  } inputs;
+  struct Outputs {
+    int size = 1;
+    int data[1] = {7};
+  } outputs;
+  // No intermediates
+  TfLiteFullyConnectedParams builtin_data = {
+      .activation = kTfLiteActRelu,
+      .weights_format = kTfLiteFullyConnectedWeightsFormatDefault,
+      .keep_num_dims = false,
+      .asymmetric_quantize_inputs = false,
+      .quantized_bias_type = kTfLiteNoType};
+} node_0_0;
+
+struct Node0_1 {
+  struct Inputs {
+    int size = 3;
+    int data[3] = {7, 4, 3};
+  } inputs;
+  struct Outputs {
+    int size = 1;
+    int data[1] = {8};
+  } outputs;
+  // No intermediates
+  TfLiteFullyConnectedParams builtin_data = {
+      .activation = kTfLiteActRelu,
+      .weights_format = kTfLiteFullyConnectedWeightsFormatDefault,
+      .keep_num_dims = false,
+      .asymmetric_quantize_inputs = false,
+      .quantized_bias_type = kTfLiteNoType};
+} node_0_1;
+
+struct Node0_2 {
+  struct Inputs {
+    int size = 3;
+    int data[3] = {8, 2, 1};
+  } inputs;
+  struct Outputs {
+    int size = 1;
+    int data[1] = {9};
+  } outputs;
+  // No intermediates
+  TfLiteFullyConnectedParams builtin_data = {
+      .activation = kTfLiteActNone,
+      .weights_format = kTfLiteFullyConnectedWeightsFormatDefault,
+      .keep_num_dims = false,
+      .asymmetric_quantize_inputs = false,
+      .quantized_bias_type = kTfLiteNoType};
+} node_0_2;
+
+struct Tensor0_0Dims {
+  int size = 2;
+  int data[2] = {1, 1};
+} tensor0_0_dims;
+
+struct Tensor0_1Dims {
+  int size = 1;
+  int data[1] = {1};
+} tensor0_1_dims;
+
+struct Tensor0_2Dims {
+  int size = 2;
+  int data[2] = {1, 16};
+} tensor0_2_dims;
+
+struct Tensor0_3Dims {
+  int size = 1;
+  int data[1] = {16};
+} tensor0_3_dims;
+
+struct Tensor0_4Dims {
+  int size = 2;
+  int data[2] = {16, 16};
+} tensor0_4_dims;
+
+struct Tensor0_5Dims {
+  int size = 1;
+  int data[1] = {16};
+} tensor0_5_dims;
+
+struct Tensor0_6Dims {
+  int size = 2;
+  int data[2] = {16, 1};
+} tensor0_6_dims;
+
+struct Tensor0_7Dims {
+  int size = 2;
+  int data[2] = {1, 16};
+} tensor0_7_dims;
+
+struct Tensor0_8Dims {
+  int size = 2;
+  int data[2] = {1, 16};
+} tensor0_8_dims;
+
+struct Tensor0_9Dims {
+  int size = 2;
+  int data[2] = {1, 1};
+} tensor0_9_dims;
+
+TfLiteStatus InvokeSubgraph0(TfLiteContext* context,
+                             tflite::Span<TfLiteNode> nodes) {
+  TFLITE_DCHECK(nodes.size() == 3);
+  TF_LITE_ENSURE_OK(
+      context, op_table[OpCode::kFullyConnected].invoke(context, &nodes[0]));
+  TF_LITE_ENSURE_OK(
+      context, op_table[OpCode::kFullyConnected].invoke(context, &nodes[1]));
+  TF_LITE_ENSURE_OK(
+      context, op_table[OpCode::kFullyConnected].invoke(context, &nodes[2]));
+
+  return kTfLiteOk;
+}
+
+}  // namespace
+
+Model::Model()
+  : subgraphs_{
+      {.inputs = {&kSubgraph0Inputs[0], 1},
+       .outputs = {&kSubgraph0Outputs[0], 1},
+       .nodes = {&subgraph0_nodes_[0], 3},
+       .tensors = {&subgraph0_tensors_[0], 10},
+       .invoke = &InvokeSubgraph0},
+    },
+    micro_context_{&context_, {&subgraphs_[0], 1}} {
+  context_.impl_ = static_cast<void*>(&micro_context_);
+  context_.ReportError = nullptr;
+  context_.GetTensor = nullptr;
+  context_.GetEvalTensor = tflite::MicroContextGetEvalTensor;
+  context_.profiler = nullptr;
+  context_.GetExternalContext = nullptr;
+  context_.GetScratchBuffer = nullptr;
+
+  subgraph0_nodes_[0] = TfLiteNode{
+      .inputs = reinterpret_cast<TfLiteIntArray*>(&node_0_0.inputs),
+      .outputs = reinterpret_cast<TfLiteIntArray*>(&node_0_0.outputs),
+      .intermediates = nullptr,
+      .user_data = nullptr,
+      .builtin_data = static_cast<void*>(&node_0_0.builtin_data),
+      .custom_initial_data = nullptr,
+      .custom_initial_data_size = 0};
+  subgraph0_nodes_[1] = TfLiteNode{
+      .inputs = reinterpret_cast<TfLiteIntArray*>(&node_0_1.inputs),
+      .outputs = reinterpret_cast<TfLiteIntArray*>(&node_0_1.outputs),
+      .intermediates = nullptr,
+      .user_data = nullptr,
+      .builtin_data = static_cast<void*>(&node_0_1.builtin_data),
+      .custom_initial_data = nullptr,
+      .custom_initial_data_size = 0};
+  subgraph0_nodes_[2] = TfLiteNode{
+      .inputs = reinterpret_cast<TfLiteIntArray*>(&node_0_2.inputs),
+      .outputs = reinterpret_cast<TfLiteIntArray*>(&node_0_2.outputs),
+      .intermediates = nullptr,
+      .user_data = nullptr,
+      .builtin_data = static_cast<void*>(&node_0_2.builtin_data),
+      .custom_initial_data = nullptr,
+      .custom_initial_data_size = 0};
+
+  subgraph0_tensors_[0] = TfLiteEvalTensor{
+      .data = {.data = static_cast<void*>(nullptr /* buffer_1 */)},
+      .dims = reinterpret_cast<TfLiteIntArray*>(&tensor0_0_dims),
+      .type = kTfLiteInt8};
+  subgraph0_tensors_[1] = TfLiteEvalTensor{
+      .data = {.data = static_cast<void*>(&buffer_2)},
+      .dims = reinterpret_cast<TfLiteIntArray*>(&tensor0_1_dims),
+      .type = kTfLiteInt32};
+  subgraph0_tensors_[2] = TfLiteEvalTensor{
+      .data = {.data = static_cast<void*>(&buffer_3)},
+      .dims = reinterpret_cast<TfLiteIntArray*>(&tensor0_2_dims),
+      .type = kTfLiteInt8};
+  subgraph0_tensors_[3] = TfLiteEvalTensor{
+      .data = {.data = static_cast<void*>(&buffer_4)},
+      .dims = reinterpret_cast<TfLiteIntArray*>(&tensor0_3_dims),
+      .type = kTfLiteInt32};
+  subgraph0_tensors_[4] = TfLiteEvalTensor{
+      .data = {.data = static_cast<void*>(&buffer_5)},
+      .dims = reinterpret_cast<TfLiteIntArray*>(&tensor0_4_dims),
+      .type = kTfLiteInt8};
+  subgraph0_tensors_[5] = TfLiteEvalTensor{
+      .data = {.data = static_cast<void*>(&buffer_6)},
+      .dims = reinterpret_cast<TfLiteIntArray*>(&tensor0_5_dims),
+      .type = kTfLiteInt32};
+  subgraph0_tensors_[6] = TfLiteEvalTensor{
+      .data = {.data = static_cast<void*>(&buffer_7)},
+      .dims = reinterpret_cast<TfLiteIntArray*>(&tensor0_6_dims),
+      .type = kTfLiteInt8};
+  subgraph0_tensors_[7] = TfLiteEvalTensor{
+      .data = {.data = static_cast<void*>(nullptr /* buffer_8 */)},
+      .dims = reinterpret_cast<TfLiteIntArray*>(&tensor0_7_dims),
+      .type = kTfLiteInt8};
+  subgraph0_tensors_[8] = TfLiteEvalTensor{
+      .data = {.data = static_cast<void*>(nullptr /* buffer_9 */)},
+      .dims = reinterpret_cast<TfLiteIntArray*>(&tensor0_8_dims),
+      .type = kTfLiteInt8};
+  subgraph0_tensors_[9] = TfLiteEvalTensor{
+      .data = {.data = static_cast<void*>(nullptr /* buffer_10 */)},
+      .dims = reinterpret_cast<TfLiteIntArray*>(&tensor0_9_dims),
+      .type = kTfLiteInt8};
+}
+
+TfLiteStatus Model::Invoke() { return micro_context_.InvokeSubgraph(0); }
+
+}  // namespace hello_world_model
diff --git a/codegen/examples/hello_world/hello_world_model.h b/codegen/examples/hello_world/hello_world_model.h
new file mode 100644
index 0000000..80cfe2c
--- /dev/null
+++ b/codegen/examples/hello_world/hello_world_model.h
@@ -0,0 +1,40 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+/* AUTOMATICALLY GENERATED DO NOT MODIFY */
+
+#pragma once
+
+#include "codegen/runtime/micro_codegen_context.h"
+#include "tensorflow/lite/c/c_api_types.h"
+#include "tensorflow/lite/c/common.h"
+
+namespace hello_world_model {
+
+class Model {
+ public:
+  Model();
+
+  TfLiteStatus Invoke();
+
+ private:
+  TfLiteContext context_ = {};
+  tflite::Subgraph subgraphs_[1];
+  tflite::MicroCodegenContext micro_context_;
+  TfLiteNode subgraph0_nodes_[3] = {};
+  TfLiteEvalTensor subgraph0_tensors_[10] = {};
+};
+
+}  // namespace hello_world_model
diff --git a/codegen/examples/hello_world/update_example_source.sh b/codegen/examples/hello_world/update_example_source.sh
new file mode 100755
index 0000000..a381fed
--- /dev/null
+++ b/codegen/examples/hello_world/update_example_source.sh
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+#
+# Syncs the generated example source code in the repository.
+#
+
+set -e
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+ROOT_DIR=${SCRIPT_DIR}/../../..
+cd "${ROOT_DIR}"
+
+bazel build //codegen/examples/hello_world:hello_world_model
+cp ./bazel-bin/codegen/examples/hello_world/hello_world_model.h ${SCRIPT_DIR}
+cp ./bazel-bin/codegen/examples/hello_world/hello_world_model.cc ${SCRIPT_DIR}
+clang-format --style=google -i \
+  ${SCRIPT_DIR}/hello_world_model.h \
+  ${SCRIPT_DIR}/hello_world_model.cc
diff --git a/codegen/graph.py b/codegen/graph.py
new file mode 100644
index 0000000..ad5a700
--- /dev/null
+++ b/codegen/graph.py
@@ -0,0 +1,262 @@
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+""" Provides object representation for the model that is conducive to code 
+    generation using templates. """
+
+from typing import Dict, List, Optional, Sequence
+import string
+import textwrap
+
+from tflite_micro.codegen.operators import factory
+from tflite_micro.codegen.operators import operator
+from tflite_micro.codegen import tensor
+from tflite_micro.codegen import utils
+from tflite_micro.tensorflow.lite.python import schema_py_generated as schema_fb
+from tflite_micro.tensorflow.lite.tools import visualize
+
+
+class OpCode(object):
+
+  def __init__(self, op_code: schema_fb.OperatorCodeT):
+    self._op_code: schema_fb.OperatorCodeT = op_code
+
+  @property
+  def name(self) -> str:
+    if self._op_code.customCode:
+      return self._op_code.customCode
+    return visualize.BuiltinCodeToName(self._op_code.builtinCode)
+
+  @property
+  def register_function(self) -> str:
+    return "tflite::RegisterInference_{}".format(self.name)
+
+  @property
+  def enum_name(self) -> str:
+    return "k{}".format(utils.to_pascal_case(self.name))
+
+  @property
+  def full_enum_name(self) -> str:
+    return "OpCode::" + self.enum_name
+
+
+class Subgraph(object):
+
+  def __init__(self, model: schema_fb.ModelT, buffers: Sequence[tensor.Buffer],
+               subgraph_idx: int, subgraph: schema_fb.SubGraphT):
+    self._subgraph_idx: int = subgraph_idx
+    self._subgraph: schema_fb.SubGraphT = subgraph
+    self._op_codes: List[OpCode] = [
+        OpCode(op_code) for op_code in model.operatorCodes
+    ]
+    self._tensors: List[tensor.Tensor] = []
+    for t in subgraph.tensors:
+      self._tensors.append(tensor.Tensor(buffers[t.buffer], t))
+
+    self._operators: List[operator.Operator] = []
+    for op in subgraph.operators:
+      op_code = model.operatorCodes[op.opcodeIndex]
+      self._operators.append(factory.create_operator(op_code, op))
+
+  @property
+  def index(self) -> int:
+    return self._subgraph_idx
+
+  @property
+  def inputs(self) -> Sequence[int]:
+    return self._subgraph.inputs
+
+  @property
+  def outputs(self) -> Sequence[int]:
+    return self._subgraph.outputs
+
+  @property
+  def operators(self) -> Sequence[operator.Operator]:
+    return self._operators
+
+  @property
+  def tensors(self) -> Sequence[tensor.Tensor]:
+    return self._tensors
+
+  @property
+  def needs_zero_length_int_array(self) -> bool:
+    return any(t.needs_zero_length_int_array for t in self.tensors)
+
+  @property
+  def invoke_fn_name(self) -> str:
+    return f"InvokeSubgraph{self.index}"
+
+  @property
+  def inputs_array_name(self) -> str:
+    return f"kSubgraph{self.index}Inputs"
+
+  @property
+  def outputs_array_name(self) -> str:
+    return f"kSubgraph{self.index}Outputs"
+
+  @property
+  def nodes_array(self) -> str:
+    return f"subgraph{self.index}_nodes_"
+
+  def nodes_element(self, operator_idx: int) -> str:
+    return self.nodes_array + f"[{operator_idx}]"
+
+  def node_data_type(self, operator_idx: int) -> str:
+    return f"Node{self.index}_{operator_idx}"
+
+  def node_data_name(self, operator_idx: int) -> str:
+    return f"node_{self.index}_{operator_idx}"
+
+  def generate_c_node_data(self, indent: str) -> str:
+    node_data_strs: List[str] = []
+    for op_idx, op in enumerate(self.operators):
+      type_name = self.node_data_type(op_idx)
+      node_name = self.node_data_name(op_idx)
+      node_data_strs.append(op.generate_c_node_data(type_name, node_name))
+    return textwrap.indent("\n\n".join(node_data_strs), indent)
+
+  def generate_c_node_init(self, indent: str) -> str:
+    node_init_strs: List[str] = []
+    for op_idx, op in enumerate(self.operators):
+      tflite_node_name = self.nodes_element(op_idx)
+      node_data_name = self.node_data_name(op_idx)
+      node_init_strs.append(
+          op.generate_c_node_init(tflite_node_name, node_data_name))
+    return textwrap.indent("\n".join(node_init_strs), indent)
+
+  def generate_c_invoke(self, indent: str) -> str:
+    function_template = string.Template(
+        "TfLiteStatus ${function_name}(TfLiteContext* context,\n"
+        "                             tflite::Span<TfLiteNode> nodes) {\n"
+        "  TFLITE_DCHECK(nodes.size() == ${num_nodes});\n"
+        "${body}\n"
+        "  return kTfLiteOk;\n"
+        "}")
+
+    body_template = string.Template(
+        "  TF_LITE_ENSURE_OK(\n"
+        "      context, op_table[${op_code}].invoke(context, &${node}));\n")
+    invoke_strs: List[str] = []
+    for op_idx, op in enumerate(self.operators):
+      invoke_strs.append(
+          body_template.substitute(
+              op_code=self._op_codes[op.op_code_index].full_enum_name,
+              node=f"nodes[{op_idx}]"))
+
+    invoke = function_template.substitute(function_name=self.invoke_fn_name,
+                                          num_nodes=len(self.operators),
+                                          body="".join(invoke_strs))
+    return textwrap.indent(invoke, indent)
+
+  def generate_c_input_array(self, indent: str) -> str:
+    return utils.generate_c_int_array(indent, "size_t", self.inputs_array_name,
+                                      self.inputs)
+
+  def generate_c_output_array(self, indent: str) -> str:
+    return utils.generate_c_int_array(indent, "size_t",
+                                      self.outputs_array_name, self.outputs)
+
+  def generate_c_subgraph_init(self, indent: str) -> str:
+    init_template = string.Template(
+        "{.inputs = {&${input_array}[0], ${input_size}},\n"
+        " .outputs = {&${output_array}[0], ${output_size}},\n"
+        " .nodes = {&${node_array}[0], ${node_size}},\n"
+        " .tensors = {&${tensor_array}[0], ${tensor_size}},\n"
+        " .invoke = &${invoke}},")
+    return textwrap.indent(
+        init_template.substitute(input_array=self.inputs_array_name,
+                                 input_size=len(self.inputs),
+                                 output_array=self.outputs_array_name,
+                                 output_size=len(self.outputs),
+                                 node_array=self.nodes_array,
+                                 node_size=len(self.operators),
+                                 tensor_array=self.tensors_array,
+                                 tensor_size=len(self.tensors),
+                                 invoke=self.invoke_fn_name), indent)
+
+  @property
+  def tensors_array(self) -> str:
+    return f"subgraph{self.index}_tensors_"
+
+  def tensors_element(self, tensor_idx: int) -> str:
+    return self.tensors_array + f"[{tensor_idx}]"
+
+  def tensor_data_type(self, tensor_idx: int) -> str:
+    return f"Tensor{self.index}_{tensor_idx}"
+
+  def tensor_data_name(self, tensor_idx: int) -> str:
+    return f"tensor{self.index}_{tensor_idx}"
+
+  def generate_c_tensor_data(self, indent: str) -> str:
+    tensor_dims_strs: List[str] = []
+    for tensor_idx, t in enumerate(self.tensors):
+      type_name = self.tensor_data_type(tensor_idx)
+      tensor_name = self.tensor_data_name(tensor_idx)
+      tensor_dims_strs.append(
+          t.generate_c_tensor_dims(type_name, tensor_name))
+    return textwrap.indent("\n\n".join(tensor_dims_strs), indent)
+
+  def generate_c_tensor_init(self, indent: str) -> str:
+    tensor_init_strs: List[str] = []
+    for tensor_idx, t in enumerate(self.tensors):
+      tflite_tensor_name = self.tensors_element(tensor_idx)
+      tensor_data_name = self.tensor_data_name(tensor_idx)
+      tensor_init_strs.append(
+          t.generate_c_tensor_init(tflite_tensor_name, tensor_data_name))
+    return textwrap.indent("\n".join(tensor_init_strs), indent)
+
+
+class Graph(object):
+
+  def __init__(self, model: schema_fb.ModelT):
+    buffers: List[tensor.Buffer] = [
+        tensor.Buffer("buffer_{}".format(idx), buffer)
+        for idx, buffer in enumerate(model.buffers)
+    ]
+    self._subgraphs: List[Subgraph] = [
+        Subgraph(model, buffers, idx, subgraph)
+        for idx, subgraph in enumerate(model.subgraphs)
+    ]
+
+  @property
+  def subgraphs(self) -> Sequence[Subgraph]:
+    return self._subgraphs
+
+  @property
+  def buffers(self) -> Sequence[tensor.Buffer]:
+    buffers: List[tensor.Buffer] = []
+    for subgraph in self.subgraphs:
+      for t in subgraph.tensors:
+        buffers.append(t.buffer)
+    return buffers
+
+  @property
+  def needs_zero_length_int_array(self) -> bool:
+    return any(subgraph.needs_zero_length_int_array
+               for subgraph in self.subgraphs)
+
+
+class OpCodeTable(object):
+
+  def __init__(self, models: Sequence[schema_fb.ModelT]):
+    op_codes = []
+    for model in models:
+      for op_code in model.operatorCodes:
+        op_codes.append(OpCode(op_code))
+
+    self._op_codes: List[OpCode] = list(set(op_codes))
+
+  @property
+  def op_codes(self) -> Sequence[OpCode]:
+    return self._op_codes
diff --git a/codegen/inference_generator.py b/codegen/inference_generator.py
new file mode 100644
index 0000000..fe351f3
--- /dev/null
+++ b/codegen/inference_generator.py
@@ -0,0 +1,68 @@
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+""" Generates C/C++ inference source code. """
+
+import pathlib
+
+from mako import template
+from typing import TypedDict
+
+from tflite_micro.codegen import graph
+
+_TEMPLATE_DIR = pathlib.Path(__file__).parent / 'templates'
+_HEADER_TEMPLATE = _TEMPLATE_DIR / 'inference.h.mako'
+_SOURCE_TEMPLATE = _TEMPLATE_DIR / 'inference.cc.mako'
+
+
+class ModelData(TypedDict):
+  header_file: str
+  model_name: str
+  op_code_table: graph.OpCodeTable
+  graph: graph.Graph
+
+
+def _render(output_file: pathlib.Path, template_file: pathlib.Path,
+            model_data: ModelData) -> None:
+  print("Generating {}".format(output_file))
+  t = template.Template(filename=str(template_file))
+  with output_file.open('w+') as file:
+    file.write(t.render(**model_data))
+
+
+def _generate_header(header_path: pathlib.Path, model_data: ModelData) -> None:
+  _render(header_path, _HEADER_TEMPLATE, model_data)
+
+
+def _generate_source(source_path: pathlib.Path, model_data: ModelData) -> None:
+  _render(source_path, _SOURCE_TEMPLATE, model_data)
+
+
+def generate(output_dir: str, output_name: str,
+             op_code_table: graph.OpCodeTable, graph: graph.Graph) -> None:
+  """ Generate C/C++ inference code. """
+  header_file = f"{output_name}.h"
+  model_data: ModelData = {
+      'header_file': header_file,
+      'model_name': output_name,
+      'op_code_table': op_code_table,
+      'graph': graph,
+  }
+
+  # Ensure output directory exists
+  output_path = pathlib.Path(output_dir)
+  output_path.mkdir(parents=True, exist_ok=True)
+
+  _generate_header(output_path / header_file, model_data)
+  _generate_source(output_path / f"{output_name}.cc", model_data)
diff --git a/codegen/operators/BUILD b/codegen/operators/BUILD
new file mode 100644
index 0000000..3a7ae29
--- /dev/null
+++ b/codegen/operators/BUILD
@@ -0,0 +1,52 @@
+load("@rules_python//python:defs.bzl", "py_library")
+
+package(
+    default_visibility = ["//:__subpackages__"],
+    licenses = ["notice"],
+)
+
+py_library(
+    name = "constants",
+    srcs = [
+        "constants.py",
+    ],
+    deps = [
+        "//tensorflow/lite/python:schema_py",
+    ],
+)
+
+py_library(
+    name = "factory",
+    srcs = [
+        "factory.py",
+    ],
+    deps = [
+        ":fully_connected",
+        ":operator",
+        "//tensorflow/lite/python:schema_py",
+    ],
+)
+
+py_library(
+    name = "fully_connected",
+    srcs = [
+        "fully_connected.py",
+    ],
+    deps = [
+        ":constants",
+        ":operator",
+        "//codegen:utils",
+        "//tensorflow/lite/python:schema_py",
+    ],
+)
+
+py_library(
+    name = "operator",
+    srcs = [
+        "operator.py",
+    ],
+    deps = [
+        "//codegen:utils",
+        "//tensorflow/lite/python:schema_py",
+    ],
+)
diff --git a/codegen/operators/constants.py b/codegen/operators/constants.py
new file mode 100644
index 0000000..b9ff17a
--- /dev/null
+++ b/codegen/operators/constants.py
@@ -0,0 +1,50 @@
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+""" Operator Constants """
+
+from typing import Dict
+
+from tflite_micro.tensorflow.lite.python import schema_py_generated as schema_fb
+
+ACTIVATION_FUNCS: Dict[int, str] = {
+    schema_fb.ActivationFunctionType.NONE: "kTfLiteActNone",
+    schema_fb.ActivationFunctionType.RELU: "kTfLiteActRelu",
+    schema_fb.ActivationFunctionType.RELU_N1_TO_1: "kTfLiteActReluN1To1",
+    schema_fb.ActivationFunctionType.RELU6: "kTfLiteActRelu6",
+    schema_fb.ActivationFunctionType.TANH: "kTfLiteActTanh",
+    schema_fb.ActivationFunctionType.SIGN_BIT: "kTfLiteActSignBit",
+}
+
+TFLITE_TYPE: Dict[int, str] = {
+    0: "kTfLiteNoType",
+    1: "kTfLiteFloat32",
+    2: "kTfLiteInt32",
+    3: "kTfLiteUInt8",
+    4: "kTfLiteInt64",
+    5: "kTfLiteString",
+    6: "kTfLiteBool",
+    7: "kTfLiteInt16",
+    8: "kTfLiteComplex64",
+    9: "kTfLiteInt8",
+    10: "kTfLiteFloat16",
+    11: "kTfLiteFloat64",
+    12: "kTfLiteComplex128",
+    13: "kTfLiteUInt64",
+    14: "kTfLiteResource",
+    15: "kTfLiteVariant",
+    16: "kTfLiteUInt32",
+    17: "kTfLiteUInt16",
+    18: "kTfLiteInt4",
+}
diff --git a/codegen/operators/factory.py b/codegen/operators/factory.py
new file mode 100644
index 0000000..f62cacb
--- /dev/null
+++ b/codegen/operators/factory.py
@@ -0,0 +1,28 @@
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+""" A factory function for creating operators """
+
+from tflite_micro.codegen.operators import fully_connected
+from tflite_micro.codegen.operators import operator
+from tflite_micro.tensorflow.lite.python import schema_py_generated as schema_fb
+
+_BUILTIN_OPERATORS: dict[int, type[operator.Operator]] = {
+    schema_fb.BuiltinOperator.FULLY_CONNECTED: fully_connected.FullyConnected,
+}
+
+
+def create_operator(op_code: schema_fb.OperatorCodeT, op: schema_fb.OperatorT):
+  operator_class = _BUILTIN_OPERATORS[op_code.builtinCode]
+  return operator_class(op)
diff --git a/codegen/operators/fully_connected.py b/codegen/operators/fully_connected.py
new file mode 100644
index 0000000..f756bef
--- /dev/null
+++ b/codegen/operators/fully_connected.py
@@ -0,0 +1,56 @@
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+""" FullyConnected operator """
+
+from typing import Dict
+import string
+
+from tflite_micro.codegen.operators import constants
+from tflite_micro.codegen.operators import operator
+from tflite_micro.codegen import utils
+from tflite_micro.tensorflow.lite.python import schema_py_generated as schema_fb
+
+_WEIGHTS_FORMATS: Dict[int, str] = {
+    schema_fb.FullyConnectedOptionsWeightsFormat.DEFAULT:
+    "kTfLiteFullyConnectedWeightsFormatDefault",
+    schema_fb.FullyConnectedOptionsWeightsFormat.SHUFFLED4x16INT8:
+    "kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8",
+}
+
+
+class FullyConnected(operator.Operator):
+
+  def __init__(self, op: schema_fb.OperatorT):
+    assert op.builtinOptionsType == schema_fb.BuiltinOptions.FullyConnectedOptions
+    super(FullyConnected, self).__init__(op)
+    self._builtin_options: schema_fb.FullyConnectedOptionsT = op.builtinOptions
+
+  def generate_c_builtin_data(self) -> str:
+    builtin_template = string.Template(
+        "TfLiteFullyConnectedParams builtin_data = {\n"
+        "    .activation = ${activation},\n"
+        "    .weights_format = ${weights_format},\n"
+        "    .keep_num_dims = ${keep_num_dims},\n"
+        "    .asymmetric_quantize_inputs = ${asymmetric_quantize_inputs},\n"
+        "    .quantized_bias_type = ${quantized_bias_type}};")
+    return builtin_template.substitute(
+        activation=constants.ACTIVATION_FUNCS[
+            self._builtin_options.fusedActivationFunction],
+        weights_format=_WEIGHTS_FORMATS[self._builtin_options.weightsFormat],
+        keep_num_dims=utils.bool_to_c_str(self._builtin_options.keepNumDims),
+        asymmetric_quantize_inputs=utils.bool_to_c_str(
+            self._builtin_options.asymmetricQuantizeInputs),
+        quantized_bias_type=constants.TFLITE_TYPE[
+            self._builtin_options.quantizedBiasType])
diff --git a/codegen/operators/operator.py b/codegen/operators/operator.py
new file mode 100644
index 0000000..2879d37
--- /dev/null
+++ b/codegen/operators/operator.py
@@ -0,0 +1,92 @@
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+""" Provides object representation for the model that is conducive to code 
+    generation using templates. """
+
+import abc
+from typing import Optional
+import string
+import textwrap
+
+from tflite_micro.codegen import utils
+from tflite_micro.tensorflow.lite.python import schema_py_generated as schema_fb
+
+
+class Operator(abc.ABC):
+
+  def __init__(self, operator: schema_fb.OperatorT):
+    self._operator: schema_fb.OperatorT = operator
+    self._inputs: utils.IntArray = utils.IntArray(self._operator.inputs)
+    self._outputs: utils.IntArray = utils.IntArray(self._operator.outputs)
+    self._intermediates: Optional[utils.IntArray] = utils.IntArray(
+        self._operator.intermediates) if self._operator.intermediates else None
+
+  def generate_c_node_data(self, type_name: str, node_name: str) -> str:
+    struct_template = string.Template("struct ${type_name} {\n"
+                                      "${body}"
+                                      "} ${node_name};")
+    body_template = string.Template("${inputs}\n"
+                                    "${outputs}\n"
+                                    "${intermediates}\n"
+                                    "${builtin_data}\n")
+    if self._intermediates:
+      intermediates = self._intermediates.generate_c_struct(
+          "Intermediates", "intermediates")
+    else:
+      intermediates = "// No intermediates"
+
+    body = body_template.substitute(
+        inputs=self._inputs.generate_c_struct("Inputs", "inputs"),
+        outputs=self._outputs.generate_c_struct("Outputs", "outputs"),
+        intermediates=intermediates,
+        builtin_data=self.generate_c_builtin_data())
+
+    return struct_template.substitute(type_name=type_name,
+                                      node_name=node_name,
+                                      body=textwrap.indent(body, "  "))
+
+  def generate_c_node_init(self, tflite_node_name: str,
+                           node_data_name: str) -> str:
+    init_template = string.Template(
+        "${tflite_node_name} = TfLiteNode{\n"
+        "    .inputs ="
+        " reinterpret_cast<TfLiteIntArray*>(&${node_data_name}.inputs),\n"
+        "    .outputs ="
+        " reinterpret_cast<TfLiteIntArray*>(&${node_data_name}.outputs),\n"
+        "    .intermediates = ${intermediates},\n"
+        "    .user_data = nullptr,\n"
+        "    .builtin_data ="
+        " static_cast<void*>(&${node_data_name}.builtin_data),\n"
+        "    .custom_initial_data = nullptr,\n"
+        "    .custom_initial_data_size = 0};")
+
+    if self._intermediates:
+      intermediates = (
+          "reinterpret_cast<TfLiteIntArray*>(&{}.intermediates)".format(
+              self._intermediates))
+    else:
+      intermediates = "nullptr"
+
+    return init_template.substitute(tflite_node_name=tflite_node_name,
+                                    node_data_name=node_data_name,
+                                    intermediates=intermediates)
+
+  @property
+  def op_code_index(self) -> int:
+    return self._operator.opcodeIndex
+
+  @abc.abstractmethod
+  def generate_c_builtin_data(self) -> str:
+    raise NotImplementedError(
+        f"Generating builtin data in {type(self).__name__}")
diff --git a/codegen/runtime/BUILD b/codegen/runtime/BUILD
new file mode 100644
index 0000000..d23cb70
--- /dev/null
+++ b/codegen/runtime/BUILD
@@ -0,0 +1,19 @@
+load("//tensorflow/lite/micro:build_def.bzl", "micro_copts")
+
+package(default_visibility = ["//visibility:public"])
+
+cc_library(
+    name = "micro_codegen_context",
+    srcs = ["micro_codegen_context.cc"],
+    hdrs = ["micro_codegen_context.h"],
+    copts = micro_copts(),
+    deps = [
+        "//tensorflow/lite/c:common",
+        "//tensorflow/lite/kernels:op_macros",
+        "//tensorflow/lite/kernels/internal:compatibility",
+        "//tensorflow/lite/micro:micro_context",
+        "//tensorflow/lite/micro:micro_graph",
+        "//tensorflow/lite/micro:micro_log",
+        "//tensorflow/lite/micro:span",
+    ],
+)
diff --git a/codegen/runtime/micro_codegen_context.cc b/codegen/runtime/micro_codegen_context.cc
new file mode 100644
index 0000000..858c823
--- /dev/null
+++ b/codegen/runtime/micro_codegen_context.cc
@@ -0,0 +1,139 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "codegen/runtime/micro_codegen_context.h"
+
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/kernels/internal/compatibility.h"
+#include "tensorflow/lite/kernels/op_macros.h"
+#include "tensorflow/lite/micro/micro_log.h"
+
+namespace tflite {
+
+MicroCodegenContext::MicroCodegenContext(TfLiteContext* context,
+                                         Span<Subgraph> subgraphs)
+    : context_(context), subgraphs_(subgraphs) {}
+
+void* MicroCodegenContext::GetScratchBuffer(int buffer_idx) {
+  // TODO(rjascani): Implement scratch buffers
+  return nullptr;
+}
+
+TfLiteEvalTensor* MicroCodegenContext::GetEvalTensor(int tensor_idx) {
+  TFLITE_DCHECK(static_cast<size_t>(tensor_idx) <
+                subgraphs_[current_subgraph_idx_].tensors.size());
+  return &subgraphs_[current_subgraph_idx_].tensors[tensor_idx];
+}
+
+TfLiteStatus MicroCodegenContext::set_external_context(
+    void* external_context_payload) {
+  if (external_context_payload == nullptr ||
+      external_context_payload_ != nullptr) {
+    MicroPrintf(
+        "Attempting to set external context to %x but it was %x already",
+        external_context_payload, external_context_payload_);
+    return kTfLiteError;
+  }
+
+  external_context_payload_ = external_context_payload;
+  return kTfLiteOk;
+}
+
+void* MicroCodegenContext::external_context() {
+  return external_context_payload_;
+}
+
+MicroGraph& MicroCodegenContext::graph() { return *this; }
+
+void* MicroCodegenContext::AllocatePersistentBuffer(size_t) {
+  // Not allowed at Eval
+  TFLITE_ABORT;
+  return nullptr;
+}
+
+TfLiteStatus MicroCodegenContext::RequestScratchBufferInArena(size_t, int*) {
+  // Not allowed at Eval
+  TFLITE_ABORT;
+  return kTfLiteError;
+}
+
+TfLiteTensor* MicroCodegenContext::AllocateTempTfLiteTensor(int) {
+  // Not allowed at Eval
+  TFLITE_ABORT;
+  return nullptr;
+}
+
+void MicroCodegenContext::DeallocateTempTfLiteTensor(TfLiteTensor*) {
+  // Not allowed at Eval
+  TFLITE_ABORT;
+}
+
+uint8_t* MicroCodegenContext::AllocateTempBuffer(size_t, size_t) {
+  // Not allowed at Eval
+  TFLITE_ABORT;
+  return nullptr;
+}
+
+void MicroCodegenContext::DeallocateTempBuffer(uint8_t*) {
+  // Not allowed at Eval
+  TFLITE_ABORT;
+}
+
+TfLiteStatus MicroCodegenContext::InvokeSubgraph(int subgraph_idx) {
+  TF_LITE_ENSURE(context_,
+                 static_cast<size_t>(subgraph_idx) < subgraphs_.size());
+  size_t previous_subgraph_idx = current_subgraph_idx_;
+  current_subgraph_idx_ = subgraph_idx;
+  TfLiteStatus status =
+      subgraphs_[subgraph_idx].invoke(context_, subgraphs_[subgraph_idx].nodes);
+  current_subgraph_idx_ = previous_subgraph_idx;
+  return status;
+}
+
+size_t MicroCodegenContext::NumSubgraphInputs(int subgraph_idx) {
+  TFLITE_DCHECK(static_cast<size_t>(subgraph_idx) < subgraphs_.size());
+  return subgraphs_[subgraph_idx].inputs.size();
+}
+
+TfLiteEvalTensor* MicroCodegenContext::GetSubgraphInput(int subgraph_idx,
+                                                        int input_idx) {
+  TFLITE_DCHECK(static_cast<size_t>(subgraph_idx) < subgraphs_.size());
+  TFLITE_DCHECK(static_cast<size_t>(input_idx) <
+                subgraphs_[subgraph_idx].inputs.size());
+  const size_t tensor_idx = subgraphs_[subgraph_idx].inputs[input_idx];
+  return &subgraphs_[subgraph_idx].tensors[tensor_idx];
+}
+
+size_t MicroCodegenContext::NumSubgraphOutputs(int subgraph_idx) {
+  TFLITE_DCHECK(static_cast<size_t>(subgraph_idx) < subgraphs_.size());
+  return subgraphs_[subgraph_idx].outputs.size();
+}
+
+TfLiteEvalTensor* MicroCodegenContext::GetSubgraphOutput(int subgraph_idx,
+                                                         int output_idx) {
+  TFLITE_DCHECK(static_cast<size_t>(subgraph_idx) < subgraphs_.size());
+  TFLITE_DCHECK(static_cast<size_t>(output_idx) <
+                subgraphs_[subgraph_idx].outputs.size());
+  const size_t tensor_idx = subgraphs_[subgraph_idx].outputs[output_idx];
+  return &subgraphs_[subgraph_idx].tensors[tensor_idx];
+}
+
+int MicroCodegenContext::NumSubgraphs() {
+  return static_cast<int>(subgraphs_.size());
+}
+
+MicroResourceVariables* MicroCodegenContext::GetResourceVariables() {
+  return nullptr;
+}
+
+}  // namespace tflite
diff --git a/codegen/runtime/micro_codegen_context.h b/codegen/runtime/micro_codegen_context.h
new file mode 100644
index 0000000..3693ad2
--- /dev/null
+++ b/codegen/runtime/micro_codegen_context.h
@@ -0,0 +1,75 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef CODEGEN_RUNTIME_MICRO_CODEGEN_CONTEXT_H_
+#define CODEGEN_RUNTIME_MICRO_CODEGEN_CONTEXT_H_
+
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/micro/micro_context.h"
+#include "tensorflow/lite/micro/micro_graph.h"
+#include "tensorflow/lite/micro/span.h"
+
+namespace tflite {
+
+struct Subgraph {
+  Span<const size_t> inputs;
+  Span<const size_t> outputs;
+  Span<TfLiteNode> nodes;
+  Span<TfLiteEvalTensor> tensors;
+  TfLiteStatus (*invoke)(TfLiteContext*, Span<TfLiteNode>);
+};
+
+class MicroCodegenContext : public MicroContext, MicroGraph {
+ public:
+  MicroCodegenContext(TfLiteContext* context, Span<Subgraph> subgraphs);
+
+  ~MicroCodegenContext() = default;
+
+  // MicroContext API
+  void* AllocatePersistentBuffer(size_t bytes) override;
+  TfLiteStatus RequestScratchBufferInArena(size_t bytes,
+                                           int* buffer_idx) override;
+  void* GetScratchBuffer(int buffer_idx) override;
+  TfLiteTensor* AllocateTempTfLiteTensor(int tensor_idx) override;
+  void DeallocateTempTfLiteTensor(TfLiteTensor* tensor) override;
+  uint8_t* AllocateTempBuffer(size_t size, size_t alignment) override;
+  void DeallocateTempBuffer(uint8_t* buffer) override;
+  TfLiteEvalTensor* GetEvalTensor(int tensor_idx) override;
+  TfLiteStatus set_external_context(void* external_context_payload) override;
+  void* external_context() override;
+  MicroGraph& graph() override;
+
+  // MicroGraph API
+  TfLiteStatus InvokeSubgraph(int subgraph_idx) override;
+  size_t NumSubgraphInputs(int subgraph_idx) override;
+  TfLiteEvalTensor* GetSubgraphInput(int subgraph_idx, int input_idx) override;
+  size_t NumSubgraphOutputs(int subgraph_idx) override;
+  TfLiteEvalTensor* GetSubgraphOutput(int subgraph_idx,
+                                      int output_idx) override;
+  int NumSubgraphs() override;
+  MicroResourceVariables* GetResourceVariables() override;
+
+ private:
+  TfLiteContext* context_;
+  Span<Subgraph> subgraphs_;
+  size_t current_subgraph_idx_ = 0;
+  void* external_context_payload_ = nullptr;
+
+  TF_LITE_REMOVE_VIRTUAL_DELETE
+};
+
+}  // namespace tflite
+
+#endif  // CODEGEN_RUNTIME_MICRO_CODEGEN_CONTEXT_H_
diff --git a/codegen/templates/inference.cc.mako b/codegen/templates/inference.cc.mako
new file mode 100644
index 0000000..cb6e59a
--- /dev/null
+++ b/codegen/templates/inference.cc.mako
@@ -0,0 +1,93 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+/* AUTOMATICALLY GENERATED DO NOT MODIFY */
+
+#include "${header_file}"
+
+#include "codegen/runtime/micro_codegen_context.h"
+#include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/c/c_api_types.h"
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/kernels/internal/compatibility.h"
+#include "tensorflow/lite/micro/kernels/micro_ops.h"
+#include "tensorflow/lite/micro/micro_common.h"
+#include "tensorflow/lite/micro/micro_context.h"
+
+namespace ${model_name} {
+namespace {
+// TODO(rjascani): We should probably split out the OpTable to a separate file
+// once we start generating for multiple models.
+enum OpCode {
+% for op_code in op_code_table.op_codes:
+  ${op_code.enum_name},
+% endfor
+  kCount
+};
+
+TFLMInferenceRegistration op_table[OpCode::kCount] = {
+% for op_code in op_code_table.op_codes:
+    ${op_code.register_function}(),
+% endfor
+};
+
+% for buffer in graph.buffers:
+${buffer.generate_c_buffer_array("")}
+% endfor
+% for subgraph in graph.subgraphs:
+${subgraph.generate_c_input_array("")}
+
+${subgraph.generate_c_output_array("")}
+
+${subgraph.generate_c_node_data("")}
+
+${subgraph.generate_c_tensor_data("")}
+% endfor
+% if graph.needs_zero_length_int_array:
+
+TfLiteIntArray zero_length_int_array = {};
+% endif
+
+% for subgraph in graph.subgraphs:
+${subgraph.generate_c_invoke("")}
+% endfor
+
+}  // namespace
+
+Model::Model()
+  : subgraphs_{
+%for subgraph in graph.subgraphs:
+${subgraph.generate_c_subgraph_init("      ")}
+%endfor
+    },
+    micro_context_{&context_, {&subgraphs_[0], ${len(graph.subgraphs)}}} {
+  context_.impl_ = static_cast<void*>(&micro_context_);
+  context_.ReportError = nullptr;
+  context_.GetTensor = nullptr;
+  context_.GetEvalTensor = tflite::MicroContextGetEvalTensor;
+  context_.profiler = nullptr;
+  context_.GetExternalContext = nullptr;
+  context_.GetScratchBuffer = nullptr;
+
+% for subgraph in graph.subgraphs:
+${subgraph.generate_c_node_init("  ")}
+
+${subgraph.generate_c_tensor_init("  ")}
+% endfor
+}
+
+TfLiteStatus Model::Invoke() { return micro_context_.InvokeSubgraph(0); }
+
+}  // namespace ${model_name}
diff --git a/codegen/templates/inference.h.mako b/codegen/templates/inference.h.mako
new file mode 100644
index 0000000..5ab64e1
--- /dev/null
+++ b/codegen/templates/inference.h.mako
@@ -0,0 +1,44 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+/* AUTOMATICALLY GENERATED DO NOT MODIFY */
+
+#pragma once
+
+#include "codegen/runtime/micro_codegen_context.h"
+#include "tensorflow/lite/c/c_api_types.h"
+#include "tensorflow/lite/c/common.h"
+
+namespace ${model_name} {
+
+class Model {
+ public:
+  Model();
+
+  TfLiteStatus Invoke();
+
+ private:
+  TfLiteContext context_ = {};
+  tflite::Subgraph subgraphs_[${len(graph.subgraphs)}];
+  tflite::MicroCodegenContext micro_context_;
+% for subgraph in graph.subgraphs:
+  TfLiteNode ${subgraph.nodes_array}[${len(subgraph.operators)}] = {};
+% endfor
+% for subgraph in graph.subgraphs:
+  TfLiteEvalTensor ${subgraph.tensors_array}[${len(subgraph.tensors)}] = {};
+% endfor
+};
+
+}  // namespace ${model_name}
diff --git a/codegen/tensor.py b/codegen/tensor.py
new file mode 100644
index 0000000..83870fc
--- /dev/null
+++ b/codegen/tensor.py
@@ -0,0 +1,127 @@
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+""" Tensor class """
+
+from typing import Dict, Optional
+import string
+import textwrap
+
+from tflite_micro.codegen import utils
+from tflite_micro.tensorflow.lite.python import schema_py_generated as schema_fb
+
+_TENSOR_TYPES: Dict[int, str] = {
+    schema_fb.TensorType.FLOAT16: "kTfLiteFloat16",
+    schema_fb.TensorType.FLOAT32: "kTfLiteFloat32",
+    schema_fb.TensorType.FLOAT64: "kTfLiteFloat64",
+    schema_fb.TensorType.INT16: "kTfLiteInt16",
+    schema_fb.TensorType.UINT16: "kTfLiteUInt16",
+    schema_fb.TensorType.INT32: "kTfLiteInt32",
+    schema_fb.TensorType.UINT32: "kTfLiteUInt32",
+    schema_fb.TensorType.UINT8: "kTfLiteUInt8",
+    schema_fb.TensorType.INT8: "kTfLiteInt8",
+    schema_fb.TensorType.INT64: "kTfLiteInt64",
+    schema_fb.TensorType.UINT64: "kTfLiteUInt64",
+    schema_fb.TensorType.STRING: "kTfLiteString",
+    schema_fb.TensorType.BOOL: "kTfLiteBool",
+    schema_fb.TensorType.COMPLEX64: "kTfLiteComplex64",
+    schema_fb.TensorType.COMPLEX128: "kTfLiteComplex128",
+    schema_fb.TensorType.RESOURCE: "kTfLiteResource",
+    schema_fb.TensorType.VARIANT: "kTfLiteVariant",
+    schema_fb.TensorType.INT4: "kTfLiteInt4",
+}
+
+
+class Buffer(object):
+  """ This buffer could be either a static array or a pointer into the arena """
+
+  def __init__(self, buffer_name: str, buffer: schema_fb.BufferT):
+    # TODO(rjascani): Get arena allocation offsets from preprocessor
+    self._buffer_name = buffer_name
+    self._buffer = buffer
+
+  @property
+  def address(self) -> str:
+    if self._buffer is None or self._buffer.data is None:
+      # TODO(rjascani): This needs to point into the arena
+      return f"nullptr /* {self._buffer_name} */"
+    return f"&{self._buffer_name}"
+
+  def generate_c_buffer_array(self, indent: str) -> str:
+    if self._buffer is None or self._buffer.data is None:
+      return f"// {self._buffer_name} is located in the arena\n"
+
+    buffer_template = string.Template(
+        "alignas(16) uint8_t ${buffer_name}[${size}] = {\n"
+        "${body}\n"
+        "};\n")
+
+    byte_strs = ['0x{:02X}'.format(b) for b in self._buffer.data]
+
+    lines = []
+    for byte_strs_for_line in utils.split_into_chunks(byte_strs, 12):
+      bytes_segment = ', '.join(byte_strs_for_line)
+      lines.append(f'    {bytes_segment},')
+
+    return textwrap.indent(
+        buffer_template.substitute(buffer_name=self._buffer_name,
+                                   size=len(self._buffer.data),
+                                   body='\n'.join(lines)), indent)
+
+
+class Tensor(object):
+
+  def __init__(self, buffer: Buffer, tensor: schema_fb.TensorT):
+    self._buffer = buffer
+    self._tensor: schema_fb.TensorT = tensor
+
+  @property
+  def buffer_index(self) -> bool:
+    return self._tensor.buffer
+
+  @property
+  def buffer(self) -> Buffer:
+    return self._buffer
+
+  @property
+  def has_shape(self) -> bool:
+    return self._tensor.shape is not None
+
+  @property
+  def needs_zero_length_int_array(self) -> bool:
+    return not self.has_shape
+
+  def generate_c_tensor_dims(self, type_name: str, tensor_name: str) -> str:
+    if not self.has_shape:
+      return f"// No data dims necessary for {tensor_name}"
+    return utils.IntArray(self._tensor.shape).generate_c_struct(
+        type_name + "Dims", tensor_name + "_dims")
+
+  def generate_c_tensor_init(self, tflite_tensor_name: str,
+                             tensor_name: str) -> str:
+    init_template = string.Template(
+        "${tflite_tensor_name} = TfLiteEvalTensor{\n"
+        "    .data = {.data = static_cast<void*>(${data})},\n"
+        "    .dims = ${dims},\n"
+        "    .type = ${tflite_type}};")
+    dims = "reinterpret_cast<TfLiteIntArray*>(&{})".format(
+        f"{tensor_name}_dims" if self._tensor.
+        shape is not None else "zero_length_int_array")
+
+    return init_template.substitute(
+        tflite_tensor_name=tflite_tensor_name,
+        tensor_name=tensor_name,
+        data=self._buffer.address,
+        dims=dims,
+        tflite_type=_TENSOR_TYPES[self._tensor.type])
diff --git a/codegen/utils.py b/codegen/utils.py
new file mode 100644
index 0000000..c6c31c8
--- /dev/null
+++ b/codegen/utils.py
@@ -0,0 +1,101 @@
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+""" Utility functions and classes for code generation. """
+
+from typing import Any, Generator, Iterable, List, Optional, Sequence, Tuple
+import string
+import textwrap
+import itertools
+
+
+def to_pascal_case(s: str) -> str:
+  """ Basic function for converting snake_case to PascalCase. """
+  # This isn't perfect, as there might be some cases where we want underscores
+  # to remain if they are used as number separators.
+  return s.title().replace('_', '')
+
+
+def bool_to_c_str(b: bool) -> str:
+  """ Convert a python bool value to a C bool string. Ie, False -> 'false' """
+  return str(b).lower()
+
+
+def split_into_chunks(
+    data: Iterable[Any],
+    chunk_size: int) -> Generator[Tuple[Any, ...], None, None]:
+  """Splits an iterable into chunks of a given size."""
+  data_iterator = iter(data)
+  while True:
+    chunk = tuple(itertools.islice(data_iterator, chunk_size))
+    if not chunk:
+      break
+    yield chunk
+
+
+def generate_c_int_array(indent: str, int_type: str, name: str,
+                         ints: Sequence[int]) -> str:
+  int_strs = ['{}'.format(i) for i in ints]
+
+  # Try to do it on a single line first
+  single_line_array_template = string.Template(
+      "constexpr ${int_type} ${name}[${size}] = {${data}};")
+  single_line = textwrap.indent(
+      single_line_array_template.substitute(int_type=int_type,
+                                            name=name,
+                                            size=len(int_strs),
+                                            data=', '.join(int_strs)), indent)
+
+  if len(single_line) < 81:
+    return single_line
+
+  # Couldn't fit, so split it across multiple lines
+  multi_line_array_template = string.Template(
+      "constexpr ${int_type} ${name}[${size}] = {\n"
+      "${body}\n"
+      "};\n")
+
+  lines = []
+  for int_strs_for_line in split_into_chunks(int_strs, 12):
+    ints_segment = ', '.join(int_strs_for_line)
+    lines.append(f'    {ints_segment},')
+
+  return textwrap.indent(
+      multi_line_array_template.substitute(int_type=int_type,
+                                           name=name,
+                                           size=len(ints),
+                                           body='\n'.join(lines)), indent)
+
+
+class IntArray(object):
+  """ A helper class for generating int arrays that can be used to provide the
+      backing storage for a TfLiteIntArray. """
+
+  def __init__(self, data: List[int]):
+    self._data = data
+
+  def generate_c_struct(self, type_name: str,
+                        variable_name: Optional[str]) -> str:
+    struct_template = string.Template("struct ${type_name} {\n"
+                                      "  int size = ${size};\n"
+                                      "  int data[${size}] = {${data}};\n"
+                                      "}")
+    # TODO(rjascani): Make this pretty print in multi-line chunks
+    int_strs = ['{}'.format(i) for i in self._data]
+    c_struct_str = struct_template.substitute(type_name=type_name,
+                                              size=len(int_strs),
+                                              data=', '.join(int_strs))
+    if variable_name:
+      return c_struct_str + " {};".format(variable_name)
+    return c_struct_str + ";"
diff --git a/debugging_output.md b/debugging_output.md
new file mode 100644
index 0000000..59aa14e
--- /dev/null
+++ b/debugging_output.md
@@ -0,0 +1,76 @@
+# How to debug invalid output
+
+The TFLM debugging output tools allow TFLM users to easily debug their models
+by providing a tool that compares the intermediate values (the output of each
+OP/kernel) from a model post-invoke between TFLM and TfLite, as well as a way
+to compare intermediate values between TFLM x86 implementations and optimized
+implementations.
+
+## How to debug TFLM Interpreter output on embedded targets
+
+First, you call a C++ binary that takes a TfLite model and returns a file
+containing random inputs and their corresponding output values for each layer
+of the model it was provided.
+
+Second, you provide a TfLite model and the file outputted by the C++ binary
+above to a Python script. The script runs TFLM x86 inference and compares it
+to the expected output.
+
+## How to debug TFLM Python Interpreter output
+
+When only a TfLite model is provided as input, the Python script mentioned in
+the section above generates random input and compares TFLM vs TfLite inference
+outputs for each layer of the model.
+
+## C++ Expected Layer by Layer Output Tool on TFLite Micro
+
+This C++ binary allows you to pass in a TfLite model and returns a flatbuffer
+file with inputs and the corresponding output values appended into it. That
+file can be passed into a Python debugging tool, which can compare those
+golden values against the x86 TFLM reference kernel implementation.
+
+The C++ tool/binary will write a debugging file to the path provided in the
+2nd arg using the TfLite model provided in the 1st arg.
+
+##### Command bazel/blaze:
+
+```
+ bazel run tensorflow/lite/micro/tools:layer_cc -- \
+    </path/to/input_model.tflite>
+   </path/to/output.file_name>
+```
+
+##### How to Build using Makefile :
+
+```
+make -f tensorflow/lite/micro/tools/make/Makefile layer_by_layer_output_tool -j24
+```
+
+## Python Layer by Layer Debugging Tool
+
+The Python tool/script can first be used to compare TFLM vs TfLite outputs for
+random inputs by only providing a TfLite file.
+
+#### TfLite vs TFLM command:
+``` 
+ bazel run tensorflow/lite/micro/tools:layer_by_layer_debugger -- \
+    --input_tflite_file=</path/to/my_model.tflite>
+```
+
+The Python tool/script can also be used to compare TFLM's Python x86 output
+against the expected output provided by the C++ tool/binary.
+
+#### TFLM vs Expected Command:
+``` 
+  bazel run tensorflow/lite/micro/tools:layer_by_layer_debugger -- \
+    --input_tflite_file=</path/to/my_model.tflite> \
+    --layer_by_layer_data_file=</path/to/my_debug_flatbuffer_file>
+```
+
+#### Optional Flags:
+ ` --print_dump  `
+When this flag is set, it will print the TFLM output for each layer that is
+compared.
+
+ ` --rng`
+Integer random number seed for generating input data for comparisons against TFLite. (Default: 42)
diff --git a/python/tflite_micro/BUILD b/python/tflite_micro/BUILD
index 68321fd..282e9db 100644
--- a/python/tflite_micro/BUILD
+++ b/python/tflite_micro/BUILD
@@ -1,5 +1,8 @@
+load("@bazel_skylib//rules:common_settings.bzl", "string_flag")
+load("@rules_python//python:defs.bzl", "py_library", "py_test")
 load("@pybind11_bazel//:build_defs.bzl", "pybind_extension")
 load("//python:py_namespace.bzl", "py_namespace")
+load("//tools:expand_stamp_vars.bzl", "expand_stamp_vars")
 load("@rules_python//python:packaging.bzl", "py_package", "py_wheel")
 load("@tflm_pip_deps//:requirements.bzl", "requirement")
 load(
@@ -70,9 +73,7 @@
     srcs = [
         "runtime.py",
     ],
-    data = [
-        ":_runtime.so",
-    ],
+    data = [":_runtime.so"],
     srcs_version = "PY3",
     visibility = ["//visibility:public"],
     deps = [
@@ -91,9 +92,10 @@
         "noubsan",
     ],
     deps = [
-        requirement("numpy"),
-        requirement("tensorflow-cpu"),
         ":runtime",
+        requirement("numpy"),
+        requirement("tensorflow"),
+        "//tensorflow/lite/micro/examples/recipes:add_four_numbers",
         "//tensorflow/lite/micro/testing:generate_test_models_lib",
     ],
 )
@@ -108,6 +110,14 @@
     ],
 )
 
+# Generate a version attribute, imported as tflite_micro.__version__, using
+# stamp (a.k.a. workspace status) variables.
+expand_stamp_vars(
+    name = "version",
+    out = "_version.py",
+    template = "_version.py.in",
+)
+
 # Collect the `deps` and their transitive dependences together into a set of
 # files to package. The files retain their full path relative to the workspace
 # root, which determines the subpackage path at which they're located within
@@ -128,6 +138,7 @@
     deps = [
         ":postinstall_check",
         ":runtime",
+        ":version",
     ],
 )
 
@@ -150,16 +161,78 @@
     ],
 )
 
+expand_stamp_vars(
+    name = "description_file",
+    out = "README.pypi.md",
+    template = "README.pypi.md.in",
+)
+
+# Building the :whl or its descendants requires the following build setting to
+# supply the Python compatibility tags for the wheel metadata.
+string_flag(
+    name = "compatibility_tag",
+    build_setting_default = "local",
+    values = [
+        "cp310_cp310_manylinux_2_28_x86_64",
+        "cp311_cp311_manylinux_2_28_x86_64",
+        "local",
+    ],
+)
+
+config_setting(
+    name = "cp310_cp310_manylinux_2_28_x86_64",
+    flag_values = {
+        ":compatibility_tag": "cp310_cp310_manylinux_2_28_x86_64",
+    },
+)
+
+config_setting(
+    name = "cp311_cp311_manylinux_2_28_x86_64",
+    flag_values = {
+        ":compatibility_tag": "cp311_cp311_manylinux_2_28_x86_64",
+    },
+)
+
+config_setting(
+    name = "local",
+    flag_values = {
+        ":compatibility_tag": "local",
+    },
+)
+
 py_wheel(
     name = "whl",
+    # This macro yields additional targets:
+    #
+    # - whl.dist: build a properly named file under whl_dist/
+    #
+    abi = select({
+        ":cp310_cp310_manylinux_2_28_x86_64": "cp310",
+        ":cp311_cp311_manylinux_2_28_x86_64": "cp311",
+        ":local": "none",
+    }),
+    description_file = ":description_file",
     distribution = "tflite_micro",
+    platform = select({
+        ":cp310_cp310_manylinux_2_28_x86_64": "manylinux_2_28_x86_64",
+        ":cp311_cp311_manylinux_2_28_x86_64": "manylinux_2_28_x86_64",
+        ":local": "any",
+    }),
+    python_tag = select({
+        ":cp310_cp310_manylinux_2_28_x86_64": "cp310",
+        ":cp311_cp311_manylinux_2_28_x86_64": "cp311",
+        ":local": "py3",
+    }),
     requires = [
         "flatbuffers",
         "numpy",
         "tensorflow",
     ],
+    stamp = 1,  # 1 == always stamp
     strip_path_prefixes = [package_name()],
-    version = "0.1.0",
+    summary = "TensorFlow Lite for Microcontrollers",
+    twine = "@tflm_pip_deps_twine//:pkg",
+    version = "{BUILD_EMBED_LABEL}.dev{STABLE_GIT_COMMIT_TIME}",
     deps = [
         ":namespace",
     ],
@@ -176,4 +249,7 @@
     data = [
         ":whl",
     ],
+    tags = [
+        "notap",  # See http://b/294278650#comment4 for more details.
+    ],
 )
diff --git a/python/tflite_micro/README.md b/python/tflite_micro/README.md
index cf5f638..927705f 100644
--- a/python/tflite_micro/README.md
+++ b/python/tflite_micro/README.md
@@ -1,25 +1,127 @@
-# TFLM Python Interpreter
+# The `tflite_micro` Python Package
 
-The TFLM interpreter can be invoked from Python by using the Python interpreter
-wrapper in this directory.
+This directory contains the `tflite_micro` Python package. The following is
+mainly documentation for its developers.
 
-## Usage
+The `tflite_micro` package contains a complete TFLM interpreter built as a
+CPython extension module. The build of simple Python packages may be driven by
+standard Python package builders such as `build`, `setuptools`, and `flit`;
+however, as TFLM is first and foremost a large C/C++ project, `tflite_micro`'s
+build is instead driven by its C/C++ build system Bazel.
 
-There are two ways to import the Python wrapper, either by using Bazel/Blaze, or
-in near future by installing a PyPi package.
+## Building and installing locally
 
-### Bazel
+### Building
 
-#### Build
+The Bazel target `//python/tflite_micro:whl.dist` builds a `tflite_micro`
+Python *.whl* under the output directory `bazel-bin/python/tflite_micro/whl_dist`. For example:
+```
+% bazel build //python/tflite_micro:whl.dist
+....
+Target //python/tflite_micro:whl.dist up-to-date:
+  bazel-bin/python/tflite_micro/whl_dist
+
+% tree bazel-bin/python/tflite_micro/whl_dist
+bazel-bin/python/tflite_micro/whl_dist
+└── tflite_micro-0.dev20230920161638-py3-none-any.whl
+```
+
+### Installing
+
+Install the resulting *.whl* via pip. For example, in a Python virtual
+environment:
+```
+% python3 -m venv ~/tmp/venv
+% source ~/tmp/venv/bin/activate
+(venv) $ pip install bazel-bin/python/tflite_micro/whl_dist/tflite_micro-0.dev20230920161638-py3-none-any.whl
+Processing ./bazel-bin/python/tflite_micro/whl_dist/tflite_micro-0.dev20230920161638-py3-none-any.whl
+....
+Installing collected packages: [....]
+```
+
+The package should now be importable and usable. For example:
+```
+(venv) $ python
+Python 3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0] on linux
+Type "help", "copyright", "credits" or "license" for more information.
+>>> import tflite_micro
+>>> tflite_micro.postinstall_check.passed()
+True
+>>>  i = tflite_micro.runtime.Interpreter.from_file("foo.tflite")
+>>> # etc.
+```
+
+## Building and uploading to PyPI
+
+The *.whl* generated above is unsuitable for distribution to the wider world
+via PyPI. The extension module is inevitably compiled against a particular
+Python implementation and platform C library. The resulting package is only
+binary-compatible with a system running the same Python implementation and a
+compatible (typically the same or newer) C library.
+
+The solution is to distribute multiple *.whl*s, one built for each Python
+implementation and platform combination. TFLM accomplishes this by running
+Bazel builds from within multiple, uniquely configured Docker containers. The
+images used are based on standards-conforming images published by the Python
+Package Authority (PyPA) for exactly such use.
+
+Python *.whl*s contain metadata used by installers such as `pip` to determine
+which distributions (*.whl*s) are compatible with the target platform. See the PyPA
+specification for [platform compatibility
+tags](https://packaging.python.org/en/latest/specifications/platform-compatibility-tags/).
+
+### Building
+
+In an environment with a working Docker installation, run the script
+`python/tflite_micro/pypi_build.sh <python-tag>` once for each tag. The
+script's online help (`--help`) lists the available tags. The script builds an
+appropriate Docker container and invokes a Bazel build and test within it.
+For example:
+```
+% python/tflite_micro/pypi_build.sh cp310
+[+] Building 2.6s (7/7) FINISHED
+=> writing image sha256:900704dad7fa27938dcc1c5057c0e760fb4ab0dff676415182455ae66546bbd4
+bazel build //python/tflite_micro:whl.dist \
+    --//python/tflite_micro:compatibility_tag=cp310_cp310_manylinux_2_28_x86_64
+bazel test //python/tflite_micro:whl_test \
+    --//python/tflite_micro:compatibility_tag=cp310_cp310_manylinux_2_28_x86_64
+//python/tflite_micro:whl_test
+Executed 1 out of 1 test: 1 test passes.
+Output:
+bazel-pypi-out/tflite_micro-0.dev20230920031310-cp310-cp310-manylinux_2_28_x86_64.whl
+```
+
+By default, *.whl*s are generated under the output directory `bazel-pypi-out/`.
+
+### Uploading to PyPI
+
+Upload the generated *.whl*s to PyPI with the script
+`python/tflite_micro/pypi_upload.sh`. This script lightly wraps the standard
+upload tool `twine`. A PyPI authentication token must be assigned to
+`TWINE_PASSWORD` in the environment. For example:
+```
+% export TWINE_PASSWORD=pypi-AgENdGV[....]
+% ./python/tflite_micro/pypi_upload.sh --test-pypi bazel-pypi-out/tflite_micro-*.whl
+Uploading distributions to https://test.pypi.org/legacy/
+Uploading tflite_micro-0.dev20230920031310-cp310-cp310-manylinux_2_28_x86_64.whl
+Uploading tflite_micro-0.dev20230920031310-cp311-cp311-manylinux_2_28_x86_64.whl
+View at:
+https://test.pypi.org/project/tflite-micro/0.dev20230920031310/
+```
+
+See the script's online help (`--help`) for more.
+
+## Using `tflite_micro` from within the TFLM source tree
+
+:construction:
+*The remainder of this document is under construction and may contain some
+obsolete information.*
+:construction:
 
 The only package that needs to be included in the `BUILD` file is
 `//python/tflite_micro:runtime`. It contains all
 the correct dependencies to build the Python interpreter.
 
-### PyPi
-
-Work in progress.
-
 ### Examples
 
 Depending on the workflow, the package import path may be slightly different.
@@ -55,7 +157,7 @@
 print(tflm_interpreter.get_output_details(0))
 ```
 
-## Technical Details
+### Technical Details
 
 The Python interpreter uses [pybind11](https://github.com/pybind/pybind11) to
 expose an evolving set of C++ APIs. The Bazel build leverages the
@@ -64,7 +166,7 @@
 The most updated Python APIs can be found in
 `python/tflite_micro/runtime.py`.
 
-## Custom Ops
+### Custom Ops
 
 The Python interpreter works with models with
 [custom ops](https://www.tensorflow.org/lite/guide/ops_custom) but special steps
@@ -126,7 +228,7 @@
 properly included in TFLM's op resolver. This approach is very similar to
 TFLite's custom op support.
 
-## Print Allocations
+### Print Allocations
 
 The Python interpreter can also be used to print memory arena allocations. This
 is very helpful to figure out actual memory arena usage.
diff --git a/python/tflite_micro/README.pypi.md.in b/python/tflite_micro/README.pypi.md.in
new file mode 100644
index 0000000..a04356e
--- /dev/null
+++ b/python/tflite_micro/README.pypi.md.in
@@ -0,0 +1,5 @@
+# TensorFlow Lite for Microcontrollers
+
+This package is built from commit
+[{STABLE_GIT_HASH}](https://github.com/tensorflow/tflite-micro/blob/{STABLE_GIT_HASH}/python/tflite_micro)
+of [github.com/tensorflow/tflite-micro](https://github.com/tensorflow/tflite-micro).
diff --git a/python/tflite_micro/__init__.py b/python/tflite_micro/__init__.py
index 940289c..9f15213 100644
--- a/python/tflite_micro/__init__.py
+++ b/python/tflite_micro/__init__.py
@@ -11,6 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+# ----
 
 # Define a public API for the package by providing aliases for modules which
 # are otherwise deeply nested in subpackages determined by their location in
@@ -20,5 +21,8 @@
 
 from tflite_micro.python.tflite_micro import runtime
 
+# Unambiguously identify the source used to build the package.
+from tflite_micro.python.tflite_micro._version import __version__
+
 # Ordered after `runtime` to avoid a circular dependency
 from tflite_micro.python.tflite_micro import postinstall_check
diff --git a/python/tflite_micro/_runtime.cc b/python/tflite_micro/_runtime.cc
index 824b3b4..246545f 100644
--- a/python/tflite_micro/_runtime.cc
+++ b/python/tflite_micro/_runtime.cc
@@ -24,14 +24,19 @@
 
 PYBIND11_MODULE(_runtime, m) {
   m.doc() = "TFLite Micro Runtime Extension";
-
+  py::enum_<tflite::InterpreterConfig>(m, "PythonInterpreterConfig")
+      .value("kAllocationRecording",
+             tflite::InterpreterConfig::kAllocationRecording)
+      .value("kPreserveAllTensors",
+             tflite::InterpreterConfig::kPreserveAllTensors);
   py::class_<InterpreterWrapper>(m, "InterpreterWrapper")
       .def(py::init([](const py::bytes& data,
                        const std::vector<std::string>& registerers_by_name,
-                       size_t arena_size, int num_resource_variables) {
+                       size_t arena_size, int num_resource_variables,
+                       tflite::InterpreterConfig config) {
         return std::unique_ptr<InterpreterWrapper>(
             new InterpreterWrapper(data.ptr(), registerers_by_name, arena_size,
-                                   num_resource_variables));
+                                   num_resource_variables, config));
       }))
       .def("PrintAllocations", &InterpreterWrapper::PrintAllocations)
       .def("Invoke", &InterpreterWrapper::Invoke)
@@ -55,6 +60,14 @@
           },
           py::arg("index"))
       .def(
+          "GetTensor",
+          [](InterpreterWrapper& self, size_t tensor_index,
+             size_t subgraph_index = 0) {
+            return tflite::PyoOrThrow(
+                self.GetTensor(tensor_index, subgraph_index));
+          },
+          py::arg("tensor_index"), py::arg("subgraph_index"))
+      .def(
           "GetOutputTensorDetails",
           [](InterpreterWrapper& self, size_t index) {
             return tflite::PyoOrThrow(self.GetOutputTensorDetails(index));
diff --git a/python/tflite_micro/_version.py.in b/python/tflite_micro/_version.py.in
new file mode 100644
index 0000000..77768f7
--- /dev/null
+++ b/python/tflite_micro/_version.py.in
@@ -0,0 +1 @@
+__version__ = "{BUILD_EMBED_LABEL}.dev{STABLE_GIT_COMMIT_TIME}-g{STABLE_GIT_HASH}"
diff --git a/python/tflite_micro/interpreter_wrapper.cc b/python/tflite_micro/interpreter_wrapper.cc
index 41c4f7a..53efe8e 100644
--- a/python/tflite_micro/interpreter_wrapper.cc
+++ b/python/tflite_micro/interpreter_wrapper.cc
@@ -15,6 +15,11 @@
 
 #include "python/tflite_micro/interpreter_wrapper.h"
 
+#include <cstddef>
+
+#include "tensorflow/lite/micro/micro_allocator.h"
+#include "tensorflow/lite/micro/micro_utils.h"
+
 // Disallow Numpy 1.7 deprecated symbols.
 #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
 // See https://numpy.org/doc/1.16/reference/c-api.array.html#importing-the-api
@@ -187,6 +192,39 @@
   return result;
 }
 
+PyObject* GetEvalTensorDetails(const TfLiteEvalTensor* eval_tensor) {
+  PyObject* tensor_type =
+      PyArray_TypeObjectFromType(TfLiteTypeToPyArrayType(eval_tensor->type));
+  PyObject* np_size_array =
+      PyArrayFromIntVector(eval_tensor->dims->data, eval_tensor->dims->size);
+  PyObject* tensor_size =
+      PyArray_Return(reinterpret_cast<PyArrayObject*>(np_size_array));
+
+  size_t eval_tensor_bytes = tflite::EvalTensorBytes(eval_tensor);
+  void* data = malloc(eval_tensor_bytes);
+  memcpy(data, eval_tensor->data.data, eval_tensor_bytes);
+
+  std::vector<npy_intp> dims(eval_tensor->dims->data,
+                             eval_tensor->dims->data + eval_tensor->dims->size);
+  int py_type_num = TfLiteTypeToPyArrayType(eval_tensor->type);
+  PyObject* np_array =
+      PyArray_SimpleNewFromData(dims.size(), dims.data(), py_type_num, data);
+
+  // Transfer ownership of the buffer to Python so that Python will take
+  // care of releasing it
+  PyArray_ENABLEFLAGS(reinterpret_cast<PyArrayObject*>(np_array),
+                      NPY_ARRAY_OWNDATA);
+
+  PyObject* result = PyDict_New();
+  PyDict_SetItemString(result, "dtype", tensor_type);
+  PyDict_SetItemString(result, "shape", tensor_size);
+  PyDict_SetItemString(
+      result, "tensor_data",
+      PyArray_Return(reinterpret_cast<PyArrayObject*>(np_array)));
+
+  return result;
+}
+
 }  // namespace
 
 InterpreterWrapper::~InterpreterWrapper() {
@@ -204,7 +242,7 @@
 
 InterpreterWrapper::InterpreterWrapper(
     PyObject* model_data, const std::vector<std::string>& registerers_by_name,
-    size_t arena_size, int num_resource_variables) {
+    size_t arena_size, int num_resource_variables, InterpreterConfig config) {
   interpreter_ = nullptr;
 
   // `model_data` is used as a raw pointer beyond the scope of this
@@ -223,12 +261,6 @@
   const Model* model = GetModel(buf);
   model_ = model_data;
   memory_arena_ = std::unique_ptr<uint8_t[]>(new uint8_t[arena_size]);
-  allocator_ = RecordingMicroAllocator::Create(memory_arena_.get(), arena_size);
-  MicroResourceVariables* resource_variables_ = nullptr;
-  if (num_resource_variables > 0)
-    resource_variables_ =
-        MicroResourceVariables::Create(allocator_, num_resource_variables);
-
   for (const std::string& registerer : registerers_by_name) {
     if (!AddCustomOpRegistererByName(registerer.c_str(),
                                      &python_ops_resolver_)) {
@@ -237,6 +269,24 @@
     }
   }
 
+  switch (config) {
+    case InterpreterConfig::kAllocationRecording: {
+      recording_allocator_ =
+          RecordingMicroAllocator::Create(memory_arena_.get(), arena_size);
+      allocator_ = recording_allocator_;
+      break;
+    }
+    case InterpreterConfig::kPreserveAllTensors: {
+      allocator_ = MicroAllocator::Create(memory_arena_.get(), arena_size,
+                                          MemoryPlannerType::kLinear);
+      break;
+    }
+  }
+  MicroResourceVariables* resource_variables_ = nullptr;
+  if (num_resource_variables > 0)
+    resource_variables_ =
+        MicroResourceVariables::Create(allocator_, num_resource_variables);
+
   interpreter_ = new MicroInterpreter(model, python_ops_resolver_, allocator_,
                                       resource_variables_);
 
@@ -250,7 +300,13 @@
   ImportNumpy();
 }
 
-void InterpreterWrapper::PrintAllocations() { allocator_->PrintAllocations(); }
+void InterpreterWrapper::PrintAllocations() {
+  if (!recording_allocator_) {
+    ThrowValueError("Cannot print allocations as they were not recorded");
+    return;
+  }
+  return recording_allocator_->PrintAllocations();
+}
 
 int InterpreterWrapper::Invoke() {
   TfLiteStatus status = interpreter_->Invoke();
@@ -358,6 +414,18 @@
   return PyArray_Return(reinterpret_cast<PyArrayObject*>(np_array));
 }
 
+PyObject* InterpreterWrapper::GetTensor(size_t tensor_index,
+                                        size_t subgraph_index) {
+  if (!interpreter_->preserve_all_tensors()) {
+    ThrowRuntimeError(
+        "TFLM only supports GetTensor() when using a python interpreter with "
+        "the InterpreterConfig.kPreserveAllTensors interpreter_config");
+    return nullptr;
+  }
+  return GetEvalTensorDetails(
+      interpreter_->GetTensor(tensor_index, subgraph_index));
+}
+
 PyObject* InterpreterWrapper::GetInputTensorDetails(size_t index) const {
   return GetTensorDetails(interpreter_->input(index));
 }
diff --git a/python/tflite_micro/interpreter_wrapper.h b/python/tflite_micro/interpreter_wrapper.h
index 1ead5af..9bb31b0 100644
--- a/python/tflite_micro/interpreter_wrapper.h
+++ b/python/tflite_micro/interpreter_wrapper.h
@@ -18,16 +18,29 @@
 #include <Python.h>
 
 #include "python/tflite_micro/python_ops_resolver.h"
+#include "tensorflow/lite/micro/micro_allocator.h"
 #include "tensorflow/lite/micro/micro_interpreter.h"
 #include "tensorflow/lite/micro/recording_micro_allocator.h"
 
 namespace tflite {
 
+// Allocation Recording is mutually exclusive with the PreserveAllTensors
+// debugging feature because PreserveAllTensors uses the LinearMemoryPlanner.
+// This means that the Allocations recorded by the RecordingMicroAllocator
+// wouldn't be accurate because the GreedyMemoryPlanner would have to be used.
+// So this Enum was made to represent the two possible modes/configs you can use
+// the python interpreter for.
+enum InterpreterConfig {
+  kAllocationRecording = 0,
+  kPreserveAllTensors = 1,
+};
+
 class InterpreterWrapper {
  public:
-  InterpreterWrapper(PyObject* model_data,
-                     const std::vector<std::string>& registerers_by_name,
-                     size_t arena_size, int num_resource_variables);
+  InterpreterWrapper(
+      PyObject* model_data, const std::vector<std::string>& registerers_by_name,
+      size_t arena_size, int num_resource_variables,
+      InterpreterConfig config = InterpreterConfig::kAllocationRecording);
   ~InterpreterWrapper();
 
   void PrintAllocations();
@@ -37,9 +50,11 @@
   PyObject* GetOutputTensor(size_t index) const;
   PyObject* GetInputTensorDetails(size_t index) const;
   PyObject* GetOutputTensorDetails(size_t index) const;
+  PyObject* GetTensor(size_t tensor_index, size_t subgraph_index = 0);
 
  private:
-  tflite::RecordingMicroAllocator* allocator_;
+  tflite::MicroAllocator* allocator_ = nullptr;
+  tflite::RecordingMicroAllocator* recording_allocator_ = nullptr;
   const PyObject* model_;
   std::unique_ptr<uint8_t[]> memory_arena_;
   tflite::PythonOpsResolver python_ops_resolver_;
diff --git a/python/tflite_micro/numpy_utils.cc b/python/tflite_micro/numpy_utils.cc
index 4a4aad8..20f43c9 100644
--- a/python/tflite_micro/numpy_utils.cc
+++ b/python/tflite_micro/numpy_utils.cc
@@ -40,6 +40,9 @@
       return NPY_FLOAT32;
     case kTfLiteFloat16:
       return NPY_FLOAT16;
+    case kTfLiteBFloat16:
+      // TODO(b/329491949): NPY_BFLOAT16 currently doesn't exist
+      return NPY_FLOAT16;
     case kTfLiteFloat64:
       return NPY_FLOAT64;
     case kTfLiteInt32:
diff --git a/python/tflite_micro/pypi_build.dockerfile b/python/tflite_micro/pypi_build.dockerfile
new file mode 100644
index 0000000..a2ac320
--- /dev/null
+++ b/python/tflite_micro/pypi_build.dockerfile
@@ -0,0 +1,16 @@
+# Use the Python Packaging Authority's reference build environment
+# for binary extensions. Binary extensions are typically built and distributed
+# for each target Python version and OS platform. The reference build
+# environment contains Python installations for each version, and a C/C++
+# toolchain specified for maximum compatibility among x86_64 Linux platforms.
+FROM quay.io/pypa/manylinux_2_28_x86_64
+
+# Install bazel (via bazelisk)
+ENV BAZELISK=https://github.com/bazelbuild/bazelisk/releases/download/v1.18.0/bazelisk-linux-amd64
+ENV BAZEL=/usr/local/bin/bazel
+RUN curl --output $BAZEL --location $BAZELISK && chmod 755 $BAZEL
+
+# Append the location of the C/C++ toolchain to the default PATH, where
+# bazel expects to find it. The reference environment provides the location
+# (typically somewhere under /opt) in DEVTOOLSET_ROOTPATH.
+RUN echo "PATH="${PATH}:/${DEVTOOLSET_ROOTPATH}"" >>/etc/environment
diff --git a/python/tflite_micro/pypi_build.sh b/python/tflite_micro/pypi_build.sh
new file mode 100755
index 0000000..0581225
--- /dev/null
+++ b/python/tflite_micro/pypi_build.sh
@@ -0,0 +1,119 @@
+#!/bin/sh
+
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+OUT_DIR_DEFAULT=bazel-pypi-out
+
+USAGE="$(basename $0) <python-tag> [<output-directory>]
+
+Build a Python wheel for public release to PyPI using a special Docker build
+container. Uses bazel, but does not pollute the WORKSPACE's default cache.
+
+<python-tag> must be one of the supported interpreters:
+   cp310
+   cp311
+
+<output-directory> defaults to $OUT_DIR_DEFAULT.
+"
+
+case "$1" in
+    cp310|cp311)
+        PY_TAG=$1
+        OUTDIR=$(realpath ${2:-$OUT_DIR_DEFAULT})
+        mkdir -p $OUTDIR
+        break
+        ;;
+    *)
+        echo usage: "$USAGE" >&2
+        exit 1
+esac
+
+SRCDIR=$(realpath .)
+if ! test -f $SRCDIR/WORKSPACE; then
+    echo "error: must run from the top of the source tree" >&2
+    exit 1
+fi
+
+# Remove Bazel's workspace symlinks so they'll be rewritten below, pointing into
+# OUTDIR.
+find . -maxdepth 1 -type l -name bazel-\* | xargs rm -f
+
+# Build the Docker image from its source file. Don't pollute the public list of
+# images by tagging; just use the image's ID.
+DOCKERFILE=python/tflite_micro/pypi_build.dockerfile
+IMAGE_ID_FILE=$OUTDIR/image-id
+docker build - --iidfile $IMAGE_ID_FILE <$DOCKERFILE
+IMAGE_ID=$(cat $IMAGE_ID_FILE)
+
+# Build the Python package within an ephemeral container.
+docker run \
+    --rm \
+    --interactive \
+    --mount type=bind,source=$SRCDIR,destination=$SRCDIR \
+    --mount type=bind,source=$OUTDIR,destination=$OUTDIR \
+    --workdir $SRCDIR \
+    --env USER=$(id -un) \
+    $IMAGE_ID \
+    /bin/bash -s -e -x -u \
+<<EOF
+    # Setup the Python compatibility tags. The PY_ABI always matches the Python
+    # interpreter tag. The platform tag is supplied by the build image in the
+    # environment variable AUDITWHEEL_PLAT.
+    PY_ABI=$PY_TAG
+    PY_PLATFORM=\$AUDITWHEEL_PLAT
+    PY_COMPATIBILITY=${PY_TAG}_\${PY_ABI}_\${PY_PLATFORM}
+
+    # Link the desired Python version into the PATH, where bazel will find it.
+    # The build image contains many different Python versions as options.
+    ln -sf /opt/python/$PY_TAG-$PY_TAG/bin/* /usr/bin
+
+    # Bazelisk fails if it can't check HOME for a .rc file, and pip (in
+    # :whl_test) installation of some dependencies (e.g., wrapt) expects HOME.
+    export HOME=$OUTDIR
+
+    # Bazelisk, bazel, and pip all need a writable cache directory.
+    export XDG_CACHE_HOME=$OUTDIR/cache
+
+    # Relocate the bazel root to keep the cache used for each Python toolchain
+    # separate. Drop root privileges and run as the invoking user.
+    call_bazel() {
+        setpriv --reuid=$(id -u) --regid=$(id -g) --clear-groups \
+            bazel \
+                --output_user_root=$OUTDIR/$PY_TAG-out \
+                "\$@" \
+                --action_env=HOME `# help setuptools find HOME in container` \
+                --action_env=USER `# bazel reads USER via whoami` \
+                --action_env=XDG_CACHE_HOME `# locate pip's cache inside OUTDIR`
+    }
+
+    # Build the wheel via bazel, using the Python compatibility tag matching the
+    # build environment.
+    call_bazel build //python/tflite_micro:whl.dist \
+        --//python/tflite_micro:compatibility_tag=\$PY_COMPATIBILITY
+
+    # Test, in the container environment.
+    call_bazel test //python/tflite_micro:whl_test \
+            --//python/tflite_micro:compatibility_tag=\$PY_COMPATIBILITY
+EOF
+
+# Make the output directory tree writable so it can be removed easily by the
+# user with `rm -rf $OUTDIR`. Bazel leaves it write-protected.
+chmod -R +w $OUTDIR
+
+# Copy the generated wheel file to the root of the $OUTDIR.
+cp bazel-bin/python/tflite_micro/whl_dist/*.whl $OUTDIR
+echo "Output:\n$(ls $OUTDIR/*.whl)"
diff --git a/python/tflite_micro/pypi_upload.sh b/python/tflite_micro/pypi_upload.sh
new file mode 100755
index 0000000..a5194c4
--- /dev/null
+++ b/python/tflite_micro/pypi_upload.sh
@@ -0,0 +1,57 @@
+#!/bin/sh
+
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+USAGE="$(basename $0) [--test-pypi] <whl>...
+
+Upload the given Python wheels to PyPI using the program twine. Requires an
+authentication token in the environment variable TWINE_PASSWORD. TWINE_USERNAME
+is set to \`__token__\` if not set in the environment.
+"
+
+die () {
+    echo "$*" >&2
+    exit 1
+}
+
+case "$1" in
+    --test-pypi)
+        export TWINE_REPOSITORY=testpypi
+        shift
+        ;;
+    -h|--help)
+        echo "$USAGE"
+        exit
+esac
+
+if [ ! "$#" -ge 1 ]; then
+    die "$USAGE"
+fi
+
+if [ ! -x $(command -v twine) ]; then
+    die "error: twine not found. On Debian and derivatives, try \`apt install twine\`."
+fi
+
+if [ ! "$TWINE_PASSWORD" ]; then
+    die "error: TWINE_PASSWORD is not set"
+fi
+
+: ${TWINE_USERNAME:="__token__"}
+
+export TWINE_PASSWORD
+export TWINE_USERNAME
+twine upload "$@"
diff --git a/python/tflite_micro/python_ops_resolver.cc b/python/tflite_micro/python_ops_resolver.cc
index a47a261..f5d6e63 100644
--- a/python/tflite_micro/python_ops_resolver.cc
+++ b/python/tflite_micro/python_ops_resolver.cc
@@ -28,6 +28,7 @@
   AddArgMin();
   AddAssignVariable();
   AddAveragePool2D();
+  AddBatchMatMul();
   AddBatchToSpaceNd();
   AddBroadcastArgs();
   AddBroadcastTo();
@@ -45,8 +46,9 @@
   AddDequantize();
   AddDetectionPostprocess();
   AddDiv();
-  AddEnergy();
   AddElu();
+  AddEmbeddingLookup();
+  AddEnergy();
   AddEqual();
   AddEthosU();
   AddExp();
@@ -55,8 +57,8 @@
   AddFill();
   AddFilterBank();
   AddFilterBankLog();
-  AddFilterBankSquareRoot();
   AddFilterBankSpectralSubtraction();
+  AddFilterBankSquareRoot();
   AddFloor();
   AddFloorDiv();
   AddFloorMod();
@@ -75,11 +77,11 @@
   AddLess();
   AddLessEqual();
   AddLog();
+  AddLogSoftmax();
   AddLogicalAnd();
   AddLogicalNot();
   AddLogicalOr();
   AddLogistic();
-  AddLogSoftmax();
   AddMaxPool2D();
   AddMaximum();
   AddMean();
@@ -89,6 +91,7 @@
   AddNeg();
   AddNotEqual();
   AddOverlapAdd();
+  AddPCAN();
   AddPack();
   AddPad();
   AddPadV2();
@@ -117,8 +120,8 @@
   AddSquare();
   AddSquaredDifference();
   AddSqueeze();
-  AddStridedSlice();
   AddStacker();
+  AddStridedSlice();
   AddSub();
   AddSum();
   AddSvdf();
diff --git a/python/tflite_micro/runtime.py b/python/tflite_micro/runtime.py
index 06a62b0..fbf2f20 100644
--- a/python/tflite_micro/runtime.py
+++ b/python/tflite_micro/runtime.py
@@ -14,15 +14,64 @@
 # ==============================================================================
 """Python package for TFLM Python Interpreter"""
 
+import enum
 import os
-
-from tflite_micro.python.tflite_micro import _runtime
 from tflite_micro.tensorflow.lite.tools import flatbuffer_utils
+from tflite_micro.python.tflite_micro import _runtime
+
+
+class InterpreterConfig(enum.Enum):
+  """There are two mutually exclusive ways you could use the TFLM python
+
+  interpreter; this enum is made so that users can clearly choose between the
+  two
+  different usage methods for the interpreter.
+
+  The first default way is kRecordingAllocation where all memory usage by the
+  interpreter is recorded on inference. When using this config the GetTensor()
+  api is disabled by the interpreter since this interpreter configuration
+  doesn’t
+  guarantee that the valid data for all tensors is available post inference.
+
+  The second way is kPreserveAllTensors where the GetTensor() api is enabled
+  by the interpreter since this interpreter configuration guarantees that the
+  valid
+  data for all tensors is preserved and available post inference. But the
+  memory
+  usage by the interpreter won’t be recorded on inference.
+
+  Usage:
+
+  default_interpreter = Interpreter(…
+        intrepreter_config=InterpreterConfig.kAllocationRecording)
+
+  preserve_interpreter = Interpreter(…
+        intrepreter_config=InterpreterConfig.kPreserveAllTensors)
+  """
+
+  kAllocationRecording = 0
+  kPreserveAllTensors = 1
+
+
+# TODO(b/297118768): Once the Kokoro Docker container for ubuntu x86 has immutabledict
+# added to it, this should be turned into an immutabledict.
+_ENUM_TRANSLATOR = {
+    InterpreterConfig.kAllocationRecording:
+    (_runtime.PythonInterpreterConfig.kAllocationRecording),
+    InterpreterConfig.kPreserveAllTensors:
+    (_runtime.PythonInterpreterConfig.kPreserveAllTensors),
+}
 
 
 class Interpreter(object):
 
-  def __init__(self, model_data, custom_op_registerers, arena_size):
+  def __init__(
+      self,
+      model_data,
+      custom_op_registerers,
+      arena_size,
+      intrepreter_config=InterpreterConfig.kAllocationRecording,
+  ):
     if model_data is None:
       raise ValueError("Model must not be None")
 
@@ -33,20 +82,28 @@
     # This is a heuristic to ensure that the arena is sufficiently sized.
     if arena_size is None:
       arena_size = len(model_data) * 10
-
     # Some models make use of resource variables ops, get the count here
     num_resource_variables = flatbuffer_utils.count_resource_variables(
         model_data)
     print("Number of resource variables the model uses = ",
           num_resource_variables)
 
-    self._interpreter = _runtime.InterpreterWrapper(model_data,
-                                                    custom_op_registerers,
-                                                    arena_size,
-                                                    num_resource_variables)
+    self._interpreter = _runtime.InterpreterWrapper(
+        model_data,
+        custom_op_registerers,
+        arena_size,
+        num_resource_variables,
+        _ENUM_TRANSLATOR[intrepreter_config],
+    )
 
   @classmethod
-  def from_file(self, model_path, custom_op_registerers=[], arena_size=None):
+  def from_file(
+      self,
+      model_path,
+      custom_op_registerers=[],
+      arena_size=None,
+      intrepreter_config=InterpreterConfig.kAllocationRecording,
+  ):
     """Instantiates a TFLM interpreter from a model .tflite filepath.
 
     Args:
@@ -65,10 +122,21 @@
     with open(model_path, "rb") as f:
       model_data = f.read()
 
-    return Interpreter(model_data, custom_op_registerers, arena_size)
+    return Interpreter(
+        model_data,
+        custom_op_registerers,
+        arena_size,
+        intrepreter_config,
+    )
 
   @classmethod
-  def from_bytes(self, model_data, custom_op_registerers=[], arena_size=None):
+  def from_bytes(
+      self,
+      model_data,
+      custom_op_registerers=[],
+      arena_size=None,
+      intrepreter_config=InterpreterConfig.kAllocationRecording,
+  ):
     """Instantiates a TFLM interpreter from a model in byte array.
 
     Args:
@@ -82,7 +150,12 @@
       An Interpreter instance
     """
 
-    return Interpreter(model_data, custom_op_registerers, arena_size)
+    return Interpreter(
+        model_data,
+        custom_op_registerers,
+        arena_size,
+        intrepreter_config,
+    )
 
   def print_allocations(self):
     """Invoke the RecordingMicroAllocator to print the arena usage.
@@ -157,6 +230,9 @@
 
     return self._interpreter.GetOutputTensor(index)
 
+  def GetTensor(self, tensor_index, subgraph_index):
+    return self._interpreter.GetTensor(tensor_index, subgraph_index)
+
   def get_input_details(self, index):
     """Get input tensor information
 
diff --git a/python/tflite_micro/runtime_test.py b/python/tflite_micro/runtime_test.py
index 6a127fc..2a9003c 100644
--- a/python/tflite_micro/runtime_test.py
+++ b/python/tflite_micro/runtime_test.py
@@ -21,13 +21,78 @@
 
 import gc
 import weakref
+
 import numpy as np
 import tensorflow as tf
 
 from tensorflow.python.framework import test_util
 from tensorflow.python.platform import test
-from tflite_micro.tensorflow.lite.micro.testing import generate_test_models
 from tflite_micro.python.tflite_micro import runtime
+from tflite_micro.tensorflow.lite.micro.examples.recipes import add_four_numbers
+from tflite_micro.tensorflow.lite.micro.testing import generate_test_models
+
+
+class PeserveAllTensorsTest(test_util.TensorFlowTestCase):
+
+  def AddFourNumbersTestInterpreterMaker(self, inputs):
+    """Returns a tflm interpreter with a simple model that adds 4 numbers
+
+    together, and loads the 4 given inputs into the model
+    """
+    model_data = add_four_numbers.generate_model(write_file=False)
+    tflm_interpreter = runtime.Interpreter.from_bytes(
+        model_data,
+        intrepreter_config=runtime.InterpreterConfig.kPreserveAllTensors,
+    )
+    self.assertEqual(len(inputs), 4)
+    tflm_interpreter.set_input(inputs[0], 0)
+    tflm_interpreter.set_input(inputs[1], 1)
+    tflm_interpreter.set_input(inputs[2], 2)
+    tflm_interpreter.set_input(inputs[3], 3)
+    return tflm_interpreter
+
+  def testGetTensorAccuratelyGetsAllTenors(self):
+    """Test checks that GetTensor() returns accurate values for each tensor in the
+
+    model based on inputs of 1 2 3 4 into the AddFourNumbers TfLite model
+    """
+    tflm_interpreter = self.AddFourNumbersTestInterpreterMaker(
+        [[np.float32(1)], [np.float32(2)], [np.float32(3)], [np.float32(4)]])
+
+    tflm_interpreter.invoke()
+
+    tflm_output = tflm_interpreter.get_output(0)
+    self.assertEqual(tflm_output[0].astype("float32"), 10.0)
+    self.assertEqual(tflm_interpreter.GetTensor(0, 0)["tensor_data"][0], 1.0)
+    self.assertEqual(tflm_interpreter.GetTensor(1, 0)["tensor_data"][0], 2.0)
+    self.assertEqual(tflm_interpreter.GetTensor(2, 0)["tensor_data"][0], 3.0)
+    self.assertEqual(tflm_interpreter.GetTensor(3, 0)["tensor_data"][0], 4.0)
+    self.assertEqual(tflm_interpreter.GetTensor(4, 0)["tensor_data"][0], 7.0)
+    self.assertEqual(tflm_interpreter.GetTensor(5, 0)["tensor_data"][0], 9.0)
+    self.assertEqual(tflm_interpreter.GetTensor(6, 0)["tensor_data"][0], 10.0)
+
+  def testGetTensorAllUniqueTensors(self):
+    """Test checks that GetTensor() returns all the tensors in the model.
+
+    Due to the values used as inputs all the Tensors have unique data values so
+    this test confirms that this is the case.
+    """
+    tflm_interpreter = self.AddFourNumbersTestInterpreterMaker(
+        [[np.float32(1)], [np.float32(2)], [np.float32(3)], [np.float32(4)]])
+
+    tflm_interpreter.invoke()
+    tensors = [
+        tflm_interpreter.GetTensor(0, 0)["tensor_data"][0],
+        tflm_interpreter.GetTensor(1, 0)["tensor_data"][0],
+        tflm_interpreter.GetTensor(2, 0)["tensor_data"][0],
+        tflm_interpreter.GetTensor(3, 0)["tensor_data"][0],
+        tflm_interpreter.GetTensor(4, 0)["tensor_data"][0],
+        tflm_interpreter.GetTensor(5, 0)["tensor_data"][0],
+        tflm_interpreter.GetTensor(6, 0)["tensor_data"][0],
+    ]
+
+    # Check that all tensors are unique
+    self.assertEqual(len(set(tensors)), 7)
 
 
 class ConvModelTests(test_util.TensorFlowTestCase):
diff --git a/python/tflite_micro/signal/BUILD b/python/tflite_micro/signal/BUILD
index 2bf5a94..0fce266 100644
--- a/python/tflite_micro/signal/BUILD
+++ b/python/tflite_micro/signal/BUILD
@@ -1,8 +1,10 @@
+load("@rules_python//python:defs.bzl", "py_library", "py_test")
 load("//python/tflite_micro/signal:tflm_signal.bzl", "py_tflm_signal_library")
 load("//tensorflow:extra_rules.bzl", "tflm_signal_friends")
 load("@tflm_pip_deps//:requirements.bzl", "requirement")
 
 package(
+    default_visibility = [":__subpackages__"],
     licenses = ["notice"],
 )
 
@@ -21,6 +23,7 @@
         ":filter_bank_ops_cc",
         ":framer_op_cc",
         ":overlap_add_op_cc",
+        ":pcan_op_cc",
         ":stacker_op_cc",
         ":window_op_cc",
     ],
@@ -33,7 +36,7 @@
         "ops/__init__.py",
     ],
     srcs_version = "PY3",
-    visibility = ["//python/tflite_micro/signal/utils:__subpackages__"],
+    visibility = ["//visibility:public"],
     deps = [
         ":delay_op",
         ":energy_op",
@@ -41,6 +44,7 @@
         ":filter_bank_ops",
         ":framer_op",
         ":overlap_add_op",
+        ":pcan_op",
         ":stacker_op",
         ":window_op",
     ],
@@ -53,6 +57,9 @@
     cc_op_kernels = [
         "//signal/tensorflow_core/kernels:delay_kernel",
     ],
+    deps = [
+        "//python/tflite_micro/signal/utils:util",
+    ],
 )
 
 py_test(
@@ -63,9 +70,8 @@
     srcs_version = "PY3",
     deps = [
         ":delay_op",
-        "//python/tflite_micro/signal/utils:util",
         requirement("numpy"),
-        requirement("tensorflow-cpu"),
+        requirement("tensorflow"),
     ],
 )
 
@@ -76,6 +82,9 @@
     cc_op_kernels = [
         "//signal/tensorflow_core/kernels:energy_kernel",
     ],
+    deps = [
+        "//python/tflite_micro/signal/utils:util",
+    ],
 )
 
 py_test(
@@ -89,9 +98,8 @@
     srcs_version = "PY3",
     deps = [
         ":energy_op",
-        "//python/tflite_micro/signal/utils:util",
         requirement("numpy"),
-        requirement("tensorflow-cpu"),
+        requirement("tensorflow"),
     ],
 )
 
@@ -102,6 +110,9 @@
     cc_op_kernels = [
         "//signal/tensorflow_core/kernels:fft_kernel",
     ],
+    deps = [
+        "//python/tflite_micro/signal/utils:util",
+    ],
 )
 
 py_test(
@@ -115,9 +126,8 @@
     srcs_version = "PY3",
     deps = [
         ":fft_ops",
-        "//python/tflite_micro/signal/utils:util",
         requirement("numpy"),
-        requirement("tensorflow-cpu"),
+        requirement("tensorflow"),
     ],
 )
 
@@ -130,6 +140,7 @@
     ],
     deps = [
         "//python/tflite_micro/signal/utils:freq_to_mel",
+        "//python/tflite_micro/signal/utils:util",
     ],
 )
 
@@ -149,9 +160,8 @@
     srcs_version = "PY3",
     deps = [
         ":filter_bank_ops",
-        "//python/tflite_micro/signal/utils:util",
         requirement("numpy"),
-        requirement("tensorflow-cpu"),
+        requirement("tensorflow"),
     ],
 )
 
@@ -162,6 +172,9 @@
     cc_op_kernels = [
         "//signal/tensorflow_core/kernels:framer_kernel",
     ],
+    deps = [
+        "//python/tflite_micro/signal/utils:util",
+    ],
 )
 
 py_test(
@@ -175,9 +188,8 @@
     srcs_version = "PY3",
     deps = [
         ":framer_op",
-        "//python/tflite_micro/signal/utils:util",
         requirement("numpy"),
-        requirement("tensorflow-cpu"),
+        requirement("tensorflow"),
     ],
 )
 
@@ -188,6 +200,9 @@
     cc_op_kernels = [
         "//signal/tensorflow_core/kernels:overlap_add_kernel",
     ],
+    deps = [
+        "//python/tflite_micro/signal/utils:util",
+    ],
 )
 
 py_test(
@@ -198,10 +213,42 @@
     srcs_version = "PY3",
     deps = [
         ":overlap_add_op",
-        "//python/tflite_micro/signal/utils:util",
         "@absl_py//absl/testing:parameterized",
         requirement("numpy"),
-        requirement("tensorflow-cpu"),
+        requirement("tensorflow"),
+    ],
+)
+
+py_tflm_signal_library(
+    name = "pcan_op",
+    srcs = ["ops/pcan_op.py"],
+    cc_op_defs = ["//signal/tensorflow_core/ops:pcan_op"],
+    cc_op_kernels = [
+        "//signal/tensorflow_core/kernels:pcan_kernel",
+    ],
+    deps = [
+        "//python/tflite_micro/signal/utils:util",
+        "//python/tflite_micro/signal/utils:wide_dynamic_func_lut",
+    ],
+)
+
+py_test(
+    name = "pcan_op_test",
+    srcs = ["ops/pcan_op_test.py"],
+    data = [
+        "//python/tflite_micro/signal/ops/testdata:pcan_op_test1.txt",
+    ],
+    python_version = "PY3",
+    srcs_version = "PY3",
+    tags = [
+        "noasan",
+        "nomsan",
+        "noubsan",
+    ],
+    deps = [
+        ":pcan_op",
+        requirement("numpy"),
+        requirement("tensorflow"),
     ],
 )
 
@@ -212,6 +259,9 @@
     cc_op_kernels = [
         "//signal/tensorflow_core/kernels:stacker_kernel",
     ],
+    deps = [
+        "//python/tflite_micro/signal/utils:util",
+    ],
 )
 
 py_test(
@@ -225,9 +275,8 @@
     srcs_version = "PY3",
     deps = [
         ":stacker_op",
-        "//python/tflite_micro/signal/utils:util",
         requirement("numpy"),
-        requirement("tensorflow-cpu"),
+        requirement("tensorflow"),
     ],
 )
 
@@ -238,6 +287,9 @@
     cc_op_kernels = [
         "//signal/tensorflow_core/kernels:window_kernel",
     ],
+    deps = [
+        "//python/tflite_micro/signal/utils:util",
+    ],
 )
 
 py_test(
@@ -250,8 +302,7 @@
     srcs_version = "PY3",
     deps = [
         ":window_op",
-        "//python/tflite_micro/signal/utils:util",
         requirement("numpy"),
-        requirement("tensorflow-cpu"),
+        requirement("tensorflow"),
     ],
 )
diff --git a/python/tflite_micro/signal/ops/pcan_op.py b/python/tflite_micro/signal/ops/pcan_op.py
new file mode 100644
index 0000000..753e76b
--- /dev/null
+++ b/python/tflite_micro/signal/ops/pcan_op.py
@@ -0,0 +1,70 @@
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+import tensorflow as tf
+from tflite_micro.python.tflite_micro.signal.utils import util
+from tflite_micro.python.tflite_micro.signal.utils import wide_dynamic_func_lut_wrapper
+
+gen_pcan_op = util.load_custom_op("pcan_op.so")
+
+PCAN_SNR_BITS = 12
+
+
+def _pcan_wrapper(pcan_fn, default_name):
+  """Wrapper around gen_pcan.pcan*."""
+
+  def _pcan(input_tensor,
+            noise_estimate,
+            strength,
+            offset,
+            gain_bits,
+            smoothing_bits,
+            input_correction_bits,
+            name=default_name):
+    with tf.name_scope(name) as scope:
+      input_tensor = tf.convert_to_tensor(input_tensor, dtype=tf.uint32)
+      noise_estimate = tf.convert_to_tensor(noise_estimate, dtype=tf.uint32)
+
+      input_bits = smoothing_bits - input_correction_bits
+      snr_shift = gain_bits - input_correction_bits - PCAN_SNR_BITS
+      if snr_shift < 1:
+        raise ValueError("SNR shift must be positive: %d" % snr_shift)
+
+      lut = wide_dynamic_func_lut_wrapper.wide_dynamic_func_lut(
+          strength, offset, input_bits, gain_bits)
+
+      lut_tensor = tf.convert_to_tensor(lut, dtype=tf.int16)
+
+      dim_list = input_tensor.shape.as_list()
+      if len(dim_list) != 1:
+        raise ValueError("Input tensor must have a rank of 1")
+      dim_list = noise_estimate.shape.as_list()
+      if len(dim_list) != 1:
+        raise ValueError("Noise estimate must have a rank of 1")
+
+      snr_shift = 6
+      return pcan_fn(input_tensor,
+                     noise_estimate,
+                     lut_tensor,
+                     snr_shift=snr_shift,
+                     name=scope)
+
+  return _pcan
+
+
+# TODO(b/286250473): change back name after name clash resolved
+pcan = _pcan_wrapper(gen_pcan_op.signal_pcan, "signal_pcan")
+
+tf.no_gradient("pcan")
diff --git a/python/tflite_micro/signal/ops/pcan_op_test.py b/python/tflite_micro/signal/ops/pcan_op_test.py
new file mode 100644
index 0000000..1400bf3
--- /dev/null
+++ b/python/tflite_micro/signal/ops/pcan_op_test.py
@@ -0,0 +1,83 @@
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+import os
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.platform import resource_loader
+from tflite_micro.python.tflite_micro.signal.ops import pcan_op
+from tflite_micro.python.tflite_micro.signal.utils import util
+
+
+class PcanOpTest(tf.test.TestCase):
+  """Checks the PCAN op against golden data, via both TFLM and TF paths."""
+
+  _PREFIX_PATH = resource_loader.get_path_to_datafile('')
+
+  def GetResource(self, filepath):
+    """Returns the text contents of `filepath`, relative to this file."""
+    full_path = os.path.join(self._PREFIX_PATH, filepath)
+    with open(full_path, 'rt') as f:
+      file_text = f.read()
+    return file_text
+
+  def SinglePcanOpTest(self, filename):
+    """Runs one golden-data test file through the op and checks outputs.
+
+    The first line of the file holds the op parameters (strength, offset,
+    gain_bits, smoothing_bits, input_correction_bits); each following group
+    of three lines holds an input frame, a noise estimate, and the expected
+    output, all as space-separated integers.
+    """
+    lines = self.GetResource(filename).splitlines()
+    args = lines[0].split()
+    strength = float(args[0])
+    offset = float(args[1])
+    gain_bits = int(args[2])
+    smoothing_bits = int(args[3])
+    input_correction_bits = int(args[4])
+
+    func = tf.function(pcan_op.pcan)
+    # Channel count comes from the first data line; every frame in the file
+    # is expected to have the same length.
+    channel_num = len(lines[1].split())
+
+    concrete_function = func.get_concrete_function(
+        tf.TensorSpec(channel_num, dtype=tf.uint32),
+        tf.TensorSpec(channel_num, dtype=tf.uint32),
+        strength=strength,
+        offset=offset,
+        gain_bits=gain_bits,
+        smoothing_bits=smoothing_bits,
+        input_correction_bits=input_correction_bits)
+    interpreter = util.get_tflm_interpreter(concrete_function, func)
+
+    # Read lines in triplets <input, noise_estimate, expected>
+    for i in range(1, len(lines), 3):
+      in_frame = np.array([int(j) for j in lines[i + 0].split()],
+                          dtype='uint32')
+      noise_estimate = np.array([int(j) for j in lines[i + 1].split()],
+                                dtype='uint32')
+      output_expected = np.array([int(j) for j in lines[i + 2].split()],
+                                 dtype='uint32')
+      # TFLM: run through the TFLite-Micro interpreter.
+      interpreter.set_input(in_frame, 0)
+      interpreter.set_input(noise_estimate, 1)
+      interpreter.invoke()
+      output = interpreter.get_output(0)
+      self.assertAllEqual(output_expected, output)
+      # TF: evaluate the op directly; results must match the same goldens.
+      output = self.evaluate(
+          pcan_op.pcan(in_frame, noise_estimate, strength, offset, gain_bits,
+                       smoothing_bits, input_correction_bits))
+      self.assertAllEqual(output_expected, output)
+
+  def testPcanOp(self):
+    self.SinglePcanOpTest('testdata/pcan_op_test1.txt')
+
+
+if __name__ == '__main__':
+  tf.test.main()
diff --git a/python/tflite_micro/signal/ops/testdata/BUILD b/python/tflite_micro/signal/ops/testdata/BUILD
index 87fd9f2..4c8d7bc 100644
--- a/python/tflite_micro/signal/ops/testdata/BUILD
+++ b/python/tflite_micro/signal/ops/testdata/BUILD
@@ -16,6 +16,7 @@
     "filter_bank_square_root_test1.txt",
     "filter_bank_spectral_subtraction_test1.txt",
     "framer_test1.txt",
+    "pcan_op_test1.txt",
     "rfft_test1.txt",
     "stacker_test1.txt",
     "window_test1.txt",
diff --git a/python/tflite_micro/signal/ops/testdata/pcan_op_test1.txt b/python/tflite_micro/signal/ops/testdata/pcan_op_test1.txt
new file mode 100644
index 0000000..68da771
--- /dev/null
+++ b/python/tflite_micro/signal/ops/testdata/pcan_op_test1.txt
@@ -0,0 +1,1348 @@
+0.950000 80.000000 21 10 3
+286 298 305 291 290 279 273 257 250 240 240 233 234 230 221 205 183 159 156 188 239 298 345 374 380 369 359 364 372 354 302 243 194 135 64 72 171 245 277 304
+7310 18308 7796 17878 7413 17141 6978 15789 6390 14745 6135 14314 5981 14130 5649 12594 4677 9768 3987 11550 6109 18308 8819 22977 9713 22670 9176 22363 9509 21748 7719 14929 4959 8294 1636 4423 4371 15052 7080 18677
+1301 836 1354 827 1312 811 1263 779 1192 753 1160 743 1140 738 1096 698 956 607 845 667 1157 836 1461 912 1546 908 1496 904 1527 895 1346 758 999 548 378 344 908 761 1274 843
+6 8 8 11 10 8 5 4 4 2 6 8 11 11 10 11 6 7 5 6 11 8 9 8 7 6 6 11 14 12 11 11 11 9 8 9 9 11 11 5
+7280 17701 7805 17481 7483 16604 6931 15087 6332 13983 6135 13946 6112 13958 5763 12514 4713 9612 4015 11225 6237 17701 8828 22089 9649 21678 9100 21697 9629 21180 7807 14709 5116 8349 1799 4710 4491 14824 7184 17863
+3 2 5 4 8 2 2 0 1 0 3 3 12 5 10 6 4 3 3 2 12 2 6 1 3 1 2 3 13 4 10 5 13 6 11 9 10 5 10 0
+8 5 9 7 12 10 9 7 5 4 4 6 8 8 12 8 8 10 9 8 16 12 10 10 9 7 5 12 9 9 12 8 10 11 10 11 10 9 9 7
+7302 16946 7840 16862 7602 16222 6988 14611 6301 13389 6084 13477 6163 13612 5925 12254 4799 9649 4144 11043 6490 17376 8863 21378 9638 20807 9000 21132 9618 20462 7918 14317 5243 8523 2009 5103 4634 14487 7234 17221
+5 0 6 1 12 4 7 2 2 0 1 1 6 3 15 3 7 7 10 3 24 5 7 2 5 1 1 4 5 2 12 2 11 9 17 13 12 3 7 1
+11 14 8 17 14 7 9 8 3 5 7 6 8 10 5 3 4 14 14 6 11 11 11 11 4 11 7 7 9 9 10 14 15 11 11 13 10 10 11 6
+7400 16789 7848 16894 7770 15678 7043 14225 6220 12892 6111 13037 6213 13409 5904 11703 4781 9930 4398 10749 6609 17009 8922 20771 9499 20234 8954 20294 9607 19787 7975 14318 5495 8687 2240 5595 4773 14232 7334 16556
+10 7 5 11 16 2 7 3 0 1 4 1 6 5 2 0 1 13 24 2 11 4 9 3 1 3 3 1 5 2 8 9 24 9 20 18 11 4 10 1
+6 8 3 5 6 5 7 8 7 8 10 9 10 6 8 7 7 12 10 8 12 8 13 9 7 12 9 9 8 13 10 12 17 12 8 9 8 9 10 8
+7368 16273 7728 16187 7729 15044 7046 13863 6243 12610 6214 12807 6313 12973 5961 11430 4840 10071 4543 10595 6750 16480 9031 20077 9440 19757 8960 19629 9571 19398 8031 14196 5792 8903 2388 5812 4858 13931 7406 16054
+3 2 0 1 3 1 4 3 4 3 10 4 9 1 6 2 5 9 12 4 13 2 12 2 3 4 6 2 4 5 8 6 30 10 10 8 7 3 8 2
+7 9 11 5 5 7 10 6 5 7 6 4 7 6 5 10 9 11 10 11 11 6 9 6 10 8 11 12 9 12 11 13 15 11 10 9 8 11 11 9
+7363 15849 7816 15523 7663 14571 7125 13399 6214 12283 6212 12284 6334 12563 5940 11358 4949 10142 4685 10635 6862 15859 9035 19241 9459 19063 9017 19188 9562 18971 8111 14142 6030 9044 2584 6016 4941 13770 7502 15643
+4 3 10 1 2 2 9 1 2 2 3 0 4 1 2 6 9 8 12 7 11 1 6 1 7 2 9 4 5 4 9 8 23 9 16 8 7 5 10 3
+7 15 13 7 6 7 6 5 10 9 4 8 7 3 9 11 7 9 5 5 11 10 6 5 8 8 10 14 9 7 8 11 13 13 16 10 8 8 5 5
+7358 15819 7953 15021 7625 14126 7100 12902 6314 12098 6159 12038 6354 11993 6021 11352 5004 10086 4695 10304 6971 15521 8962 18393 9427 18410 9047 18896 9553 18262 8113 13969 6211 9300 2928 6269 5022 13435 7442 15011
+4 9 14 2 3 2 3 1 9 4 1 3 4 0 8 7 5 5 3 1 11 4 2 0 4 2 7 6 5 1 5 5 17 12 39 10 7 3 2 1
+5 7 5 6 5 5 7 9 8 7 4 4 4 4 7 10 12 9 5 6 8 7 7 6 7 9 6 8 10 10 10 14 10 13 11 11 8 9 7 6
+7302 15299 7882 14488 7562 13585 7101 12680 6360 11802 6107 11561 6297 11519 6049 11285 5185 10033 4705 10054 7001 15019 8917 17658 9370 17858 8974 18253 9570 17780 8166 13991 6311 9540 3136 6568 5101 13181 7435 14479
+2 2 2 1 2 1 4 4 6 2 1 0 1 0 5 6 16 5 3 2 5 2 3 1 3 2 2 2 7 3 8 9 9 12 18 11 7 4 4 1
+3 1 3 5 4 5 10 8 6 8 3 3 2 4 6 6 6 8 7 5 7 8 8 5 11 10 10 11 11 6 8 10 9 11 12 9 10 10 9 10
+7196 14442 7761 13925 7475 13077 7179 12410 6354 11585 6031 11051 6190 11073 6051 10976 5208 9922 4766 9757 7005 14609 8898 16905 9417 17400 9005 17833 9612 17081 8166 13765 6383 9643 3364 6726 5229 13004 7479 14224
+0 0 0 1 1 1 9 3 3 3 0 0 0 0 3 2 4 4 5 1 4 2 4 0 8 3 7 4 8 1 5 4 7 8 20 7 11 5 7 4
+4 8 6 3 5 9 6 4 6 7 6 8 4 3 7 10 8 7 4 3 10 8 13 10 9 10 9 12 9 7 7 10 11 8 9 11 7 11 7 7
+7118 14067 7720 13273 7416 12845 7153 11911 6348 11319 6033 10879 6137 10592 6078 10931 5282 9756 4749 9355 7085 14223 9008 16505 9411 16970 9010 17500 9602 16486 8141 13553 6504 9555 3510 6998 5277 12899 7471 13800
+1 3 3 0 2 4 3 0 3 2 3 4 1 0 5 6 7 3 1 0 9 3 12 4 5 3 6 5 5 1 3 4 11 4 11 11 5 6 4 2
+7 13 9 9 8 8 5 4 5 5 4 6 8 5 5 6 6 6 7 5 7 8 12 9 4 9 8 6 6 8 5 9 10 9 9 10 9 11 8 6
+7119 14021 7757 13029 7435 12565 7102 11442 6317 10947 5984 10594 6188 10263 6054 10643 5303 9539 4809 9100 7087 13861 9089 16067 9278 16504 8989 16818 9515 15988 8065 13292 6597 9534 3652 7192 5375 12800 7488 13340
+4 8 6 4 5 3 2 0 2 1 1 2 6 1 2 2 4 2 5 1 4 3 10 3 1 3 4 1 2 2 2 4 9 5 11 9 9 6 5 1
+6 7 10 8 11 14 6 7 8 8 6 6 12 8 6 7 7 5 6 6 11 11 8 10 9 9 11 9 10 12 12 9 14 14 14 16 11 7 8 7
+7094 13609 7818 12738 7530 12671 7078 11185 6363 10781 5987 10327 6340 10138 6056 10434 5349 9273 4842 8922 7191 13705 9066 15717 9276 16066 9045 16361 9533 15766 8170 13047 6790 9822 3918 7743 5522 12462 7505 12969
+3 2 8 3 10 10 3 2 6 4 3 2 14 4 3 3 5 1 4 2 10 5 4 4 5 3 9 3 7 6 11 4 18 13 26 21 13 2 5 2
+7 7 3 4 4 6 6 8 8 5 5 6 8 6 4 7 6 9 10 8 6 9 14 11 8 10 9 14 10 9 11 9 11 16 11 9 10 9 6 6
+7095 13222 7699 12219 7444 12279 7054 11005 6408 10441 5965 10076 6386 9898 6007 10238 5368 9269 4976 8878 7164 13435 9197 15449 9248 15716 9049 16239 9550 15373 8247 12817 6901 10215 4101 7831 5639 12267 7471 12559
+4 2 0 0 1 1 3 3 6 1 2 2 6 2 1 3 4 5 11 4 3 4 14 5 4 4 6 7 7 3 9 4 11 17 15 6 10 4 3 1
+15 8 5 6 8 15 15 7 8 7 5 3 5 7 9 9 8 6 5 7 7 10 15 8 6 11 9 11 9 10 8 10 13 14 7 6 7 8 9 10
+7301 12920 7634 11854 7462 12463 7261 10774 6452 10244 5943 9655 6354 9734 6087 10176 5438 9081 4979 8775 7164 13243 9350 15013 9170 15448 9053 15940 9541 15065 8245 12662 7061 10462 4177 7729 5677 12022 7514 12419
+20 3 2 2 5 12 20 3 6 3 2 0 2 3 8 5 7 2 2 3 4 5 16 2 2 5 6 5 5 4 5 5 15 12 6 3 5 3 7 5
+10 9 6 3 5 9 13 11 5 3 10 8 6 4 7 9 9 9 5 5 10 15 7 7 8 8 7 5 8 9 10 11 11 11 12 9 9 10 9 10
+7374 12697 7596 11327 7403 12268 7412 10803 6418 9813 6050 9567 6348 9395 6113 10118 5532 9089 4982 8555 7240 13370 9295 14542 9145 15012 9005 15290 9507 14714 8294 12578 7165 10510 4379 7818 5765 11915 7556 12288
+8 4 3 0 2 4 14 7 2 0 10 4 3 1 4 5 8 6 2 1 8 11 3 2 4 2 3 1 4 3 8 6 10 7 18 6 8 5 7 5
+11 8 7 12 8 11 9 7 4 7 8 7 6 3 4 4 8 9 8 10 9 9 8 12 12 8 8 13 12 6 8 9 11 8 8 8 8 10 11 8
+7471 12426 7585 11384 7422 12207 7457 10584 6360 9654 6103 9423 6342 9015 6062 9756 5598 9096 5062 8656 7289 13120 9267 14406 9223 14602 8984 15171 9576 14199 8291 12376 7267 10370 4474 7840 5825 11814 7648 12042
+10 3 4 8 5 6 7 3 1 3 6 3 3 0 1 1 6 6 7 7 7 4 4 6 10 2 4 7 10 1 5 4 10 4 7 5 6 5 10 3
+14 13 18 9 10 6 6 5 4 6 7 5 3 5 4 4 8 10 8 12 13 10 7 4 5 12 14 9 13 9 12 7 9 13 10 8 12 9 9 9
+7642 12479 7855 11253 7492 11843 7424 10256 6303 9443 6129 9164 6260 8781 6012 9416 5662 9164 5140 8873 7439 12947 9214 13787 9120 14463 9117 14813 9669 13900 8390 12063 7315 10546 4617 7861 5986 11658 7687 11872
+16 9 27 4 8 2 3 1 1 2 4 1 0 1 1 1 6 7 7 10 14 5 3 0 1 6 14 3 11 3 11 2 7 10 12 5 14 4 6 4
+10 12 5 6 8 7 8 9 10 5 7 7 8 7 7 6 7 13 7 13 10 7 7 6 8 12 11 12 13 11 15 15 9 9 9 8 7 7 8 6
+7706 12467 7786 10946 7509 11562 7443 10193 6401 9183 6154 9044 6308 8684 6040 9219 5699 9412 5190 9139 7508 12600 9162 13328 9096 14332 9170 14661 9759 13741 8563 12260 7362 10466 4731 7880 6015 11388 7699 11528
+8 7 2 2 5 2 5 5 9 1 4 3 6 3 5 2 5 12 5 12 8 2 3 1 4 6 8 6 11 5 17 12 7 5 9 5 5 2 5 2
+13 10 6 11 8 7 9 7 7 10 9 5 5 4 7 5 5 6 7 10 16 8 9 7 7 12 12 8 9 10 15 8 10 10 11 8 6 8 9 7
+7845 12333 7745 10965 7526 11298 7487 10011 6420 9246 6230 8808 6278 8408 6068 8973 5684 9215 5239 9205 7729 12335 9163 12958 9047 14209 9247 14272 9745 13530 8732 12015 7433 10452 4894 7898 6018 11196 7736 11266
+14 5 3 7 5 2 7 3 4 7 8 1 2 1 5 1 2 2 5 7 21 3 5 2 3 6 10 3 5 4 17 3 8 6 14 5 3 3 6 2
+8 7 5 11 7 7 6 7 9 7 9 11 6 7 9 8 5 5 7 7 9 10 12 10 9 10 10 11 11 10 14 10 11 11 10 9 8 6 5 5
+7853 12023 7679 10982 7517 11050 7453 9840 6489 9121 6304 8955 6274 8333 6146 8926 5669 8969 5287 9082 7766 12209 9241 12794 9051 13970 9271 14091 9782 13332 8871 11908 7528 10500 5027 7977 6072 10892 7670 10897
+5 2 2 7 4 3 3 3 7 3 8 9 3 3 8 4 2 1 5 3 6 5 10 5 6 4 7 5 8 5 14 5 10 7 11 6 6 2 2 1
+10 8 5 8 6 4 11 9 5 10 11 12 6 5 9 6 4 8 9 14 9 9 12 9 8 6 9 10 9 11 9 13 14 13 10 11 9 9 8 7
+7912 11793 7615 10814 7482 10632 7548 9802 6454 9188 6427 9154 6270 8140 6222 8759 5629 8922 5385 9397 7802 12029 9317 12579 9029 13500 9269 13859 9767 13207 8879 11992 7697 10668 5157 8174 6150 10791 7683 10673
+8 3 2 4 3 1 10 5 2 7 11 10 3 2 8 2 1 4 9 14 6 4 10 4 4 1 5 4 5 6 6 9 16 10 11 9 8 5 5 3
+10 7 8 10 12 8 9 13 7 5 5 5 5 7 8 5 5 5 8 10 9 7 8 7 10 9 9 11 9 8 8 9 11 7 9 11 9 10 9 7
+7970 11515 7629 10779 7601 10485 7589 10012 6471 8943 6394 8911 6241 8081 6271 8540 5616 8693 5455 9447 7837 11737 9288 12254 9059 13242 9267 13703 9753 12906 8861 11825 7786 10458 5258 8359 6226 10757 7721 10462
+8 2 5 6 12 4 6 11 4 1 2 1 2 4 6 1 2 1 7 7 6 2 4 2 7 4 5 5 5 3 4 4 10 3 9 9 8 6 6 3
+8 7 11 10 12 6 11 10 7 10 5 5 10 5 6 8 8 7 8 6 8 9 11 6 6 7 5 12 14 10 9 12 11 10 14 11 8 5 5 7
+7975 11254 7719 10746 7718 10224 7680 10025 6488 9020 6362 8683 6340 7903 6267 8519 5680 8601 5523 9248 7845 11585 9337 11887 8986 12877 9163 13618 9867 12746 8869 11852 7872 10444 5484 8533 6275 10418 7656 10264
+5 2 10 6 12 2 10 6 4 7 2 1 9 2 3 5 6 3 6 2 5 4 8 2 2 2 1 7 13 5 6 8 10 6 21 9 6 1 2 3
+7 14 14 10 12 7 6 5 6 6 5 4 6 9 10 8 9 9 9 5 8 7 9 8 6 8 8 9 10 9 8 7 9 14 12 8 9 9 6 8
+7954 11438 7884 10715 7832 10040 7641 9730 6479 8847 6330 8407 6335 7981 6366 8499 5768 8637 5615 9000 7853 11319 9333 11665 8915 12595 9138 13353 9876 12534 8852 11570 7905 10677 5653 8512 6348 10345 7618 10139
+4 11 16 6 12 3 3 1 3 2 2 1 3 6 9 5 8 6 8 1 5 2 5 3 2 3 4 4 6 4 4 2 6 12 15 5 8 5 3 4
+6 8 7 5 6 6 3 3 7 5 4 4 7 4 5 7 10 7 7 4 7 8 12 8 7 7 6 11 14 11 9 7 9 9 8 10 11 8 6 6
+7908 11243 7866 10379 7789 9806 7526 9330 6496 8623 6274 8148 6355 7747 6334 8419 5879 8548 5653 8705 7835 11131 9406 11456 8871 12269 9063 13227 9987 12457 8861 11305 7937 10589 5716 8615 6470 10215 7581 9899
+3 3 4 1 3 2 0 0 4 1 1 1 4 1 2 3 10 3 5 1 4 3 10 3 3 2 2 6 13 6 6 2 6 5 6 7 11 4 3 2
+5 3 7 10 7 7 11 8 14 16 9 5 9 4 3 6 9 7 5 10 7 5 7 8 7 8 10 9 15 10 8 5 10 11 12 10 11 7 7 7
+7838 10752 7848 10370 7773 9647 7619 9261 6691 9088 6347 7966 6426 7527 6252 8282 5962 8465 5639 8797 7818 10770 9350 11260 8828 12024 9092 12986 10121 12323 8844 10933 7994 10629 5880 8712 6589 10032 7570 9735
+2 0 4 6 4 3 10 4 18 19 8 2 7 1 0 2 8 3 2 7 4 1 3 3 3 3 7 4 15 5 4 1 8 7 15 7 11 3 4 3
+5 7 8 8 13 14 10 7 9 10 8 5 7 7 6 12 10 6 6 10 9 6 7 8 11 10 13 8 10 10 8 12 9 9 15 11 10 9 11 9
+7770 10536 7856 10239 7911 9928 7684 9135 6754 9157 6393 7795 6444 7505 6249 8522 6068 8325 5651 8883 7852 10492 9295 11075 8888 11916 9197 12698 10123 12198 8827 11014 8024 10544 6116 8865 6680 9983 7662 9703
+2 3 5 4 14 13 8 3 7 7 6 2 4 4 3 11 10 2 3 7 6 2 3 3 9 5 12 3 6 5 4 8 6 5 22 9 9 5 10 5
+5 4 6 5 8 5 3 4 5 8 10 7 11 7 7 11 8 9 9 12 8 7 11 9 7 12 14 10 10 9 11 12 12 12 9 11 8 9 9 5
+7703 10149 7813 9931 7918 9639 7568 8832 6713 9099 6489 7757 6564 7484 6271 8686 6121 8378 5739 9087 7860 10292 9344 10963 8845 11938 9325 12550 10125 12019 8887 11090 8130 10648 6193 9008 6717 9936 7700 9428
+2 1 3 1 5 1 0 1 2 4 9 4 11 4 4 9 6 6 8 10 5 3 8 5 3 8 14 5 6 4 9 8 11 9 8 9 6 5 6 1
+3 6 4 3 4 3 2 2 3 5 3 4 10 10 5 5 10 7 8 8 8 7 8 6 8 10 5 8 10 13 10 12 10 9 7 7 9 10 9 6
+7587 9908 7720 9519 7822 9244 7430 8424 6622 8860 6403 7537 6655 7649 6242 8472 6223 8305 5800 9033 7868 10104 9315 10673 8828 11836 9220 12288 10127 12096 8920 11161 8182 10562 6217 8897 6779 9954 7737 9230
+0 2 1 0 1 0 0 0 0 1 0 1 9 8 2 1 10 3 6 4 5 3 4 2 4 5 1 3 6 9 7 8 8 5 4 3 7 6 6 2
+5 8 6 12 10 6 5 2 3 7 4 2 7 5 8 5 9 7 10 7 5 6 6 8 7 9 8 9 11 8 10 10 8 8 9 10 7 11 9 5
+7525 9805 7680 9685 7882 9058 7372 8041 6533 8758 6345 7207 6667 7497 6290 8270 6297 8236 5910 8921 7799 9866 9235 10524 8786 11678 9194 12103 10155 11861 8952 11105 8182 10419 6291 8977 6788 10032 7773 8983
+2 4 3 10 8 2 2 0 0 3 1 0 4 2 6 2 8 3 10 3 2 2 2 4 3 4 4 4 8 3 7 6 5 4 8 7 4 8 6 1
+5 3 3 5 3 5 9 6 4 4 7 6 12 6 10 10 10 7 10 13 12 10 8 7 11 9 11 12 9 7 14 12 8 11 10 10 8 7 8 5
+7464 9401 7564 9411 7761 8821 7418 7927 6472 8478 6365 7143 6807 7415 6388 8388 6395 8171 6018 9184 7911 9888 9208 10322 8847 11530 9245 12114 10131 11579 9086 11175 8182 10469 6389 9052 6823 9860 7783 8751
+2 0 0 1 0 1 7 3 1 1 4 3 13 3 9 7 9 3 10 12 12 6 4 3 9 4 8 8 5 2 14 8 5 7 9 7 6 3 5 1
+10 7 4 5 4 3 3 7 4 5 7 9 8 7 5 7 8 11 6 10 9 8 10 8 11 10 12 11 10 10 12 10 7 5 6 10 10 6 6 5
+7533 9267 7477 9153 7669 8476 7309 7881 6412 8276 6385 7267 6841 7400 6356 8314 6439 8356 6021 9247 7943 9786 9233 10194 8907 11452 9320 12063 10133 11498 9165 11118 8156 10148 6382 9123 6908 9637 7742 8533
+8 3 1 1 1 0 0 4 1 2 4 7 5 4 2 3 6 9 3 7 6 4 7 4 9 5 10 6 6 5 10 6 3 1 3 7 9 2 3 1
+10 9 4 6 7 8 8 4 7 7 6 5 5 5 8 6 6 6 4 8 7 7 9 9 11 11 14 16 14 9 11 8 7 8 10 9 10 9 8 7
+7600 9263 7392 8972 7656 8458 7331 7653 6430 8209 6378 7138 6798 7263 6401 8183 6431 8223 5972 9183 7923 9628 9232 10135 8965 11440 9445 12322 10237 11361 9217 10942 8131 10030 6478 9128 6991 9611 7753 8451
+8 5 1 2 4 5 5 1 4 3 3 2 2 2 6 2 3 2 1 4 4 3 5 5 9 7 14 14 13 4 8 4 4 4 9 6 9 5 5 3
+10 11 5 8 9 13 14 8 15 9 10 11 7 9 10 8 7 7 8 9 13 10 12 12 13 13 13 13 16 10 10 7 8 11 12 9 10 11 11 9
+7665 9383 7335 8925 7694 8749 7505 7685 6652 8269 6474 7385 6807 7380 6496 8183 6449 8159 6027 9184 8057 9664 9308 10264 9073 11552 9541 12381 10390 11293 9242 10715 8132 10104 6623 9133 7072 9710 7840 8496
+8 8 2 4 6 12 17 5 21 6 9 10 4 7 9 5 4 3 6 5 13 7 10 9 12 9 12 9 16 6 7 3 5 8 13 6 9 8 10 6
+6 6 4 4 5 10 11 10 8 8 5 6 8 9 7 6 8 8 10 10 7 7 7 7 8 8 9 7 9 8 9 10 8 12 12 9 12 10 7 8
+7627 9188 7254 8635 7629 8838 7598 7838 6690 8264 6440 7310 6841 7490 6512 8060 6492 8160 6132 9247 8034 9514 9254 10078 9051 11350 9532 12068 10360 11106 9241 10686 8133 10235 6764 9137 7202 9741 7823 8477
+3 2 1 1 2 7 10 8 6 5 2 3 5 7 4 2 6 5 10 7 4 3 3 3 4 3 5 2 5 3 5 6 5 9 13 5 12 6 4 5
+8 11 13 6 4 7 10 4 5 6 5 7 7 6 5 6 7 5 7 9 8 6 11 8 7 8 13 14 13 13 10 11 11 13 12 9 9 9 9 7
+7641 9312 7405 8485 7540 8737 7663 7613 6650 8136 6407 7301 6849 7409 6477 7945 6508 7977 6157 9245 8037 9311 9304 9964 9003 11160 9626 12204 10433 11238 9265 10720 8211 10419 6901 9141 7252 9709 7857 8398
+5 8 14 2 1 3 8 1 2 2 2 4 4 3 2 2 4 2 4 5 5 2 8 4 3 3 11 10 11 10 7 7 9 11 13 5 7 5 6 3
+3 1 8 8 4 10 14 7 5 3 7 5 6 6 5 10 7 5 10 9 4 11 8 12 15 11 16 12 11 17 10 10 11 12 9 9 9 8 6 6
+7526 8814 7424 8467 7454 8827 7829 7586 6611 7832 6425 7170 6831 7333 6443 8082 6524 7805 6258 9243 7938 9428 9276 10103 9161 11166 9794 12209 10453 11608 9289 10691 8287 10531 6958 9145 7301 9617 7814 8262
+0 0 5 5 1 7 16 4 2 0 4 2 3 3 2 8 4 2 10 5 1 8 4 9 16 7 17 8 7 17 7 6 9 9 7 5 7 4 3 2
+3 3 4 7 7 16 8 5 5 3 6 6 6 9 6 10 13 9 9 10 5 4 6 8 10 9 9 10 11 10 8 9 10 8 7 7 8 7 7 6
+7414 8469 7340 8389 7446 9280 7838 7438 6573 7546 6417 7108 6813 7445 6435 8211 6693 7889 6331 9302 7867 9108 9197 9988 9187 11049 9779 12090 10473 11525 9261 10602 8335 10390 6963 9026 7323 9470 7797 8134
+0 0 1 3 4 18 5 2 2 0 3 3 3 7 3 8 16 6 8 7 2 1 2 4 7 5 5 5 7 5 4 5 8 4 4 3 5 3 4 2
+6 4 8 9 11 10 13 8 4 6 5 8 6 7 11 16 12 9 12 12 9 7 8 9 6 6 7 11 8 9 6 7 8 7 9 6 6 6 9 6
+7382 8206 7361 8438 7541 9337 7974 7483 6511 7461 6384 7173 6796 7428 6555 8701 6832 7968 6479 9481 7900 8991 9171 9941 9111 10754 9713 12040 10416 11386 9183 10395 8331 10196 7019 8853 7293 9270 7832 8014
+3 1 5 6 10 7 14 5 1 3 2 5 3 4 11 19 13 6 14 10 6 3 4 5 2 2 3 6 4 4 2 3 5 3 7 2 3 2 6 2
+13 10 10 6 9 4 9 12 5 3 5 8 5 7 10 8 5 8 10 9 11 10 7 10 10 12 8 13 12 13 10 6 6 9 8 8 7 8 9 8
+7530 8328 7432 8300 7582 9022 8005 7771 6476 7197 6352 7234 6754 7412 6646 8670 6789 7981 6572 9465 7983 9065 9120 9958 9139 10846 9675 12116 10462 11501 9209 10139 8276 10137 7048 8813 7289 9205 7866 8024
+14 8 8 2 6 1 6 12 2 0 2 5 2 4 9 4 2 5 9 5 10 7 3 6 7 9 4 9 9 10 7 2 2 5 5 4 4 4 6 5
+5 5 5 3 6 8 11 9 6 8 10 12 7 5 8 6 11 12 9 9 4 6 6 7 5 8 9 9 10 10 14 10 8 9 11 10 11 9 9 7
+7469 8135 7374 7986 7546 8972 8086 7857 6467 7256 6449 7537 6764 7274 6684 8518 6900 8239 6638 9450 7885 8889 9045 9790 9038 10686 9663 11942 10456 11425 9336 10145 8273 10081 7153 8898 7388 9205 7899 7972
+2 2 2 0 3 4 9 6 3 5 9 12 4 2 6 2 11 11 7 5 1 2 2 3 1 4 5 4 6 5 14 6 5 5 10 7 10 5 6 4
+4 4 3 7 6 3 5 3 5 7 5 10 7 5 7 8 10 9 6 9 7 6 11 9 7 8 11 11 11 9 13 11 7 10 12 11 11 7 6 5
+7384 7892 7266 7936 7511 8618 8011 7569 6433 7250 6415 7699 6774 7144 6696 8498 6983 8297 6625 9435 7867 8724 9100 9755 8991 10536 9702 11901 10476 11292 9435 10212 8245 10090 7281 9039 7484 9082 7855 7800
+1 1 0 4 3 0 2 0 2 4 2 8 4 2 4 5 9 6 3 5 4 2 8 5 3 4 8 6 7 4 12 8 3 6 12 9 10 3 3 2
+13 12 6 6 5 4 6 9 5 3 5 4 4 4 6 6 6 9 8 7 7 6 10 13 10 12 10 12 10 12 14 11 10 9 8 10 11 9 8 6
+7531 8155 7237 7828 7451 8346 7964 7667 6400 6999 6382 7482 6707 6961 6682 8356 6962 8352 6664 9298 7849 8569 9128 9968 9022 10641 9715 11924 10470 11351 9557 10275 8294 10037 7303 9111 7578 9090 7863 7700
+14 11 3 3 2 1 2 6 2 0 2 1 1 1 3 2 3 6 6 3 4 2 7 11 7 9 7 8 6 8 13 8 8 5 5 7 10 6 5 3
+6 11 16 11 5 7 5 7 6 4 4 4 5 3 7 7 7 7 9 8 11 9 6 11 6 12 10 9 10 11 12 13 7 9 9 10 10 6 7 7
+7496 8341 7465 8034 7392 8275 7893 7637 6393 6824 6324 7278 6667 6727 6694 8284 6967 8280 6727 9231 7934 8607 9053 10045 8950 10739 9728 11761 10464 11345 9625 10457 8265 9987 7350 9178 7644 8913 7845 7668
+3 9 22 9 2 3 2 4 3 1 1 1 2 0 4 3 4 3 7 4 10 6 2 8 2 9 6 4 6 7 10 11 3 5 7 7 8 2 4 4
+4 3 5 8 7 3 5 4 7 7 4 8 11 7 5 6 4 6 6 10 8 7 8 8 12 10 12 10 7 13 14 14 7 11 10 8 10 9 9 5
+7411 8024 7406 8043 7386 7962 7823 7424 6412 6844 6268 7332 6781 6753 6654 8155 6895 8151 6712 9291 7940 8520 9031 9933 9033 10709 9791 11669 10381 11463 9742 10689 8237 10063 7422 9118 7708 8931 7879 7515
+1 0 2 5 4 0 2 1 4 4 1 5 11 4 2 2 1 2 3 7 5 3 4 4 10 6 10 5 3 10 13 12 3 8 8 4 8 6 6 2
+5 2 4 6 6 6 5 10 10 5 8 12 9 5 6 5 7 9 9 8 8 7 8 7 9 11 10 9 14 12 8 11 10 11 10 7 8 7 10 8
+7353 7665 7323 7929 7354 7852 7755 7592 6507 6740 6316 7629 6841 6655 6641 7972 6901 8214 6774 9225 7946 8438 9010 9767 9037 10742 9802 11521 10479 11512 9703 10723 8287 10135 7492 9001 7720 8825 7937 7555
+2 0 1 2 3 3 2 8 9 2 6 12 7 2 3 2 4 6 7 4 5 3 4 3 6 7 6 4 12 8 4 7 8 8 8 3 5 3 8 5
+9 5 10 6 2 4 5 5 9 6 6 8 12 9 5 8 7 11 7 6 6 5 5 6 10 9 8 7 9 9 9 7 9 12 7 10 9 8 7 7
+7399 7512 7395 7821 7221 7626 7689 7443 6574 6704 6311 7662 6976 6808 6603 7985 6907 8396 6783 9040 7901 8238 8912 9549 9067 10650 9761 11259 10447 11374 9690 10509 8310 10264 7483 9075 7757 8787 7917 7531
+7 2 8 3 0 1 2 2 7 3 3 5 13 7 2 5 4 9 4 2 3 2 1 2 7 5 4 2 5 4 5 3 6 9 4 7 6 4 4 4
+7 11 12 4 2 3 4 5 3 6 11 8 9 9 8 6 8 9 9 6 4 5 6 10 12 8 10 10 10 9 9 9 9 10 9 9 7 11 11 6
+7393 7737 7517 7597 7091 7352 7599 7303 6486 6670 6434 7693 7031 6952 6642 7874 6939 8445 6843 8866 7806 8050 8842 9590 9147 10502 9772 11197 10441 11244 9678 10431 8332 10262 7526 9083 7742 8935 8000 7447
+4 10 12 1 0 0 1 2 0 3 11 5 7 7 6 3 5 6 7 2 1 2 2 7 10 4 6 6 6 4 5 5 6 6 7 6 4 9 10 3
+12 6 11 9 6 4 6 7 6 12 9 4 6 9 7 9 6 9 10 11 7 5 14 12 9 9 9 10 9 9 11 13 9 11 16 9 10 10 9 8
+7515 7641 7610 7694 7067 7156 7562 7294 6477 7007 6503 7477 7008 7087 6655 7954 6919 8491 6927 9009 7790 7874 8979 9751 9148 10424 9758 11139 10410 11122 9717 10603 8354 10322 7747 9090 7804 9013 8030 7491
+12 3 10 6 3 1 3 4 3 13 7 1 3 7 4 6 3 6 9 9 4 2 14 10 5 5 5 6 5 4 8 10 6 8 21 6 8 7 6 5
+13 5 5 13 7 4 3 8 4 11 8 5 10 11 8 9 8 6 9 6 7 7 12 13 12 9 11 10 9 12 11 9 11 7 8 10 9 9 8 8
+7659 7489 7547 8031 7069 6972 7449 7347 6417 7262 6545 7335 7088 7337 6693 8029 6950 8350 6984 8837 7774 7831 9061 9964 9226 10351 9795 11085 10380 11191 9755 10519 8426 10132 7758 9158 7839 9025 8034 7533
+14 2 2 13 4 1 0 5 1 10 6 2 9 10 6 6 5 2 7 2 4 4 10 11 10 5 8 6 5 8 8 5 9 3 5 7 6 6 5 5
+9 8 10 12 12 8 4 8 4 7 6 8 8 6 8 10 7 8 8 5 7 7 12 11 8 10 9 10 11 13 9 15 10 9 8 11 10 7 8 8
+7697 7531 7614 8286 7199 7045 7365 7397 6359 7256 6534 7386 7115 7265 6730 8161 6955 8340 7014 8613 7758 7791 9141 10041 9200 10344 9780 11034 10402 11318 9741 10809 8471 10077 7768 9284 7898 8913 8037 7572
+6 5 8 11 12 5 1 5 1 4 3 5 5 3 6 8 4 5 5 1 4 4 10 8 4 6 5 6 7 10 5 14 7 5 5 8 8 3 5 5
+6 12 15 12 8 7 5 5 3 5 10 6 5 10 5 5 4 6 14 10 9 10 8 6 8 7 8 10 13 14 9 13 11 13 12 12 10 10 9 6
+7658 7816 7807 8526 7223 7052 7308 7260 6276 7127 6626 7311 7065 7443 6689 7978 6883 8208 7196 8710 7794 7937 9117 9807 9174 10153 9740 10986 10474 11499 9727 10959 8540 10271 7880 9464 7956 8992 8066 7486
+3 12 18 11 5 4 2 2 0 2 9 3 2 8 2 2 1 2 17 7 6 8 4 2 4 3 4 6 11 11 5 10 9 11 12 10 8 7 6 3
+9 10 14 10 5 7 8 7 3 5 8 7 7 11 10 7 5 7 6 7 6 7 10 7 10 6 9 15 14 8 9 10 9 13 9 9 7 7 8 7
+7696 7961 7969 8628 7170 7058 7330 7254 6196 7006 6665 7302 7067 7672 6777 7929 6838 8145 7169 8617 7752 7890 9145 9648 9200 9912 9726 11248 10570 11300 9714 10915 8556 10453 7913 9449 7936 8882 8069 7466
+6 8 16 7 2 4 5 4 0 2 6 4 4 10 9 4 2 3 3 3 3 4 7 3 7 2 5 13 12 3 5 6 6 11 6 5 4 3 5 4
+10 13 12 9 8 14 10 3 6 5 8 6 7 8 8 7 8 4 4 8 6 9 10 7 11 9 12 11 15 11 12 8 9 10 10 11 12 7 5 6
+7759 8282 8076 8663 7195 7494 7402 7003 6194 6892 6703 7232 7069 7703 6812 7883 6871 7902 7092 8591 7711 7969 9172 9499 9251 9870 9789 11248 10689 11297 9778 10751 8572 10440 7971 9557 8044 8779 7995 7386
+8 13 11 6 5 17 8 0 3 2 6 3 4 5 6 4 5 1 1 5 3 6 7 3 8 5 10 7 14 7 10 4 6 6 8 8 11 3 2 3
+15 12 11 4 5 7 5 7 8 4 6 7 7 5 8 7 7 8 6 11 8 9 8 7 7 8 7 9 8 14 12 8 8 9 10 12 7 8 10 7
+7948 8522 8155 8388 7143 7474 7345 7012 6243 6724 6689 7228 7071 7548 6846 7840 6878 7919 7068 8751 7723 8043 9147 9359 9199 9769 9723 11126 10626 11479 9840 10597 8562 10366 8027 9720 8022 8743 8051 7372
+18 11 9 1 2 4 2 4 6 1 3 4 4 2 5 4 4 5 3 9 5 6 4 3 3 4 3 4 4 11 9 4 5 5 8 10 4 4 8 4
+14 9 8 3 8 12 6 5 13 9 5 4 5 6 12 12 8 8 7 8 8 8 12 10 6 7 7 13 11 12 7 9 8 9 8 9 9 8 9 6
+8107 8563 8155 8069 7169 7762 7315 6898 6419 6873 6649 7040 7022 7463 6981 8106 6910 7935 7070 8717 7734 8051 9225 9411 9122 9612 9659 11257 10641 11527 9773 10514 8552 10297 8031 9689 8051 8709 8080 7298
+16 6 5 0 5 12 3 2 16 7 2 1 2 3 13 11 5 5 4 4 5 5 10 7 2 3 3 10 7 8 3 5 5 5 5 5 6 4 6 3
+7 6 6 8 9 14 8 5 14 7 5 12 8 4 7 7 8 6 5 5 6 3 8 10 5 6 6 11 10 13 10 11 13 14 11 9 7 9 7 6
+8083 8417 8104 8076 7220 8156 7336 6791 6616 6890 6610 7354 7051 7260 6985 8049 6942 7827 7021 8501 7694 7752 9199 9460 9022 9403 9571 11257 10630 11634 9784 10558 8670 10539 8111 9660 8028 8739 8057 7228
+4 2 2 5 7 15 5 2 18 4 2 12 5 1 4 4 5 3 2 1 3 0 4 7 1 2 2 7 6 9 6 7 13 12 9 5 4 6 4 3
+12 6 4 7 9 9 6 6 10 5 4 7 5 6 9 7 4 8 11 8 9 6 7 11 6 6 6 10 9 12 12 12 13 12 8 9 10 10 10 6
+8187 8280 8003 8021 7269 8219 7306 6752 6706 6783 6547 7342 7002 7193 7040 7996 6870 7848 7126 8482 7731 7655 9148 9568 8950 9207 9485 11195 10594 11673 9846 10661 8785 10643 8113 9633 8083 8829 8111 7162
+11 2 1 4 7 6 3 3 9 2 1 4 2 3 7 4 1 5 10 5 6 3 3 8 2 2 2 6 5 8 9 9 12 9 5 5 8 7 8 3
+8 7 4 2 4 5 11 7 5 6 8 6 6 7 6 6 11 9 8 7 10 7 7 10 10 14 10 6 8 8 8 12 11 10 9 6 6 9 7 6
+8187 8213 7905 7662 7189 8033 7404 6776 6666 6744 6588 7270 6980 7191 7017 7884 6979 7930 7152 8403 7793 7625 9098 9608 8982 9514 9503 10891 10534 11464 9804 10758 8846 10618 8140 9423 8034 8852 8087 7100
+5 3 1 0 1 2 10 4 2 3 6 3 3 4 3 3 11 6 5 3 8 4 3 7 7 14 7 2 4 3 4 9 9 6 6 2 2 6 4 3
+9 12 8 11 7 8 6 7 11 9 7 7 10 13 12 10 8 13 12 11 11 10 10 5 6 16 13 11 11 12 9 7 15 13 11 8 9 8 8 7
+8212 8457 7912 7878 7188 8042 7372 6799 6780 6892 6602 7263 7061 7558 7148 8025 7009 8252 7280 8574 7879 7781 9126 9338 8911 9926 9598 10913 10552 11513 9789 10542 9008 10779 8217 9349 8063 8812 8089 7104
+6 11 5 10 4 5 3 4 11 7 4 4 9 14 13 8 5 13 12 9 10 8 7 1 2 17 11 7 7 8 5 3 16 10 9 4 6 4 5 4
+13 11 13 13 8 9 7 6 6 4 10 7 4 11 11 8 9 9 11 9 15 8 9 17 11 14 7 9 11 9 11 10 10 11 9 12 10 6 10 9
+8339 8625 8046 8204 7213 8112 7366 6759 6764 6724 6692 7257 6986 7780 7250 8035 7064 8309 7379 8612 8065 7805 9128 9822 8969 10190 9537 10811 10569 11375 9825 10523 9038 10808 8241 9525 8117 8651 8142 7230
+13 9 13 13 5 6 4 3 3 1 9 4 1 10 10 5 7 6 10 6 18 5 6 20 9 13 3 5 7 4 8 6 7 7 6 10 8 2 8 7
+6 7 9 5 5 9 7 5 8 4 5 5 9 11 9 11 12 7 6 7 9 6 11 14 8 8 10 12 13 9 8 9 11 13 11 10 9 5 8 6
+8284 8537 8075 8018 7160 8178 7361 6660 6799 6566 6652 7128 7041 7989 7299 8228 7194 8240 7348 8525 8093 7705 9181 10092 8949 10070 9554 10899 10637 11245 9784 10444 9093 10958 8316 9567 8144 8439 8143 7164
+2 3 6 2 2 6 4 2 6 1 2 2 7 10 7 9 12 3 3 3 6 3 8 13 4 4 7 9 10 4 4 5 9 10 9 7 6 1 5 3
+5 11 8 2 4 7 6 10 8 6 8 11 10 10 10 6 8 11 10 6 7 9 9 6 9 8 7 9 10 15 12 13 10 11 11 8 7 9 8 7
+8205 8700 8077 7659 7083 8117 7330 6874 6833 6540 6690 7376 7120 8124 7372 8102 7218 8421 7420 8382 8069 7795 9181 9855 8955 9957 9494 10798 10627 11491 9846 10616 9121 10976 8389 9484 8119 8485 8144 7164
+2 9 5 0 1 4 3 9 5 3 6 10 9 8 8 2 5 9 8 2 4 6 5 2 6 4 3 5 6 13 9 10 7 7 9 4 4 6 5 4
+14 13 6 7 4 6 9 6 5 6 5 8 7 4 8 6 4 6 10 13 6 8 10 6 8 9 10 12 11 11 13 11 13 13 11 7 6 8 9 8
+8358 8976 8028 7629 7008 7998 7377 6830 6790 6516 6650 7424 7121 7882 7392 7984 7140 8284 7490 8677 8020 7818 9207 9632 8935 9912 9512 10887 10642 11477 9932 10654 9225 11116 8460 9345 8069 8467 8170 7225
+15 12 2 4 1 2 7 3 2 3 2 5 4 1 5 2 1 2 8 13 2 5 7 2 4 5 7 9 7 7 11 7 12 10 9 3 2 5 6 5
+7 8 10 6 4 8 11 5 6 4 5 4 6 8 5 6 9 6 4 7 8 11 9 6 8 10 15 13 9 11 12 10 9 12 8 10 9 9 9 9
+8328 8928 8083 7539 6935 8009 7474 6727 6773 6370 6611 7224 7096 7900 7335 7873 7191 8155 7405 8586 8024 8024 9207 9422 8916 9931 9657 11032 10606 11464 9990 10629 9224 11186 8453 9398 8097 8511 8196 7344
+3 4 8 3 1 5 10 2 3 1 2 1 3 5 2 3 7 2 1 3 5 9 5 2 4 6 15 10 5 7 9 6 5 8 5 7 6 6 6 7
+5 4 4 3 7 8 7 6 4 5 7 3 5 4 9 11 12 5 5 6 7 10 10 11 10 10 6 10 10 13 10 9 12 11 12 8 8 10 6 7
+8247 8638 7983 7270 6940 8019 7466 6692 6706 6295 6624 6974 7046 7671 7381 8076 7318 7972 7347 8439 8002 8156 9232 9532 8949 9949 9569 10984 10596 11574 9996 10544 9300 11190 8548 9325 8099 8614 8144 7333
+2 1 1 0 4 5 4 3 1 2 4 0 2 1 7 9 12 2 2 2 4 8 7 8 7 6 2 6 6 9 6 5 10 7 11 4 5 7 2 4
+6 4 6 3 6 7 8 7 13 15 10 4 7 9 10 7 7 7 8 8 6 9 11 9 6 12 11 10 9 10 12 15 14 11 11 11 9 10 8 7
+8194 8365 7937 7018 6920 7967 7484 6720 6870 6838 6714 6801 7049 7763 7452 8021 7314 7923 7368 8424 7955 8219 9282 9513 8878 10089 9611 10939 10561 11493 10053 10832 9425 11194 8615 9441 8126 8711 8145 7323
+2 1 2 0 3 4 5 4 15 21 9 1 4 6 8 4 4 4 5 5 2 6 8 5 2 9 8 6 5 5 9 14 14 7 9 8 6 7 5 4
+8 6 7 7 7 5 10 10 10 9 11 6 9 12 11 6 5 7 6 6 4 5 7 6 9 9 12 11 10 10 12 14 14 10 10 8 9 9 7 6
+8193 8231 7917 7026 6926 7796 7552 6931 6954 6980 6827 6761 7103 8034 7547 7908 7259 7877 7337 8287 7858 8033 9229 9310 8886 10036 9677 10958 10552 11417 10108 11042 9547 11136 8655 9366 8153 8741 8120 7252
+5 2 4 4 4 2 8 9 9 7 11 3 7 11 10 3 2 4 3 2 1 2 3 2 6 5 10 7 6 5 9 12 13 6 7 4 6 6 4 3
+7 12 8 6 7 6 11 7 3 6 9 10 7 8 9 5 7 5 3 8 7 6 12 10 7 12 11 8 9 8 14 12 12 8 10 9 10 10 6 5
+8167 8474 7923 6973 6932 7696 7644 6945 6857 6929 6886 6969 7104 8043 7588 7740 7256 7711 7230 8281 7840 7919 9305 9365 8843 10171 9716 10792 10518 11223 10213 11116 9615 10959 8694 9357 8205 8830 8070 7124
+3 11 5 3 4 3 10 4 0 3 7 9 4 5 6 2 4 2 0 5 4 3 10 7 3 9 8 4 5 3 13 8 10 3 7 5 8 7 2 2
+8 12 8 13 14 5 3 5 8 10 7 6 5 7 5 6 7 5 6 6 4 5 6 6 9 11 10 11 12 13 10 10 10 13 10 10 8 9 7 5
+8167 8702 7929 7353 7116 7541 7529 6835 6890 7127 6893 6919 7054 7990 7526 7644 7253 7555 7202 8152 7746 7751 9226 9171 8852 10236 9729 10820 10562 11348 10213 11063 9630 11100 8732 9409 8204 8853 8047 7003
+5 11 5 14 17 2 0 2 5 9 4 3 2 4 2 3 4 2 3 2 1 2 2 2 6 8 6 7 9 10 6 6 7 10 7 7 5 6 4 2
+3 3 10 8 8 5 8 7 6 8 9 9 10 9 9 5 10 7 6 8 7 9 9 7 10 14 11 9 9 13 14 14 10 10 10 11 7 8 8 5
+8039 8364 7986 7403 7142 7395 7545 6854 6871 7190 6950 7056 7133 8063 7568 7492 7327 7531 7175 8154 7731 7838 9225 9050 8886 10481 9767 10723 10528 11465 10315 11259 9645 11048 8769 9520 8178 8813 8050 6890
+0 0 8 5 5 2 5 4 3 5 7 7 9 6 7 2 8 4 3 5 4 6 5 3 7 12 8 5 5 10 12 11 7 6 7 8 3 4 5 2
+9 15 12 7 5 7 7 7 3 5 4 6 9 13 13 12 9 11 9 8 8 10 7 10 9 11 10 9 9 10 11 11 12 9 8 9 7 8 8 6
+8068 8783 8093 7388 7091 7381 7535 6872 6776 7065 6878 7001 7184 8377 7711 7779 7374 7754 7225 8156 7742 7982 9173 9121 8894 10527 9778 10632 10495 11391 10338 11259 9710 10938 8754 9501 8152 8775 8053 6845
+6 17 11 4 2 4 4 4 0 2 1 3 7 13 14 12 7 10 7 5 5 8 3 7 6 7 6 5 5 6 7 7 10 5 4 5 3 4 5 3
+3 7 7 9 5 3 4 4 5 4 3 4 7 8 7 7 12 8 6 7 10 11 9 8 10 9 12 10 12 10 10 10 9 7 8 9 8 8 9 8
+7943 8686 8069 7497 7041 7122 7449 6705 6734 6886 6782 6826 7183 8365 7697 7742 7496 7780 7198 8096 7804 8178 9174 9065 8927 10448 9840 10608 10539 11321 10335 11197 9697 10711 8739 9483 8152 8740 8082 6925
+0 3 4 7 2 0 1 1 2 1 0 1 4 5 4 4 12 5 3 4 8 9 5 4 7 5 9 6 9 6 6 6 5 3 4 5 5 4 6 5
+15 11 15 7 5 11 9 8 8 8 6 5 4 6 6 5 11 9 11 10 8 11 13 13 9 6 6 9 10 10 11 12 11 9 12 10 10 9 7 7
+8128 8840 8251 7477 6993 7370 7493 6794 6770 6964 6766 6723 7105 8231 7658 7584 7590 7866 7299 8224 7813 8363 9277 9319 8934 10189 9747 10524 10531 11256 10358 11262 9736 10621 8827 9528 8204 8768 8059 6939
+18 9 18 4 2 10 7 6 6 5 3 2 1 2 3 2 10 6 10 8 5 9 12 12 6 2 2 5 6 6 7 8 8 5 10 7 8 6 4 4
+10 14 13 7 8 6 6 5 6 10 12 9 9 5 7 8 7 13 12 11 7 5 6 7 9 12 11 9 9 8 8 10 12 10 12 7 10 10 7 7
+8180 9169 8377 7458 7022 7296 7459 6693 6754 7160 6903 6872 7157 8044 7645 7620 7579 8192 7423 8406 7796 8168 9198 9189 8941 10314 9784 10445 10498 11072 10303 11200 9799 10598 8913 9386 8254 8856 8036 6952
+8 14 13 4 5 3 3 2 3 9 13 7 7 2 4 5 4 13 12 9 4 2 2 3 6 9 8 5 5 3 4 6 10 6 10 3 8 7 4 4
+5 10 12 12 9 8 9 8 3 8 8 8 10 8 10 10 6 7 6 5 6 6 5 7 10 11 11 11 11 11 11 12 10 10 10 9 11 10 9 9
+8103 9233 8474 7747 7076 7349 7502 6782 6662 7221 6935 6951 7233 8052 7709 7777 7543 8130 7391 8208 7754 8046 9096 9067 8973 10370 9820 10494 10517 11083 10326 11265 9810 10576 8946 9375 8329 8939 8065 7087
+2 7 11 12 7 5 7 6 0 5 5 5 8 5 8 8 3 4 3 2 3 2 1 3 7 7 8 7 7 7 8 8 6 6 7 5 9 7 6 7
+7 5 8 19 14 4 6 4 1 6 9 4 5 3 7 7 4 8 5 7 7 8 6 8 9 11 9 8 12 12 12 8 8 10 9 9 8 9 8 5
+8079 8986 8466 8449 7257 7153 7468 6620 6521 7156 6991 6779 7180 7753 7695 7740 7456 8133 7334 8145 7739 8054 9022 9014 8979 10423 9804 10355 10561 11155 10374 11080 9769 10555 8952 9365 8325 8955 8068 6968
+4 1 5 28 17 1 3 1 0 3 7 1 2 0 4 4 1 5 2 3 4 5 2 4 6 7 5 4 9 8 9 3 4 6 6 5 5 6 5 2
+6 5 9 9 9 6 5 6 6 6 8 4 9 7 7 7 8 8 6 8 10 11 11 11 16 10 11 8 11 11 10 12 16 13 8 5 8 9 9 9
+8030 8754 8484 8495 7305 7092 7409 6591 6511 7095 7020 6618 7230 7717 7681 7705 7474 8136 7304 8147 7801 8246 9077 9148 9163 10412 9840 10225 10578 11161 10370 11152 9934 10720 8933 9110 8321 8970 8096 7102
+2 1 6 6 7 3 2 3 3 3 5 1 7 4 4 4 5 5 3 5 8 9 9 8 18 6 8 4 7 7 6 8 17 10 4 1 5 6 6 7
+6 9 6 6 9 10 8 6 4 4 5 4 10 13 11 6 8 8 6 5 9 12 10 8 11 7 7 7 6 9 7 10 13 14 8 10 9 9 6 6
+7982 8781 8425 8353 7352 7280 7428 6564 6450 6915 6972 6466 7305 8052 7770 7611 7491 8139 7275 7965 7836 8488 9106 9090 9215 10217 9773 10041 10467 11044 10290 11097 10018 10936 8914 9177 8343 8984 8047 7044
+2 6 2 2 7 8 5 3 1 1 2 1 8 13 10 3 5 5 3 2 6 11 7 4 8 3 3 3 2 5 3 6 11 12 4 7 6 6 2 3
+4 11 5 2 9 9 6 2 3 5 5 6 6 5 4 7 7 6 8 9 10 11 7 4 4 8 6 8 7 6 6 6 10 7 7 7 8 7 6 4
+7884 8929 8342 7974 7398 7396 7395 6293 6365 6807 6925 6446 7276 7876 7678 7584 7482 8019 7297 8040 7896 8654 9057 8790 9087 10095 9682 9930 10384 10750 10186 10799 10023 10709 8870 9056 8339 8875 7999 6867
+1 9 1 0 7 7 3 0 0 2 2 3 3 2 1 4 4 2 5 6 8 9 3 1 1 4 2 4 3 2 2 2 6 3 3 3 5 3 2 1
+9 19 21 18 12 10 6 7 6 4 5 8 6 6 8 12 9 10 12 10 9 12 12 8 8 9 8 7 7 8 12 11 10 10 8 6 7 9 11 5
+7917 9560 8670 8601 7520 7566 7363 6345 6359 6644 6879 6550 7247 7772 7690 7866 7525 8152 7421 8171 7928 8872 9137 8754 9064 10042 9644 9764 10303 10596 10238 10826 10028 10680 8853 8881 8309 8895 8080 6762
+6 25 34 25 12 8 3 4 3 1 2 6 3 3 5 12 7 8 12 8 6 10 10 4 4 5 4 3 3 4 9 7 6 6 4 2 3 6 9 2
+7 9 8 11 7 8 9 9 9 8 8 8 9 10 7 7 8 7 6 8 8 12 9 11 9 9 6 5 6 10 10 12 10 9 9 8 7 6 8 4
+7898 9539 8658 8760 7511 7603 7409 6517 6430 6736 6911 6648 7296 7920 7676 7824 7541 8092 7389 8172 7934 9076 9138 8904 9067 9992 9556 9485 10199 10574 10238 10913 10033 10592 8862 8839 8280 8729 8082 6602
+4 5 4 9 4 5 7 7 7 6 5 6 7 8 4 4 5 4 3 5 5 10 5 9 6 5 2 1 2 6 6 9 6 5 6 4 3 2 5 1
+4 10 10 9 5 8 5 9 9 9 13 9 8 11 8 5 9 8 8 4 7 11 9 12 12 8 6 11 11 8 6 7 9 13 11 9 7 9 10 6
+7803 9581 8697 8787 7451 7638 7351 6678 6499 6884 7070 6802 7318 8120 7688 7661 7582 8097 7409 7927 7914 9207 9139 9107 9147 9884 9470 9591 10225 10431 10135 10688 10012 10755 8921 8861 8252 8758 8135 6574
+1 7 7 6 2 5 2 7 7 7 15 7 5 9 5 2 6 5 5 1 4 8 5 10 10 4 2 8 8 4 2 3 5 10 9 6 3 6 8 3
+3 6 5 3 4 9 8 7 4 5 7 10 6 8 5 4 5 7 9 4 7 7 7 10 8 9 10 12 9 11 9 9 12 8 11 9 6 7 8 7
+7684 9374 8607 8444 7367 7732 7371 6707 6439 6778 7072 7008 7288 8124 7623 7447 7520 8041 7454 7697 7895 9084 9089 9174 9123 9843 9489 9752 10199 10480 10112 10599 10068 10601 8979 8882 8199 8662 8136 6609
+0 2 1 0 1 6 5 4 1 2 4 9 3 5 2 1 2 4 7 1 4 3 3 7 4 5 7 10 5 7 5 5 9 4 9 6 2 3 5 4
+9 5 9 7 4 4 7 5 3 5 5 5 5 7 4 4 7 8 13 12 8 7 8 9 12 9 8 15 11 9 11 7 10 13 12 9 9 8 8 7
+7722 9118 8622 8367 7285 7513 7365 6611 6354 6678 7023 6894 7233 8066 7534 7245 7511 8050 7600 7972 7902 8969 9066 9176 9202 9805 9456 10088 10225 10404 10140 10393 10072 10763 9061 8902 8224 8633 8137 6642
+6 1 6 3 1 1 4 2 0 2 2 2 2 4 1 1 4 5 14 11 5 3 4 5 10 5 4 15 8 5 8 3 6 10 10 6 6 4 5 4
+12 9 10 8 7 4 3 5 9 5 10 7 6 6 8 4 5 4 5 10 8 6 9 11 8 11 10 8 10 11 11 12 9 11 14 10 7 8 8 8
+7835 9123 8662 8356 7282 7307 7257 6521 6425 6584 7103 6910 7205 7950 7550 7056 7451 7812 7538 8108 7909 8799 9069 9301 9176 9892 9475 9974 10225 10455 10168 10506 10050 10793 9192 8982 8197 8606 8138 6734
+12 6 7 5 4 1 0 2 7 2 9 4 3 2 5 1 2 1 2 8 5 2 6 8 4 8 7 4 6 7 8 9 5 7 14 7 3 4 5 6
+5 13 9 9 5 7 11 12 10 9 4 2 6 7 6 6 8 7 6 7 7 9 7 13 8 11 9 12 6 9 7 11 10 8 11 11 7 8 8 5
+7767 9374 8675 8407 7228 7298 7357 6867 6520 6741 7027 6618 7178 7903 7514 7001 7469 7773 7503 8051 7890 8824 9021 9541 9151 9974 9468 10112 10123 10380 10093 10551 10054 10636 9243 9118 8171 8581 8139 6637
+2 12 6 6 2 4 10 13 9 7 1 0 3 4 3 3 5 4 3 4 4 6 3 12 4 8 5 9 2 5 3 7 6 4 8 8 3 5 5 2
+8 10 10 9 9 8 8 11 5 5 5 9 10 8 6 8 8 6 7 7 8 12 9 8 8 7 6 9 7 6 9 11 9 8 9 9 6 11 9 9
+7777 9425 8714 8455 7277 7351 7377 7130 6485 6643 6979 6773 7254 7920 7479 7072 7487 7675 7494 7998 7897 9031 9025 9460 9127 9805 9385 10058 10049 10125 10071 10593 10033 10489 9242 9123 8120 8741 8165 6791
+5 7 7 6 7 5 5 10 2 2 2 7 8 5 3 5 5 3 4 4 5 10 6 4 4 3 2 5 3 2 5 7 5 4 5 6 2 9 6 7
+5 6 6 3 6 8 4 2 7 10 5 6 12 10 8 6 9 10 12 10 10 12 12 10 11 8 8 11 8 8 9 8 10 7 8 9 6 9 8 8
+7710 9228 8649 8132 7248 7401 7295 6825 6502 6858 6932 6735 7379 8059 7496 7016 7530 7828 7613 8132 7955 9226 9106 9506 9180 9708 9355 10130 10002 10009 10049 10448 10038 10289 9215 9128 8070 8769 8165 6875
+2 2 2 0 3 5 1 0 4 9 2 3 12 8 5 3 7 8 12 8 8 10 10 7 8 4 4 8 4 4 5 4 6 3 4 6 2 6 5 5
+4 3 5 7 5 2 4 7 7 8 9 10 11 7 8 7 9 9 7 8 9 7 11 10 8 11 10 10 10 7 6 6 11 10 8 6 7 8 7 8
+7619 8858 8560 8074 7194 7079 7215 6845 6518 6938 6989 6945 7475 8005 7513 7025 7572 7911 7601 8135 7986 9102 9159 9550 9155 9801 9377 10136 10007 9838 9951 10189 10068 10286 9189 8948 8047 8734 8140 6954
+1 0 1 4 2 0 1 4 4 5 7 9 10 4 5 4 7 6 4 5 6 3 8 7 4 8 7 6 6 3 2 2 8 6 4 2 4 4 3 5
+4 11 11 5 8 10 8 11 6 6 4 7 9 6 6 8 6 5 6 6 5 7 8 8 10 8 13 11 10 10 12 15 11 11 10 9 7 10 7 5
+7531 9002 8627 7896 7218 7268 7239 7110 6508 6890 6916 6958 7518 7893 7478 7095 7536 7743 7564 8015 7914 8985 9134 9468 9182 9704 9475 10203 10012 9862 10009 10499 10097 10344 9215 8964 8025 8824 8115 6843
+1 9 9 2 5 8 5 11 3 3 1 4 7 3 3 5 3 2 3 2 2 3 4 4 7 4 12 8 6 6 9 14 8 7 7 6 4 7 4 2
+4 11 10 9 5 5 6 7 9 5 5 9 8 7 7 9 9 9 7 7 8 5 8 12 10 9 9 9 8 7 11 8 8 14 15 11 9 8 8 6
+7445 9137 8667 7975 7165 7139 7211 7113 6575 6783 6871 7093 7534 7849 7470 7222 7577 7831 7554 7964 7920 8753 9110 9637 9208 9674 9468 10143 9966 9700 10040 10360 10049 10583 9368 9101 8054 8786 8116 6801
+1 8 7 6 2 2 3 4 7 2 2 7 5 4 4 7 7 6 4 4 5 1 4 10 7 5 5 5 4 3 8 4 4 12 16 8 6 4 5 3
+4 7 12 9 12 6 3 9 8 6 10 5 8 5 7 8 12 6 7 8 5 7 7 11 13 12 11 7 10 13 14 14 10 11 9 10 10 11 10 6
+7361 9018 8757 8049 7292 7079 7107 7239 6615 6744 6955 6974 7550 7685 7462 7280 7694 7729 7544 7977 7850 8657 9061 9734 9310 9830 9512 9964 9972 9916 10147 10598 10053 10623 9364 9169 8108 8934 8169 6761
+1 3 11 6 12 3 0 7 6 3 9 2 5 2 4 5 12 3 4 5 2 3 3 8 12 9 8 3 6 11 13 12 6 7 5 7 8 9 8 3
+3 4 7 6 11 11 4 3 5 8 6 5 7 7 5 5 8 6 7 6 9 16 10 9 12 13 9 6 7 13 10 13 10 10 11 8 6 9 9 5
+7253 8722 8717 7934 7391 7330 7031 6988 6577 6830 6934 6862 7540 7653 7403 7150 7706 7633 7534 7867 7884 9120 9090 9702 9384 10038 9504 9734 9902 10119 10149 10760 10057 10600 9411 9110 8058 8950 8195 6662
+0 1 3 2 10 10 1 0 2 6 3 2 4 4 2 2 5 3 4 3 6 18 7 5 10 11 5 2 3 11 6 10 6 6 8 4 2 6 6 2
+9 11 12 8 12 7 5 3 3 3 3 6 8 6 7 7 8 7 8 7 11 9 8 11 7 7 10 12 6 9 11 10 8 10 10 7 6 11 10 8
+7302 8874 8806 7949 7513 7320 6983 6753 6489 6604 6837 6818 7556 7562 7397 7151 7718 7605 7550 7825 7968 9125 9067 9795 9328 9865 9522 9887 9808 10064 10176 10728 10010 10578 9431 8993 8010 9088 8246 6753
+7 9 11 5 12 4 2 0 0 0 0 3 5 3 4 4 5 4 5 4 10 6 4 8 3 3 7 9 2 5 8 6 4 6 7 3 2 9 8 6
+9 5 7 9 8 9 9 8 7 6 6 6 5 6 6 9 10 6 6 6 11 10 7 13 12 8 6 9 8 9 12 17 14 11 9 6 8 9 8 7
+7349 8648 8765 8025 7529 7433 7038 6839 6505 6576 6819 6777 7495 7476 7365 7274 7780 7517 7514 7724 8050 9191 9019 10006 9401 9764 9437 9846 9767 10013 10228 11128 10117 10619 9425 8822 8014 9095 8244 6777
+7 1 3 6 5 7 7 5 4 3 3 3 2 3 3 7 8 3 3 3 9 7 3 11 10 4 2 5 4 5 9 17 13 7 5 2 5 6 5 4
+7 4 6 12 8 7 10 6 12 13 14 7 5 4 5 4 7 10 11 5 11 13 8 10 10 10 9 10 9 9 10 11 10 8 9 8 10 10 12 8
+7344 8374 8699 8280 7545 7417 7117 6797 6649 6980 7006 6800 7435 7273 7308 7083 7764 7680 7607 7567 8130 9438 8998 10020 9421 9792 9431 9869 9753 9965 10228 11136 10120 10473 9419 8784 8069 9163 8344 6861
+4 1 2 11 5 4 9 3 13 15 18 4 2 1 2 1 4 8 10 2 9 12 4 6 7 6 5 6 5 5 6 7 6 4 5 4 8 7 11 5
+13 9 8 9 7 11 9 8 9 8 9 5 7 2 4 5 9 7 6 6 11 8 8 7 8 7 10 12 8 13 7 9 10 10 11 9 8 8 11 9
+7492 8424 8686 8336 7535 7647 7169 6880 6713 7052 7061 6699 7428 6959 7227 6965 7800 7649 7570 7481 8208 9363 8977 9848 9390 9634 9451 10014 9714 10165 10151 11020 10122 10459 9465 8809 8072 9104 8416 7002
+14 6 4 6 4 10 7 5 7 5 7 2 4 0 1 2 6 4 3 3 9 4 4 3 4 3 7 9 4 11 3 5 6 6 8 6 5 4 9 7
+5 10 3 1 4 10 7 8 8 6 6 8 5 4 9 8 8 6 7 8 7 6 7 8 8 9 9 9 11 9 7 9 9 11 9 9 8 8 9 6
+7432 8532 8545 7897 7449 7802 7168 6958 6749 6997 7038 6788 7370 6787 7276 7038 7809 7558 7559 7523 8182 9169 8931 9748 9360 9608 9445 9966 9752 10108 10076 10911 10099 10507 9458 8833 8074 9049 8435 6950
+2 7 0 0 1 8 4 5 6 3 3 6 2 1 7 5 5 3 4 5 3 2 3 4 4 5 5 5 8 5 3 5 5 7 5 6 5 4 6 3
+9 13 8 5 5 5 3 4 8 7 4 4 5 6 7 5 7 9 8 7 7 8 7 13 13 8 10 9 14 10 12 16 8 7 8 13 9 11 11 9
+7476 8818 8536 7730 7390 7641 7065 6786 6785 7007 6964 6626 7313 6748 7273 6922 7792 7657 7574 7501 8156 9110 8886 9961 9458 9523 9464 9921 9866 10115 10131 11239 10051 10306 9426 9101 8102 9181 8505 7085
+7 12 5 2 2 2 0 1 6 4 1 1 2 3 4 2 4 6 5 4 3 4 3 11 12 4 7 5 13 6 9 15 4 3 4 12 6 8 9 7
+6 9 7 13 12 6 3 3 7 12 9 7 6 8 8 6 7 11 13 9 8 10 12 10 17 10 9 10 14 9 8 11 13 10 9 9 10 11 10 7
+7442 8841 8501 8064 7512 7551 6965 6563 6794 7323 7020 6658 7283 6834 7295 6875 7776 7873 7717 7603 8156 9177 8970 9977 9656 9566 9457 9940 9977 10061 10082 11240 10132 10302 9420 9107 8155 9305 8548 7089
+3 6 3 13 12 3 0 0 4 12 7 4 3 5 5 3 4 10 14 6 5 7 10 6 20 7 5 6 13 5 4 7 11 6 5 6 8 8 7 4
+6 8 11 16 8 3 7 6 6 7 7 7 9 6 5 6 6 11 8 7 7 7 9 9 11 9 7 9 10 11 13 9 10 9 10 10 11 7 6 5
+7409 8802 8569 8563 7528 7282 6970 6537 6777 7313 7023 6688 7331 6792 7240 6831 7735 8076 7728 7576 8131 9056 8976 9931 9696 9545 9399 9896 9983 10133 10162 11118 10134 10236 9440 9174 8232 9176 8487 6970
+3 4 9 20 5 0 4 3 3 4 4 4 7 3 2 3 3 9 5 4 4 3 6 5 8 5 3 5 6 8 11 4 6 5 7 7 9 3 2 2
+7 14 11 10 13 13 8 7 6 4 5 8 12 7 14 12 9 16 26 17 13 13 6 9 17 8 10 11 11 12 10 10 7 8 13 13 16 16 13 11
+7402 9134 8636 8663 7672 7643 7000 6574 6761 7119 6975 6778 7454 6814 7417 7158 7771 8574 8199 8165 8260 9311 8905 9888 9888 9463 9419 9978 10014 10262 10163 11065 10059 10113 9536 9422 8435 9608 8607 7227
+4 14 9 7 14 14 5 4 3 1 2 6 12 4 17 13 6 20 54 23 13 12 2 5 19 4 7 8 8 9 6 6 3 4 12 12 20 18 13 10
+13 14 13 7 4 5 4 5 7 10 8 5 3 5 12 11 15 13 12 10 11 10 7 9 13 14 10 10 13 12 10 9 12 11 9 9 9 8 8 9
+7549 9446 8752 8573 7582 7491 6927 6486 6771 7306 7005 6678 7344 6712 7538 7404 7960 8858 8301 8289 8334 9366 8861 9847 9973 9755 9439 9993 10096 10383 10164 10954 10114 10182 9528 9409 8454 9523 8596 7346
+14 14 12 3 1 2 1 2 4 8 5 2 0 2 12 10 18 12 11 8 9 7 3 5 11 13 7 6 11 9 6 5 9 8 5 5 6 4 4 7
+11 9 11 3 2 7 8 4 8 8 3 4 4 8 7 8 5 6 8 13 7 5 7 7 11 13 7 10 11 12 13 8 7 11 7 7 7 8 10 7
+7641 9432 8814 8242 7443 7471 6958 6342 6806 7359 6906 6523 7262 6800 7528 7451 7889 8695 8298 8590 8304 9111 8818 9686 10005 9968 9382 10007 10125 10497 10242 10788 10040 10246 9469 9274 8421 9443 8637 7335
+10 5 9 0 0 4 5 1 6 5 0 1 1 6 4 5 2 2 5 13 3 1 3 3 8 11 3 6 8 9 11 4 3 8 3 3 3 4 7 4
+13 9 9 8 3 4 6 8 6 9 8 2 2 8 12 11 6 11 8 9 8 4 5 5 4 7 10 9 14 11 12 12 12 14 7 7 10 12 9 7
+7782 9419 8824 8239 7333 7268 6937 6452 6789 7470 6938 6254 7131 6883 7646 7679 7845 8849 8295 8627 8301 8810 8725 9412 9857 9800 9403 9959 10230 10543 10293 10877 10096 10491 9411 9147 8466 9613 8651 7324
+14 5 6 5 0 1 3 6 3 7 5 0 0 5 12 10 3 9 5 6 5 1 1 1 1 3 7 5 13 7 9 9 9 12 3 3 7 10 6 4
+19 11 2 9 10 4 7 7 3 6 5 3 3 5 5 5 5 13 9 10 11 9 8 7 6 6 14 14 11 8 10 10 13 11 11 8 8 11 8 7
+8073 9529 8654 8297 7405 7077 6942 6494 6696 7390 6892 6063 7029 6777 7582 7525 7776 9116 8317 8723 8374 8834 8711 9277 9764 9580 9526 10221 10255 10401 10291 10838 10176 10537 9457 9089 8459 9712 8639 7314
+29 8 0 6 8 1 4 4 0 3 2 0 0 2 2 2 2 12 6 7 9 6 4 3 2 2 13 13 8 4 6 6 11 7 8 4 5 8 4 4
+19 11 4 8 5 7 7 9 9 7 6 5 9 5 8 8 5 5 6 6 7 9 5 11 11 7 6 9 9 10 11 8 12 12 9 9 6 9 7 8
+8357 9633 8540 8290 7347 7082 6947 6657 6758 7376 6873 6006 7083 6677 7597 7565 7709 8876 8262 8568 8343 8856 8621 9396 9801 9435 9441 10160 10229 10391 10315 10679 10228 10642 9450 9096 8401 9682 8602 7366
+28 8 1 5 2 4 4 7 7 4 3 2 7 2 5 5 2 1 2 2 3 6 1 8 8 3 2 5 5 6 8 4 9 9 5 6 2 5 3 5
+17 17 10 7 7 9 7 10 9 6 5 14 11 7 7 7 8 9 5 4 5 12 7 13 14 10 9 7 5 12 12 6 9 8 7 9 10 9 8 9
+8582 10099 8582 8222 7342 7210 6952 6871 6819 7302 6829 6505 7187 6706 7586 7541 7721 8896 8183 8299 8262 9061 8584 9630 9914 9483 9435 9980 10101 10504 10364 10406 10202 10495 9393 9103 8446 9654 8591 7476
+22 19 7 3 4 7 4 9 7 3 2 19 10 4 4 4 5 6 2 1 2 10 3 11 13 7 5 3 1 9 9 2 5 4 3 6 7 5 5 7
+8 7 8 6 6 9 13 14 6 13 20 19 20 16 11 12 12 13 12 10 10 14 9 7 10 13 12 7 8 12 13 10 15 21 18 19 26 23 14 13
+8572 9923 8572 8097 7312 7330 7110 7318 6802 7662 7169 7282 7518 7286 7677 7825 7835 9160 8285 8415 8311 9377 8599 9482 9922 9712 9506 9811 10053 10611 10437 10396 10330 11155 9618 9724 8899 10487 8734 7826
+5 3 5 2 3 7 15 17 3 14 36 32 34 22 10 12 12 12 11 7 8 14 6 3 6 11 10 3 4 9 11 6 14 27 22 25 51 34 15 14
+7 5 12 7 4 12 15 10 9 8 13 9 7 10 14 11 7 9 7 5 13 14 10 7 8 9 9 7 7 11 6 8 9 12 12 10 15 12 8 11
+8536 9634 8664 8041 7231 7627 7315 7493 6862 7693 7322 7398 7509 7463 7843 8031 7818 9163 8257 8217 8435 9674 8639 9343 9878 9682 9498 9652 9980 10650 10329 10263 10302 11222 9684 9754 9060 10595 8720 8032
+3 1 11 4 1 12 20 8 7 5 15 7 4 8 16 9 4 5 3 2 13 13 7 3 4 5 5 3 3 7 2 4 5 8 10 6 16 9 4 9
+10 7 8 6 5 4 5 6 7 6 5 5 6 10 12 7 9 8 13 5 11 11 8 7 9 12 14 10 8 6 5 7 10 11 14 9 8 11 6 6
+8578 9486 8652 7927 7178 7415 7260 7412 6869 7600 7267 7261 7474 7629 7953 7979 7852 9104 8383 8031 8505 9769 8627 9212 9861 9838 9618 9687 9935 10379 10198 10077 10300 11224 9800 9721 9038 10635 8655 7918
+7 3 4 3 2 1 2 3 4 3 2 2 3 8 11 4 6 4 13 2 9 8 4 3 5 9 13 7 4 2 1 3 6 7 13 5 4 7 2 3
+11 6 8 8 12 11 4 7 5 5 7 12 7 6 4 7 8 10 5 4 8 12 7 6 8 8 13 12 13 11 11 12 9 9 8 9 8 11 7 6
+8645 9285 8640 7942 7305 7645 7181 7397 6825 7451 7264 7562 7466 7539 7856 7930 7860 9172 8301 7794 8497 9920 8590 9027 9819 9739 9710 9843 10019 10432 10224 10209 10272 11103 9759 9690 9016 10672 8617 7811
+9 2 4 5 12 10 1 4 2 2 4 12 4 3 1 4 5 7 2 1 5 9 3 2 4 4 11 9 11 7 8 9 5 4 4 5 4 7 3 3
+12 10 5 7 14 7 5 6 5 5 5 13 6 6 6 8 10 12 8 8 7 7 8 5 8 12 10 15 14 9 12 12 10 8 11 11 7 4 7 7
+8735 9342 8552 7895 7480 7616 7129 7321 6782 7311 7210 7906 7432 7455 7813 7945 7919 9358 8298 7817 8463 9754 8580 8792 9778 9891 9723 10174 10126 10359 10275 10333 10271 10928 9796 9784 8969 10277 8580 7772
+11 7 1 4 17 4 2 3 2 2 2 14 3 3 3 5 8 10 5 5 3 3 5 1 4 9 7 15 13 5 9 9 6 4 8 8 3 1 3 4
+1 5 7 7 5 5 6 5 10 9 6 4 6 13 7 10 8 5 7 12 7 7 6 9 10 7 5 9 14 13 8 11 8 7 10 8 9 8 10 9
+8542 9088 8517 7851 7421 7466 7104 7188 6868 7425 7183 7677 7399 7806 7796 8082 7925 9103 8269 8085 8430 9598 8519 8817 9789 9727 9608 10116 10231 10536 10223 10388 10219 10702 9807 9688 8975 10151 8621 7858
+0 1 3 4 2 2 3 2 9 7 3 1 3 14 4 8 5 1 3 11 3 3 2 6 6 3 1 5 13 10 4 7 4 3 6 4 6 4 7 6
+3 6 7 7 5 4 8 7 10 7 5 5 5 7 3 9 6 6 8 11 7 8 6 10 11 10 9 10 12 10 11 10 10 7 8 9 8 8 7 7
+8405 8911 8483 7810 7363 7263 7131 7186 6952 7409 7131 7523 7342 7767 7678 8150 7880 8925 8267 8275 8398 9513 8459 8902 9825 9757 9598 10123 10282 10518 10248 10379 10219 10489 9766 9659 8955 10033 8584 7816
+0 2 3 4 2 1 5 4 9 4 2 2 2 4 0 6 3 2 5 9 3 4 2 7 8 6 5 6 9 6 8 6 6 3 4 5 4 4 3 4
+6 6 4 10 5 4 5 5 11 9 6 8 16 15 11 12 9 12 12 12 10 7 6 12 12 11 12 11 13 9 9 10 6 8 9 9 7 7 7 6
+8348 8744 8373 7955 7307 7072 7080 7062 7059 7517 7106 7563 7567 8222 7767 8398 7913 9126 8367 8515 8443 9372 8401 9105 9886 9847 9665 10191 10357 10439 10222 10370 10117 10351 9752 9632 8910 9861 8548 7715
+2 2 1 8 2 1 2 2 11 7 3 5 22 18 10 11 6 10 11 11 7 3 2 10 9 8 10 8 11 5 5 6 2 4 5 5 3 3 3 3
+1 3 7 8 7 8 3 4 4 5 4 5 12 9 14 8 5 10 9 7 6 8 6 7 11 10 10 7 8 6 7 13 10 8 8 9 7 10 10 7
+8165 8403 8342 7969 7303 7139 6979 6884 6985 7373 7030 7416 7684 8281 7930 8385 7843 9192 8388 8434 8385 9301 8344 8988 9920 9870 9679 10009 10302 10181 10145 10546 10120 10221 9713 9607 8866 9883 8590 7682
+0 0 3 5 4 5 0 1 1 2 1 2 12 6 16 5 2 7 6 3 2 4 2 3 8 6 7 3 4 2 3 10 6 4 4 5 3 6 7 4
+4 6 4 3 5 5 6 6 9 9 7 6 4 5 5 5 7 6 14 10 7 10 9 11 8 16 17 10 8 8 11 12 13 9 9 9 8 8 8 5
+8063 8267 8236 7675 7248 7017 6958 6839 7040 7483 7033 7339 7594 8091 7859 8189 7826 9009 8536 8542 8354 9357 8365 9124 9876 10260 9871 10022 10249 10061 10172 10650 10199 10160 9700 9583 8849 9781 8580 7528
+1 2 1 0 2 2 3 3 7 7 4 3 1 2 2 2 4 2 15 7 3 7 6 8 4 17 19 6 4 4 8 9 11 5 5 5 4 4 5 2
+13 9 8 5 8 4 5 13 10 8 10 5 3 5 6 6 9 7 10 12 9 8 6 9 10 9 9 10 6 5 7 10 8 10 9 8 9 8 8 6
+8194 8323 8234 7521 7271 6841 6912 7227 7119 7525 7113 7205 7481 7912 7816 8066 7860 8898 8578 8766 8375 9287 8309 9129 9885 10197 9854 10035 10146 9764 10097 10625 10148 10164 9687 9499 8858 9685 8570 7444
+13 6 5 2 5 1 2 15 9 5 9 2 0 2 3 2 6 3 7 11 6 4 2 6 6 5 5 6 2 1 3 6 4 6 5 4 6 4 5 3
+12 16 15 14 8 7 10 6 6 8 7 4 6 5 7 10 6 6 7 11 8 11 12 7 10 6 7 7 9 10 15 11 13 11 10 7 9 8 11 8
+8296 8806 8411 7929 7293 6860 6995 7162 7094 7565 7114 7018 7447 7744 7799 8196 7817 8732 8542 8915 8370 9405 8408 9011 9893 9953 9786 9862 10122 9792 10228 10663 10226 10229 9700 9359 8866 9595 8637 7488
+11 19 17 16 5 4 9 3 3 5 4 1 3 2 4 8 3 2 3 9 5 8 11 3 6 2 3 3 5 6 15 7 11 8 7 3 6 4 9 5
+17 23 16 6 11 11 6 5 7 11 8 6 8 6 5 9 6 9 11 9 9 7 9 11 7 7 7 8 14 7 10 10 8 8 8 10 9 6 9 5
+8523 9690 8610 7821 7392 7124 6973 7039 7095 7786 7140 6965 7465 7648 7732 8257 7775 8761 8609 8933 8391 9270 8428 9146 9824 9785 9720 9761 10227 9634 10228 10637 10175 10106 9662 9411 8874 9387 8651 7345
+22 37 19 3 10 10 3 2 4 10 5 3 5 3 2 6 3 6 9 6 6 3 6 8 3 3 3 4 13 3 6 6 4 4 4 7 6 2 6 2
+14 8 11 8 4 6 5 13 11 7 5 7 9 6 4 5 6 9 11 8 7 8 8 8 9 8 10 11 9 12 9 9 7 10 7 7 11 9 8 8
+8668 9600 8676 7843 7309 7065 6926 7415 7199 7748 7089 6977 7508 7557 7641 8068 7734 8788 8675 8888 8360 9205 8422 9088 9808 9689 9732 9851 10201 9793 10202 10551 10099 10114 9599 9276 8933 9376 8639 7395
+15 4 9 5 1 3 2 14 10 4 2 4 7 3 1 2 3 6 9 4 3 4 5 4 5 4 6 8 5 10 5 5 3 6 3 3 9 5 4 5
+4 6 4 5 5 3 6 12 9 6 6 8 7 6 7 9 11 11 7 9 9 6 7 7 10 12 13 8 12 12 8 9 11 11 8 8 11 11 7 7
+8553 9392 8561 7679 7254 6825 6906 7707 7249 7651 7065 7049 7499 7472 7629 8136 7822 8936 8637 8907 8381 9021 8390 8972 9818 9844 9821 9751 10253 9942 10151 10470 10128 10182 9563 9210 8991 9489 8602 7381
+1 2 1 2 2 0 3 12 7 3 3 5 4 3 4 6 10 9 3 6 6 2 3 3 6 9 11 4 9 9 4 5 8 8 4 4 9 8 3 4
+6 10 5 4 3 5 10 7 6 6 4 6 12 6 4 7 6 8 7 9 5 7 9 11 8 14 12 9 19 10 7 10 10 12 11 12 11 8 8 7
+8492 9442 8475 7464 7149 6722 6989 7674 7221 7560 6990 6994 7618 7392 7540 8077 7780 8891 8600 8925 8299 8909 8410 9109 9777 10113 9882 9718 10482 9959 10076 10456 10130 10308 9605 9394 9047 9411 8591 7368
+2 7 1 1 0 2 9 4 3 3 1 3 12 3 1 4 3 4 3 6 2 3 6 8 4 13 9 5 23 6 3 6 6 9 8 10 9 4 5 4
+6 8 9 10 4 8 12 7 4 6 9 9 7 7 5 7 6 14 7 5 4 6 10 10 10 15 10 8 13 10 10 9 13 9 6 9 11 8 7 5
+8433 9367 8493 7630 7072 6810 7121 7643 7142 7475 7045 7127 7606 7378 7479 8022 7739 9217 8564 8696 8194 8743 8455 9176 9788 10427 9890 9626 10552 9975 10080 10381 10209 10242 9518 9383 9102 9337 8555 7233
+2 4 6 8 1 6 13 4 1 3 7 7 4 4 2 4 3 14 3 1 1 2 7 7 6 14 6 4 10 6 6 5 11 5 2 5 8 4 3 2
+9 9 7 11 10 15 13 8 10 11 9 5 5 4 2 5 6 6 5 5 5 5 5 9 9 9 11 9 8 8 10 10 12 12 9 8 10 10 7 8
+8452 9357 8459 7848 7151 7322 7275 7675 7219 7702 7099 7006 7543 7181 7343 7847 7699 9032 8478 8481 8117 8525 8371 9178 9773 10354 9924 9601 10493 9868 10083 10372 10260 10364 9510 9311 9130 9391 8520 7290
+6 5 3 10 9 19 15 5 8 10 7 2 2 1 0 2 3 2 1 1 2 1 1 5 5 5 8 5 4 4 6 6 9 9 5 4 7 7 3 5
+14 14 11 10 9 12 13 10 8 9 8 8 9 6 10 10 10 7 6 7 10 6 10 8 5 6 8 10 9 16 12 14 9 12 8 9 8 11 7 6
+8598 9655 8529 7991 7202 7619 7425 7828 7243 7792 7126 7077 7584 7118 7415 7990 7762 8920 8419 8402 8169 8382 8417 9118 9656 10101 9880 9639 10461 10258 10138 10609 10233 10479 9477 9305 9106 9503 8486 7221
+15 13 9 8 7 12 14 8 5 6 5 5 6 3 8 8 8 3 2 3 8 2 7 4 1 2 4 7 5 17 9 12 5 9 4 5 4 8 3 3
+9 13 8 10 13 11 16 8 8 7 2 5 8 6 15 11 4 5 7 8 4 7 16 9 10 9 10 8 10 10 15 14 10 10 9 10 8 11 8 6
+8613 9874 8520 8125 7354 7837 7648 7849 7266 7754 6999 6959 7599 7059 7613 8186 7670 8692 8387 8389 8067 8309 8615 9123 9670 10047 9888 9552 10455 10256 10268 10832 10233 10464 9470 9361 9083 9608 8478 7156
+6 11 5 8 14 10 21 5 5 4 0 2 5 3 19 9 1 1 3 5 1 3 19 6 7 5 6 4 6 6 14 12 6 6 5 7 4 8 5 3
+4 8 10 10 6 11 17 10 7 9 6 5 5 10 14 4 7 6 6 9 9 5 7 8 10 11 5 6 7 9 13 14 12 12 11 9 11 10 9 6
+8500 9773 8562 8251 7323 8042 7891 7992 7263 7841 6977 6848 7537 7249 7780 7940 7657 8539 8331 8438 8095 8117 8578 9067 9684 10120 9768 9347 10372 10193 10343 11042 10284 10573 9514 9352 9137 9645 8496 7095
+1 4 7 8 3 9 24 8 4 6 3 2 2 8 16 1 4 2 2 6 6 2 3 4 7 8 1 2 3 5 11 12 9 9 8 5 8 7 6 3
+8 10 4 8 5 6 4 2 2 7 8 4 7 7 8 6 4 5 7 5 8 10 8 10 9 9 10 9 9 11 9 7 12 11 11 8 8 9 7 7
+8492 9801 8450 8247 7268 7928 7796 7635 7132 7800 7007 6682 7527 7244 7790 7832 7568 8333 8301 8238 8097 8244 8568 9137 9672 10065 9779 9339 10343 10257 10314 10809 10334 10614 9557 9282 9113 9619 8462 7099
+5 6 1 5 2 2 1 0 0 4 5 1 4 4 5 3 1 2 3 2 5 8 5 7 5 5 6 5 5 8 5 3 9 7 8 4 4 5 3 4
+8 11 7 4 6 7 4 6 7 4 7 4 9 7 4 4 4 6 10 9 7 14 8 10 12 9 7 6 8 8 10 13 13 10 10 9 8 8 8 7
+8484 9888 8417 7997 7239 7882 7703 7545 7132 7577 7011 6526 7569 7239 7697 7607 7481 8201 8349 8296 8073 8609 8558 9203 9737 10014 9713 9147 10289 10133 10312 10959 10408 10591 9574 9278 9090 9533 8455 7103
+5 8 3 1 3 4 1 3 4 1 4 1 7 4 1 1 1 2 7 6 4 15 5 7 10 5 3 2 4 4 6 10 11 6 7 5 4 4 5 4
+6 5 5 8 6 3 6 7 7 6 9 10 8 12 10 7 6 19 18 10 6 8 8 11 9 5 9 10 10 10 11 12 8 6 8 12 11 9 7 9
+8425 9601 8334 8008 7211 7593 7664 7522 7132 7491 7066 6748 7584 7541 7760 7580 7447 8876 8600 8412 8024 8583 8548 9326 9723 9720 9700 9212 10287 10139 10335 11038 10352 10324 9539 9458 9144 9513 8422 7229
+2 1 2 5 3 0 3 4 4 3 7 9 5 12 8 4 3 27 25 7 2 5 5 8 5 1 5 7 6 6 8 8 4 2 4 10 8 5 3 7
+11 5 8 11 10 9 6 6 6 16 14 8 7 7 8 5 6 10 6 8 6 8 6 11 8 9 8 9 12 10 9 12 9 9 9 8 12 11 7 6
+8495 9332 8330 8203 7286 7690 7626 7439 7107 8024 7247 6834 7573 7518 7770 7432 7414 8957 8538 8398 7977 8559 8487 9442 9684 9689 9662 9212 10336 10145 10307 11112 10323 10257 9530 9382 9222 9618 8390 7163
+9 1 5 9 8 6 3 3 3 21 17 5 4 4 5 2 3 7 2 5 2 5 2 8 4 5 4 5 9 6 5 8 5 5 5 4 10 8 3 3
+7 8 15 19 12 10 13 12 11 6 7 4 4 5 7 7 7 9 7 10 6 7 7 6 10 9 11 11 8 10 15 9 10 10 13 8 10 10 7 4
+8461 9263 8505 8878 7410 7842 7767 7729 7210 7911 7245 6669 7486 7374 7754 7416 7407 8972 8503 8508 7931 8475 8454 9244 9697 9660 9701 9335 10282 10150 10433 10998 10320 10255 9624 9310 9247 9655 8359 6978
+3 4 17 27 12 8 14 12 10 3 4 1 1 2 4 4 4 6 3 7 2 3 3 2 7 5 8 8 4 6 14 5 6 6 11 4 7 7 3 1
+6 7 7 13 16 12 9 11 6 5 4 4 4 8 9 5 8 4 5 6 7 11 8 10 7 8 11 11 10 12 11 9 11 10 9 9 12 12 8 10
+8403 9137 8471 9144 7634 8108 7803 7941 7183 7743 7166 6514 7401 7423 7790 7278 7426 8679 8418 8366 7911 8642 8447 9303 9633 9571 9740 9450 10280 10278 10453 10891 10343 10254 9613 9304 9322 9812 8354 7173
+2 3 3 12 22 11 6 10 3 2 1 1 1 5 6 2 5 1 1 2 4 9 5 7 3 4 8 8 6 9 7 5 7 6 5 5 10 10 5 9
+7 7 6 8 8 9 7 10 8 8 7 5 7 11 9 10 6 6 9 7 8 6 6 9 8 7 6 6 7 14 9 13 8 12 16 13 12 12 10 9
+8372 9018 8412 9086 7647 8174 7787 8078 7208 7769 7166 6430 7395 7653 7825 7455 7393 8526 8437 8294 7918 8492 8389 9297 9597 9426 9650 9251 10202 10521 10422 11036 10289 10376 9782 9544 9396 9960 8401 7295
+3 3 2 4 5 6 4 8 5 5 4 2 4 10 6 8 3 2 6 3 5 2 2 5 4 3 2 2 3 12 5 10 4 9 17 12 10 9 7 7
+3 3 9 16 11 9 7 8 7 7 5 6 5 8 6 11 9 9 11 7 9 8 11 10 9 8 5 7 8 7 12 11 10 11 13 12 9 12 8 6
+8239 8661 8432 9523 7737 8236 7771 8084 7207 7732 7114 6412 7338 7685 7783 7683 7438 8567 8507 8226 7950 8474 8460 9353 9587 9351 9536 9126 10151 10319 10468 11049 10287 10429 9870 9708 9391 10099 8395 7225
+0 0 6 18 10 6 4 5 4 4 2 3 2 5 3 10 7 6 9 3 6 5 9 7 5 4 1 3 4 3 9 7 6 7 11 10 5 9 5 3
+9 12 6 14 13 10 7 13 9 7 3 9 11 8 4 11 13 10 6 6 8 8 6 11 7 8 9 6 10 10 11 6 9 11 10 11 7 12 9 7
+8263 8878 8374 9811 7876 8356 7755 8397 7257 7698 7013 6580 7436 7715 7690 7897 7584 8667 8448 8101 7956 8457 8402 9467 9526 9281 9528 8947 10153 10314 10487 10754 10260 10479 9879 9801 9335 10230 8415 7221
+6 10 2 13 14 7 4 13 7 4 0 7 10 5 1 10 14 7 2 2 5 5 2 8 3 4 5 2 6 6 7 2 5 7 6 8 3 9 6 4
+9 5 10 15 8 7 11 15 6 4 7 9 6 6 6 13 15 7 6 6 7 8 11 12 6 9 11 7 11 12 13 7 11 12 12 9 9 8 7 8
+8286 8652 8420 10143 7883 8284 7842 8814 7229 7481 7016 6738 7403 7620 7651 8221 7778 8577 8390 7983 7936 8441 8473 9636 9441 9277 9571 8840 10180 10432 10557 10538 10285 10587 9939 9765 9332 10107 8383 7279
+6 1 7 15 5 3 10 17 3 1 4 7 3 3 3 13 19 3 2 2 4 5 9 10 2 5 8 3 8 9 10 3 8 9 9 5 5 4 3 5
+10 5 14 15 8 6 11 9 6 7 13 11 7 8 6 7 5 6 3 7 10 8 10 12 5 7 8 10 11 9 10 9 11 10 14 13 12 8 7 6
+8334 8440 8567 10456 7890 8155 7927 8838 7201 7462 7173 7009 7397 7654 7613 8157 7711 8431 8257 7934 7993 8426 8517 9795 9333 9150 9536 8923 10207 10359 10549 10458 10309 10566 10048 9977 9405 9992 8352 7210
+8 1 15 14 5 2 10 6 3 4 15 11 4 5 3 3 2 2 0 4 8 5 7 10 1 3 4 7 8 5 6 5 8 6 13 11 10 4 3 3
+12 11 14 8 9 7 9 8 7 6 4 5 6 11 9 4 4 7 6 7 8 10 10 8 6 8 8 14 11 11 10 9 9 10 12 11 8 10 10 10
+8432 8609 8711 10320 7923 8095 7959 8799 7200 7382 7096 6895 7365 7870 7653 7913 7620 8355 8204 7888 7997 8534 8560 9698 9253 9092 9502 9247 10233 10413 10541 10383 10281 10546 10103 10054 9374 10006 8399 7391
+11 9 15 4 6 4 6 4 4 3 1 2 3 10 6 1 1 3 2 4 5 7 7 4 2 4 4 14 8 7 6 5 5 6 9 8 4 6 7 8
+6 3 6 7 8 9 6 5 4 8 10 7 8 11 8 6 8 7 7 7 8 9 7 6 8 12 9 13 11 9 11 11 9 8 10 8 8 6 9 9
+8374 8276 8646 10130 7929 8162 7913 8578 7122 7430 7174 6911 7385 8073 7666 7806 7634 8283 8178 7844 8001 8574 8525 9484 9226 9283 9494 9490 10258 10341 10559 10435 10254 10404 10106 9942 9344 9774 8419 7500
+2 0 2 3 5 6 3 1 1 5 9 4 5 9 5 3 5 3 3 4 5 6 3 2 4 10 5 12 8 5 7 7 5 4 6 4 4 2 6 7
+4 7 11 17 10 7 5 7 7 5 7 6 15 9 5 6 6 7 6 6 7 7 6 8 9 6 8 10 12 10 9 10 10 10 10 8 8 8 8 6
+8267 8209 8711 10566 7986 8102 7843 8493 7123 7291 7173 6864 7584 8141 7602 7706 7596 8216 8127 7742 7980 8489 8465 9406 9225 9094 9461 9534 10308 10334 10525 10423 10253 10394 10109 9837 9315 9679 8413 7418
+1 3 9 18 8 4 2 3 4 2 4 3 19 6 2 3 3 3 2 3 4 3 2 4 5 2 4 7 9 6 5 6 6 6 6 4 4 4 5 3
+8 5 6 7 7 3 3 4 8 3 7 9 14 11 5 5 10 9 10 11 8 6 8 6 6 7 10 10 9 10 11 8 6 9 11 11 10 10 11 7
+8265 8023 8646 10362 7965 7800 7723 8229 7149 7037 7172 7005 7752 8328 7540 7550 7662 8275 8179 7953 7985 8348 8458 9210 9148 8978 9480 9576 10280 10328 10543 10289 10150 10323 10137 9922 9338 9712 8484 7403
+5 2 2 3 4 0 0 1 5 0 4 7 16 9 2 2 8 6 8 10 5 2 5 2 2 3 7 7 5 6 7 4 2 5 8 8 7 7 9 4
+17 14 11 11 7 5 7 5 6 4 4 4 8 10 10 14 10 8 4 9 8 5 12 7 11 7 7 10 11 9 12 11 9 8 7 7 11 8 9 6
+8493 8401 8711 10416 7945 7639 7709 8042 7123 6860 7095 6830 7762 8442 7607 7957 7726 8270 8077 8028 7990 8154 8553 9087 9200 8869 9422 9615 10304 10261 10586 10347 10126 10195 10062 9756 9386 9620 8502 7327
+22 15 9 7 4 2 4 2 3 1 1 1 5 7 8 16 8 5 1 6 5 2 11 3 8 3 3 7 8 5 9 7 5 4 3 3 8 4 6 3
+25 19 6 12 7 5 5 6 7 4 9 7 4 7 11 6 5 5 4 8 8 7 8 8 5 8 6 5 8 9 10 14 10 8 9 9 11 9 9 5
+8920 9064 8646 10528 7925 7487 7644 7928 7124 6694 7147 6850 7670 8365 7698 7848 7660 8081 7977 8037 7995 8094 8543 9033 9098 8828 9340 9345 10251 10198 10577 10586 10128 10074 10040 9723 9432 9595 8519 7194
+47 26 2 9 4 2 2 2 4 1 7 4 1 3 10 3 2 2 1 5 5 4 5 4 1 4 2 1 4 5 6 12 6 4 5 5 8 5 6 2
+9 17 16 8 6 11 13 10 9 4 4 8 7 5 5 3 4 5 4 5 5 10 8 5 8 6 6 5 8 11 9 14 13 9 10 12 9 8 6 4
+8927 9564 8839 10387 7880 7713 7785 8066 7176 6538 7070 6930 7657 8170 7633 7561 7571 7903 7880 7861 7923 8222 8534 8798 9075 8666 9260 9091 10199 10261 10543 10810 10207 10022 10044 9876 9426 9510 8459 7008
+6 20 19 4 3 10 14 8 7 1 1 5 4 2 2 0 1 2 1 2 2 8 5 1 4 2 2 1 4 8 5 12 11 5 6 9 5 4 2 1
+6 7 5 3 9 15 10 6 5 5 6 5 6 6 8 9 3 7 6 5 12 15 11 8 7 8 9 10 10 11 9 10 11 8 9 7 11 11 9 6
+8857 9420 8746 9948 7913 8171 7846 7950 7124 6452 7046 6821 7619 8048 7646 7660 7458 7858 7836 7696 8031 8650 8602 8761 9027 8637 9258 9159 10200 10321 10509 10775 10233 9912 10023 9713 9471 9615 8477 6956
+2 3 1 0 6 18 8 2 2 2 3 2 3 2 5 6 0 4 3 2 11 17 9 4 3 4 5 7 6 8 5 6 8 4 5 3 8 8 6 3
+8 9 14 9 8 4 3 4 5 5 5 5 10 8 8 6 5 7 6 6 6 8 4 8 11 6 10 11 11 12 10 13 11 10 10 9 9 10 7 5
+8840 9407 8885 9904 7919 7926 7726 7718 7073 6372 6997 6718 7684 8056 7659 7569 7399 7816 7793 7602 7983 8622 8489 8726 9082 8487 9282 9285 10226 10439 10502 10927 10258 9931 10028 9683 9464 9652 8444 6845
+4 5 14 5 5 1 0 1 2 2 2 2 8 5 5 3 2 4 3 3 2 4 1 4 9 2 7 8 8 9 6 10 8 6 6 5 5 7 3 2
+7 14 15 17 20 9 8 5 3 5 9 6 9 6 8 8 5 8 7 7 9 11 9 9 13 10 12 8 9 9 10 16 14 12 7 8 12 7 7 7
+8798 9702 9046 10354 8232 8003 7737 7562 6973 6296 7052 6683 7722 7941 7672 7606 7342 7838 7777 7575 8013 8780 8507 8755 9187 8592 9357 9219 10200 10365 10495 11254 10359 10072 9956 9593 9534 9502 8412 6864
+3 13 16 19 32 6 5 2 0 2 7 3 6 2 5 5 2 5 4 4 6 9 6 6 12 7 10 4 5 5 6 15 12 9 3 4 10 3 3 4
+2 3 8 11 11 10 11 7 9 5 5 11 10 8 7 5 10 14 7 8 10 9 10 8 6 7 8 12 8 15 13 12 15 15 7 10 13 10 6 6
+8629 9304 9024 10408 8307 8137 7825 7538 7028 6225 7003 6957 7784 7956 7659 7456 7414 8227 7761 7612 8068 8806 8550 8721 9111 8506 9327 9403 10149 10664 10565 11316 10483 10389 9886 9631 9628 9546 8355 6820
+0 0 4 7 9 8 10 4 7 2 2 11 8 5 4 2 8 15 4 5 8 6 7 4 2 3 4 10 4 14 10 8 14 14 3 7 11 7 2 3
+9 9 7 5 6 8 7 6 7 6 7 10 8 8 7 3 5 6 3 7 8 6 12 11 6 10 9 11 8 14 8 7 10 11 10 8 10 8 7 8
+8643 9298 8977 10090 8253 8140 7808 7454 7031 6220 7007 7153 7794 7970 7646 7192 7356 8102 7643 7585 8071 8646 8643 8873 9036 8610 9324 9514 10100 10884 10505 11067 10476 10441 9894 9544 9643 9464 8325 6902
+6 5 3 1 2 5 4 3 4 3 4 9 5 5 4 0 2 2 0 4 5 2 11 9 2 7 5 8 4 12 4 3 6 7 6 4 7 4 3 5
+10 5 11 9 13 9 4 12 10 3 8 7 8 9 5 8 11 6 6 9 7 9 10 9 13 6 11 10 13 12 10 11 7 7 7 7 6 8 7 6
+8682 9047 9034 10037 8379 8204 7715 7744 7111 6031 7036 7153 7803 8044 7582 7251 7453 7984 7605 7682 8048 8680 8682 8893 9142 8462 9372 9557 10180 10968 10498 11078 10393 10244 9825 9401 9555 9387 8296 6856
+7 1 9 5 13 6 1 12 9 0 5 4 5 6 2 5 10 2 3 6 4 6 7 6 12 2 8 7 11 8 6 7 3 3 3 3 2 4 3 3
+5 8 14 10 8 6 7 6 6 3 6 6 9 11 8 5 6 6 6 6 11 13 10 10 10 7 11 8 11 12 13 13 11 12 9 7 7 7 7 5
+8593 8995 9166 10049 8374 8080 7701 7648 7086 5853 7013 7092 7838 8237 7597 7123 7420 7873 7568 7589 8128 8957 8720 8973 9169 8384 9419 9475 10207 11047 10568 11212 10414 10366 9809 9267 9495 9253 8267 6751
+1 4 14 6 5 2 4 3 3 0 3 3 6 9 5 2 3 3 3 3 9 12 7 7 7 3 8 4 8 8 10 10 7 9 5 3 3 3 3 2
+6 8 6 4 4 7 6 3 3 4 5 3 6 5 5 8 8 9 5 6 6 8 7 8 9 6 7 8 6 11 10 13 12 11 9 7 7 7 5 5
+8531 8946 9090 9691 8267 8025 7662 7373 6985 5747 6965 6850 7795 8049 7535 7187 7439 7953 7506 7502 8078 8911 8681 8926 9170 8249 9362 9398 10105 11060 10559 11337 10460 10419 9794 9141 9436 9127 8188 6653
+2 4 2 1 1 4 3 0 0 1 2 0 3 2 2 5 5 6 2 3 2 4 3 4 5 2 3 4 2 7 6 10 9 7 5 3 3 3 2 2
+5 14 12 5 8 10 5 7 4 5 13 5 3 4 3 6 15 8 7 7 5 5 6 8 10 8 7 8 10 9 10 17 13 10 12 7 9 7 6 5
+8445 9269 9169 9416 8265 8157 7598 7360 6912 5709 7123 6746 7677 7811 7423 7124 7636 7967 7497 7481 8004 8683 8617 8881 9196 8245 9307 9325 10108 10949 10551 11701 10531 10408 9856 9022 9430 9009 8136 6561
+1 14 10 1 5 8 2 4 1 2 15 2 0 1 0 3 19 5 4 4 2 1 2 4 7 5 3 4 6 5 6 16 10 6 9 3 5 3 2 2
+4 11 9 9 6 12 9 10 6 9 11 4 7 4 6 8 9 9 7 6 10 5 9 14 8 10 8 9 11 12 13 12 13 11 15 9 10 11 8 6
+8336 9388 9170 9404 8212 8404 7638 7532 6892 5919 7226 6587 7664 7588 7391 7188 7675 8041 7488 7400 8059 8469 8631 9208 9170 8364 9279 9318 10136 11029 10619 11736 10600 10459 9993 9033 9450 9144 8137 6535
+1 8 5 5 2 11 6 8 3 8 10 1 4 1 3 5 6 6 4 3 8 1 6 14 4 7 4 5 8 8 10 8 10 7 15 6 7 8 5 3
+8 10 6 10 8 3 3 4 4 9 11 6 8 3 5 7 7 5 7 4 8 6 8 9 10 11 8 7 9 13 13 10 9 14 11 7 8 7 7 5
+8332 9439 9094 9454 8211 8084 7524 7325 6822 6116 7326 6560 7677 7317 7334 7186 7662 7865 7480 7201 8062 8329 8620 9208 9196 8537 9251 9189 10113 11165 10686 11646 10565 10691 10024 8921 9418 9025 8112 6450
+5 7 2 7 5 0 0 1 1 8 10 3 5 0 2 4 4 2 4 1 5 2 4 5 7 9 4 3 5 10 10 5 5 12 8 3 4 3 4 2
+7 14 13 4 4 4 4 11 10 6 6 4 6 5 5 4 5 4 5 8 8 9 6 8 8 8 11 11 8 13 10 13 11 14 8 8 7 9 9 5
+8302 9732 9199 9132 8108 7844 7438 7561 6907 6117 7296 6412 7638 7185 7278 7000 7598 7638 7421 7260 8065 8382 8558 9147 9170 8516 9301 9313 10065 11293 10674 11745 10582 10909 9978 8877 9361 9036 8139 6370
+3 13 12 1 1 1 1 10 9 3 3 1 3 2 2 1 2 1 2 5 5 6 2 4 4 5 8 8 4 10 6 9 7 12 4 4 3 6 6 2
+14 17 14 6 7 8 6 6 10 8 8 10 12 7 5 6 9 7 4 8 8 9 7 9 8 10 7 8 8 13 12 13 9 11 13 13 12 10 10 9
+8452 10192 9327 8952 8084 7864 7405 7475 6990 6241 7318 6641 7754 7183 7224 6948 7638 7609 7337 7315 8068 8432 8523 9151 9145 8619 9247 9245 10018 11414 10714 11839 10547 10930 10061 9143 9434 9108 8191 6540
+15 19 14 2 4 5 3 3 9 6 5 9 12 4 2 3 6 4 1 5 5 6 3 5 4 7 3 4 4 10 9 9 5 7 11 12 10 7 8 7
+10 9 6 7 8 9 9 10 10 6 8 9 8 8 4 8 9 7 6 6 7 8 10 12 12 9 11 11 15 15 10 12 7 12 13 10 12 14 8 8
+8496 10133 9247 8844 8086 7945 7450 7640 7071 6235 7339 6795 7764 7243 7145 7022 7677 7582 7307 7244 8045 8417 8565 9339 9223 8654 9297 9366 10151 11650 10702 11865 10462 11011 10142 9208 9505 9421 8191 6639
+7 5 2 3 5 6 7 8 9 3 5 7 5 5 1 5 6 4 3 3 4 5 7 10 10 6 8 8 15 13 6 8 3 8 11 7 10 14 5 6
+7 10 11 8 12 11 14 9 9 7 10 5 5 5 11 9 8 5 7 9 7 8 9 8 8 8 7 7 12 10 11 15 7 10 10 9 9 7 8 10
+8462 10139 9297 8804 8190 8144 7621 7734 7124 6290 7411 6694 7697 7115 7247 7153 7689 7434 7303 7362 8023 8403 8581 9270 9197 8626 9243 9234 10204 11565 10716 12074 10379 10964 10144 9208 9497 9285 8191 6855
+3 6 8 4 11 9 16 6 7 4 8 2 2 2 10 7 5 2 4 7 4 5 6 4 4 4 3 3 9 5 7 12 3 6 6 5 5 3 5 9
+13 11 9 8 9 12 6 5 9 7 4 7 5 4 4 7 9 4 5 9 10 10 6 5 9 9 8 8 12 8 12 11 14 14 9 7 7 6 10 8
+8583 10206 9294 8767 8215 8392 7584 7577 7176 6342 7328 6722 7632 6933 7168 7153 7727 7233 7248 7473 8078 8513 8520 9021 9197 8661 9216 9171 10256 11362 10755 12025 10477 11166 10120 9085 9438 9096 8242 6935
+13 8 5 4 6 11 3 2 7 4 1 4 2 1 1 4 6 1 2 7 8 7 2 1 5 6 4 4 9 3 9 6 12 12 5 3 3 2 8 5
+9 9 4 6 5 6 6 5 1 5 7 9 6 5 7 8 7 12 12 8 5 6 11 8 5 8 6 9 9 9 10 12 13 11 11 11 8 7 10 7
+8598 10146 9164 8609 8137 8257 7548 7429 7022 6268 7324 6871 7594 6824 7168 7215 7713 7536 7373 7516 8004 8370 8588 8971 9095 8632 9139 9173 10230 11233 10742 12040 10547 11171 10148 9215 9406 8980 8291 6948
+6 5 1 2 2 2 3 2 0 2 4 7 3 2 4 5 4 12 12 5 2 2 9 4 1 4 2 5 5 4 6 8 10 7 8 8 4 3 8 4
+7 7 7 6 4 13 9 4 4 6 4 5 5 7 8 5 6 8 8 14 8 8 7 9 8 7 7 7 13 10 9 13 9 10 10 7 11 11 7 6
+8562 9967 9114 8461 8036 8560 7589 7229 6948 6260 7243 6765 7532 6844 7193 7089 7673 7575 7393 7925 8008 8359 8552 8985 9072 8544 9089 9052 10306 11173 10703 12116 10513 11115 10150 9092 9452 9117 8262 6899
+3 3 3 2 1 13 6 1 1 3 1 2 2 4 5 2 3 5 5 16 5 5 3 6 4 3 3 3 11 6 5 9 5 6 6 3 8 8 3 3
+11 11 9 3 3 11 10 3 6 5 5 6 6 6 5 6 7 6 5 6 5 8 8 6 9 11 7 7 8 10 9 13 10 13 9 10 9 9 7 7
+8629 10044 9116 8137 7912 8722 7655 6979 6927 6191 7190 6727 7497 6802 7141 7032 7660 7489 7336 7818 7935 8348 8543 8814 9075 8707 9041 8938 10253 11117 10665 12187 10506 11246 10126 9160 9446 9122 8234 6915
+9 8 6 0 0 9 8 0 3 2 2 3 3 3 2 3 4 3 2 3 2 5 5 2 6 9 3 3 4 6 5 9 6 10 5 7 5 6 3 4
+9 15 13 4 6 7 4 3 4 3 7 9 10 8 6 9 7 6 4 11 7 8 8 12 7 9 7 10 11 11 10 7 10 10 11 11 9 10 8 8
+8643 10362 9220 7894 7867 8628 7566 6744 6856 6003 7189 6876 7565 6885 7116 7163 7647 7408 7255 8024 7915 8338 8534 9022 9027 8737 8994 9016 10278 11125 10654 11885 10499 11185 10154 9286 9440 9189 8232 6991
+6 14 12 1 3 3 1 0 1 0 4 7 8 5 3 7 4 3 1 9 4 5 5 10 3 6 3 7 8 7 6 2 6 6 8 8 5 7 5 5
+5 8 9 8 7 11 6 3 6 7 9 9 11 10 8 8 9 10 5 11 9 6 5 8 10 9 9 8 10 8 11 9 10 13 11 10 9 8 6 6
+8555 10231 9219 7911 7849 8786 7530 6523 6838 6072 7239 7016 7657 7086 7142 7224 7686 7577 7201 8218 7947 8206 8448 8972 9057 8765 8999 8966 10277 10949 10669 11724 10492 11312 10181 9343 9434 9129 8179 6940
+1 4 5 5 4 9 3 0 3 5 7 7 10 9 5 5 6 8 2 9 6 2 1 4 7 6 6 4 6 4 7 4 6 10 8 7 5 4 2 3
+11 6 10 8 6 9 7 6 6 4 7 9 8 7 6 6 7 7 5 5 6 9 6 5 10 11 7 10 10 9 14 16 8 11 10 13 8 11 9 6
+8622 9985 9244 7927 7806 8811 7520 6500 6820 5953 7237 7147 7670 7090 7117 7159 7673 7552 7149 8032 7901 8266 8390 8740 9086 8914 8953 9042 10276 10845 10760 12003 10434 11309 10182 9581 9402 9257 8204 6892
+9 2 7 5 3 6 4 3 3 1 4 7 5 4 3 3 4 4 2 2 3 6 2 1 7 9 3 7 6 5 12 14 4 7 6 12 4 8 6 3
+10 7 6 9 6 8 8 4 6 6 7 10 15 9 5 7 7 8 5 6 7 11 6 5 11 9 9 7 10 11 14 17 11 10 9 12 10 16 13 8
+8662 9815 9166 8004 7764 8773 7536 6355 6803 5964 7235 7332 7861 7217 7067 7159 7660 7590 7098 7918 7882 8445 8333 8522 9140 8932 8959 8929 10275 10870 10849 12327 10454 11244 10157 9743 9422 9684 8331 6969
+7 3 2 6 3 4 5 1 3 3 4 8 18 7 2 4 4 5 2 3 4 9 2 1 8 6 6 3 6 7 12 15 7 6 5 10 7 18 13 5
+4 8 9 6 7 6 6 5 7 8 9 9 10 6 4 7 13 9 3 5 10 7 7 9 9 6 9 9 11 9 11 18 13 14 11 11 9 8 10 10
+8548 9717 9167 7892 7749 8615 7501 6280 6812 6097 7284 7445 7920 7152 6992 7159 7801 7687 6997 7750 7940 8368 8303 8563 9141 8764 8965 8946 10299 10770 10859 12693 10525 11429 10184 9834 9416 9594 8378 7165
+1 4 5 3 4 2 3 2 4 6 7 7 8 3 1 4 14 6 0 2 8 3 3 6 5 2 6 6 8 5 7 17 10 11 8 8 5 4 7 9
+3 6 10 4 4 4 3 3 4 8 5 6 7 5 6 7 11 7 4 5 6 6 7 7 10 9 11 11 9 10 13 12 10 12 14 11 10 8 5 6
+8411 9502 9193 7664 7657 8343 7390 6087 6744 6222 7229 7366 7901 7030 6970 7159 7887 7655 6924 7592 7895 8234 8274 8479 9168 8791 9022 9085 10271 10738 10920 12668 10517 11480 10287 9919 9436 9509 8296 7103
+0 2 7 1 1 1 0 0 1 6 2 3 4 2 3 4 10 4 1 2 3 2 3 3 7 6 9 9 5 6 10 7 6 8 13 8 7 4 2 3
+7 10 7 10 6 4 5 7 5 9 7 7 5 6 8 9 4 3 7 10 10 12 7 8 10 11 10 9 8 10 10 11 11 11 14 11 9 8 10 6
+8379 9546 9142 7818 7619 8088 7333 6151 6703 6401 7227 7354 7831 6976 7000 7282 7792 7380 6930 7750 7953 8477 8246 8461 9194 8939 9052 9092 10219 10708 10903 12583 10535 11467 10388 9999 9430 9429 8344 7045
+3 7 3 8 3 1 2 4 2 7 4 4 2 3 5 7 1 0 4 8 8 11 3 5 7 9 7 6 4 6 6 6 7 7 12 8 5 4 7 3
+11 13 10 13 7 4 4 5 5 4 4 4 6 8 10 9 6 5 5 6 8 10 8 5 5 7 8 8 10 12 11 10 11 10 12 8 10 11 7 6
+8451 9771 9169 8147 7607 7848 7252 6089 6663 6262 7148 7158 7788 7048 7080 7398 7750 7244 6884 7653 7958 8582 8244 8260 9092 8832 9030 9038 10219 10802 10912 12442 10553 11393 10435 9890 9450 9539 8314 6990
+9 11 7 13 4 1 1 2 2 1 1 1 3 5 9 7 3 2 2 3 5 7 5 2 1 3 4 4 6 9 7 5 7 5 9 4 7 8 3 3
+6 8 11 20 15 6 6 6 4 3 2 5 4 7 6 7 8 9 7 10 10 9 13 9 4 7 6 8 11 8 9 11 12 7 11 10 8 8 8 7
+8393 9676 9221 8886 7800 7745 7224 6092 6598 6070 7020 7035 7695 7055 7056 7384 7761 7362 6891 7808 8014 8620 8370 8317 8967 8732 8957 8987 10245 10645 10869 12371 10596 11139 10455 9910 9418 9458 8310 7000
+2 4 8 30 19 3 3 3 1 0 0 2 1 4 3 4 5 7 4 8 8 6 13 6 1 3 2 4 8 4 5 6 9 3 7 6 4 4 5 4
+12 8 8 8 6 6 3 5 5 3 7 5 6 8 7 9 7 9 5 8 7 6 8 7 6 11 9 9 7 9 9 13 10 12 11 10 9 11 9 8
+8490 9586 9195 8844 7758 7648 7120 6033 6561 5890 7023 6920 7656 7123 7058 7493 7746 7473 6846 7831 7992 8471 8365 8248 8896 8883 8963 9000 10168 10559 10827 12427 10587 11207 10475 9929 9412 9566 8332 7071
+11 4 4 4 3 3 0 2 2 0 4 2 3 5 4 7 4 7 2 5 4 2 5 3 2 9 6 6 3 5 5 9 6 8 7 6 5 8 6 5
+14 10 8 7 7 6 3 3 6 6 7 5 7 13 10 13 8 6 6 8 7 8 10 6 8 8 8 9 8 9 9 10 9 13 13 10 8 10 9 5
+8635 9625 9169 8743 7743 7557 7018 5855 6550 5905 7026 6812 7643 7494 7137 7842 7757 7393 6828 7852 7971 8454 8411 8121 8878 8841 8943 9012 10118 10478 10786 12295 10552 11333 10545 9947 9381 9606 8354 6953
+15 7 4 3 4 3 0 0 3 3 4 2 4 14 9 14 5 3 3 5 4 5 7 2 4 4 4 6 4 5 5 5 5 10 10 6 4 7 6 2
+9 8 14 9 5 4 10 7 9 7 6 8 11 12 12 11 7 6 4 10 10 9 9 10 10 6 6 9 10 8 10 11 10 11 12 10 7 11 14 9
+8649 9539 9297 8771 7677 7349 7098 5933 6616 5980 7003 6894 7733 7781 7265 8047 7742 7318 6759 7995 8027 8499 8431 8248 8912 8679 8873 9024 10121 10340 10772 12233 10544 11328 10588 9964 9325 9705 8503 7088
+6 4 14 6 2 1 9 5 7 5 3 5 10 12 12 9 4 3 1 8 8 6 6 8 7 2 2 6 6 4 6 6 6 7 9 6 3 8 15 7
+7 11 5 4 10 12 13 5 4 5 9 8 9 12 8 6 8 10 11 12 8 8 7 8 8 7 9 8 9 10 10 10 8 13 11 8 7 6 8 5
+8612 9642 9192 8490 7740 7645 7253 5884 6553 5928 7058 6971 7770 8051 7288 7932 7753 7493 6871 8252 8031 8480 8399 8244 8894 8588 8881 8974 10098 10334 10758 12113 10485 11447 10604 9857 9271 9491 8495 6969
+3 8 1 1 8 12 15 2 1 2 7 5 6 11 5 2 5 8 11 11 5 5 3 5 4 3 6 4 5 6 6 5 4 10 7 4 3 2 5 2
+39 129 294 436 540 548 441 254 120 109 150 195 149 82 46 98 75 131 208 175 90 42 19 24 19 20 29 23 73 140 65 48 52 36 24 17 14 15 13 10
+9393 16988 16477 34767 21350 40854 18345 21136 9456 12269 10716 18533 11384 12605 8281 13476 9476 15091 12016 18508 10131 10551 8675 9223 9157 9301 9400 9848 11711 18315 12151 14335 11552 12971 10952 10310 9397 9843 8615 7165
+103 342 878 790 1417 892 1266 637 450 347 542 520 520 240 144 287 257 376 729 461 309 107 27 42 26 29 60 36 218 358 182 102 138 67 35 19 14 15 13 9
+66 417 1181 1450 1694 1731 1128 764 424 380 414 338 207 164 72 221 143 291 416 246 172 128 44 146 87 79 184 428 359 579 262 515 428 195 134 100 126 67 60 49
+10845 41588 46254 121765 64119 144751 46721 66806 20058 34879 21031 38186 16391 21924 9914 26245 12894 32063 22350 32511 14274 17781 9583 17639 11152 13596 13868 35552 20595 52788 18545 45115 22204 24173 14103 15835 12383 13368 9933 9745
+201 653 1800 945 1982 990 1701 833 1147 679 1083 556 601 378 238 461 461 537 1043 440 532 329 123 386 280 217 584 762 944 753 721 766 1080 427 404 263 408 177 187 143
+198 195 409 446 1050 1023 408 264 224 263 370 348 209 111 68 192 231 736 702 229 229 239 191 265 161 128 181 629 591 549 354 331 266 216 231 101 199 111 81 88
+15635 51073 55554 141860 89358 198916 55984 79017 25283 48944 29964 57275 21324 27428 11404 36466 18477 75357 39736 44629 19771 31397 14226 32861 14989 20644 18148 72063 35188 83350 27131 62744 28449 35993 19655 21090 17160 19385 11755 14566
+589 219 488 212 906 399 484 207 482 332 736 394 510 191 202 300 630 724 1187 308 596 436 599 475 479 295 485 633 1085 476 757 342 532 349 604 215 559 260 248 237
+202 213 228 370 573 418 413 327 376 521 230 104 82 59 38 73 144 292 258 176 137 149 207 184 76 82 98 259 261 241 221 177 116 92 87 58 123 60 74 90
+20408 61094 59995 156080 101774 212662 65143 94366 34263 78016 35095 60228 22887 29407 12090 38763 21696 88775 45339 52764 22779 38667 19162 42193 16557 24443 20200 83651 40981 93155 32103 69853 30704 39485 21388 23388 19875 21908 13353 19221
+506 202 225 147 405 113 429 223 680 478 384 67 150 65 80 68 326 207 350 184 295 206 544 249 179 141 214 189 390 150 392 137 182 100 174 85 289 97 202 200
+207 104 108 237 322 444 265 193 187 322 331 200 116 86 56 191 172 192 211 185 128 71 116 179 48 52 54 146 134 89 217 91 63 57 65 49 69 35 51 48
+25189 63818 61258 161276 107464 227181 70290 100561 38187 93118 42680 68901 25280 32926 13219 48171 25551 95244 49600 60964 25482 40709 21648 50658 17370 26171 21076 87602 43383 93033 36848 71252 31547 40617 22515 24995 21142 22743 14323 21016
+442 62 70 67 186 112 235 95 279 222 494 165 219 110 138 227 352 103 250 167 246 60 251 197 85 59 85 73 158 24 344 40 67 38 108 56 126 33 112 69
+218 206 104 256 802 726 320 213 223 240 348 190 69 58 64 176 203 157 151 59 58 97 74 91 24 48 50 69 72 81 123 76 75 31 37 28 121 48 28 29
+30132 72645 62387 167327 125282 258154 76715 107613 42934 102276 50510 76440 26412 34513 14525 56093 30102 99175 52221 60931 26328 44225 22999 53209 17549 27549 21827 86585 44140 92427 39072 71646 32676 40084 22898 25215 23707 24327 14681 21536
+405 163 64 73 481 195 273 101 310 131 446 137 99 51 155 172 373 67 151 21 73 94 129 63 21 47 71 16 54 20 157 28 89 11 36 18 245 56 35 24
+2297 1309 1882 2919 2259 2465 1965 1158 515 684 395 314 190 176 93 180 180 91 72 82 69 82 146 198 61 116 81 18 24 65 178 108 112 20 18 24 93 28 14 16
+88096 148708 108938 336623 179900 394108 125030 172301 55026 138162 59346 91145 30609 43255 16539 63786 33951 98815 52757 62313 27434 46609 26156 62181 18670 33022 23352 82495 43651 90875 42646 73982 34723 38907 22786 25176 25492 24587 14672 21226
+2087 715 1381 765 1067 533 1274 540 637 368 441 221 340 229 233 154 294 22 40 39 94 64 283 180 118 171 145 1 6 13 236 53 155 5 8 13 161 19 8 7
+3493 5719 4555 9538 9098 8519 6470 2313 1555 1787 1013 803 598 479 719 1601 822 414 211 146 144 185 233 281 114 169 105 14 14 88 262 144 110 18 15 22 39 18 23 18
+175186 491146 222655 902417 407976 893848 287298 304068 93402 239661 83759 135010 45131 70088 34505 158320 54115 118321 56833 67544 30430 55178 31459 75714 21118 41423 25453 78405 42919 90829 48278 78390 36668 37678 22600 25017 25852 24217 14893 21058
+1730 1052 1782 990 2063 886 2057 658 1316 612 928 453 899 478 1352 839 1072 231 215 106 244 187 423 235 251 227 191 0 2 25 335 85 143 4 6 11 34 8 23 9
+4073 7828 6422 13612 9976 12341 7205 4078 2368 1915 1763 1585 730 1041 1021 3572 1898 786 487 396 461 508 366 422 261 175 32 19 20 71 198 136 124 30 24 22 25 26 33 28
+274928 942611 381259 1684561 652803 1598419 464303 536366 151602 342934 126734 224288 62665 129839 59742 368275 101281 159511 67863 87820 41454 83077 40029 97097 27262 49689 25635 74868 42358 89741 52134 82042 38922 37260 22649 24867 25845 24361 15364 21514
+1321 765 1546 776 1470 743 1417 681 1322 470 1124 574 832 626 1236 863 1497 376 501 307 731 437 584 297 539 196 23 1 4 16 218 71 160 12 15 11 14 16 47 22
+4053 8412 7125 15643 12496 19178 11695 4445 2446 2458 2390 2192 997 1502 2138 4071 3084 1126 878 815 1078 1007 501 380 415 340 60 19 21 39 111 123 105 28 24 28 29 29 35 34
+371669 1402868 553874 2544558 955935 2680766 751665 777275 210343 473372 184664 345502 86586 214328 112903 596291 177587 219119 88612 132622 67975 139960 51836 114617 37189 67596 26528 71543 41837 86752 53669 84676 40634 36744 22697 25095 25941 24681 15875 22312
+979 564 1205 604 1244 713 1497 508 984 432 1102 542 884 567 1524 616 1499 399 753 467 1185 566 654 214 712 331 77 1 5 5 90 55 119 10 15 18 18 20 51 32
+4635 9178 7798 15895 13673 23909 16333 6078 3817 3085 2435 3130 1534 2443 3441 4314 3382 1511 1165 1009 1453 1598 992 714 575 440 132 32 17 74 128 122 88 56 49 52 55 59 49 50
+480873 1882572 739383 3368439 1281587 3988835 1150413 1104057 302664 634505 242298 517072 123637 351560 198045 825556 259606 298804 116180 186655 103420 229739 75899 151606 50959 90572 29240 69216 41227 86093 55601 87091 41869 37979 23382 26784 26699 26825 16731 24045
+858 442 996 444 1054 583 1407 502 1132 423 849 523 990 601 1502 458 1138 414 779 423 1108 564 992 354 772 337 226 5 3 19 108 52 86 41 62 57 65 74 91 62
+5070 9775 7647 14141 12876 25889 17379 7771 4867 3346 2665 3811 1843 3017 4752 4927 2911 1566 1212 999 1443 1524 939 723 600 416 142 24 17 62 59 53 45 45 51 53 58 53 42 55
+598470 2370173 916401 4035128 1578737 5340070 1565945 1515247 419521 802006 304373 720187 167662 515824 314574 1078727 327537 377087 144261 236832 137724 309585 98007 186929 65024 110695 32139 66537 40632 84736 55721 85121 41974 38465 24101 28433 27515 28471 17386 25981
+781 382 768 315 787 479 1094 472 1042 354 767 468 922 503 1374 403 784 333 676 318 851 404 732 284 653 250 229 3 3 14 24 10 23 26 64 55 69 55 66 67
+5083 10030 7572 13293 12502 24381 14679 9288 4585 3297 2842 4191 1673 3172 4413 4907 3248 1533 1398 1263 1332 1978 1287 895 721 449 157 25 22 46 76 86 71 39 49 37 43 55 60 66
+713464 2844186 1087083 4609718 1858908 6517586 1902085 1994967 526252 956447 369423 934462 206242 679755 419528 1315479 402387 448646 176396 300218 168335 412534 128459 230700 81831 131639 35350 64080 40180 82478 56272 85297 42741 38553 24751 29000 27927 30141 18485 28477
+652 320 651 257 634 352 738 423 786 280 672 384 667 405 939 326 706 261 648 334 646 393 795 286 656 230 240 3 5 8 40 26 55 19 57 26 37 54 116 84
+5107 10504 7656 13916 12099 20628 11630 8685 4458 3121 3081 3981 1504 3269 3989 4953 3352 1506 1541 1548 1646 2706 1551 1301 985 541 192 18 20 106 124 137 64 25 23 31 29 24 27 26
+826201 3318881 1255652 5188110 2121784 7393879 2151894 2408858 627072 1090808 438959 1122979 239539 839810 511023 1540853 478027 514253 211384 377310 206208 554032 164899 296788 104967 156978 39375 61341 39688 84041 58037 88596 43310 37775 24721 29164 27971 29807 18713 28365
+554 277 573 238 541 248 512 326 647 230 604 302 505 324 691 272 606 219 593 328 655 417 778 350 719 243 280 1 4 41 97 63 44 8 12 18 16 10 25 13
+4788 10974 8349 14952 11467 16599 9605 7908 4005 3071 3284 3935 1815 3182 5020 5409 3245 1642 1660 1815 2317 2570 2004 1871 1349 618 205 23 28 72 75 117 98 37 45 54 58 61 64 58
+927969 3793971 1437727 5795449 2361942 7970065 2343703 2750180 713795 1214036 511948 1297359 279955 984917 626589 1780721 549044 584279 248540 466181 260288 678685 212009 393931 136830 185528 43632 59073 39413 83422 58505 90468 44733 37781 25254 30731 28755 31766 19881 30226
+451 246 545 228 459 171 379 248 500 199 557 253 544 260 738 251 518 215 545 307 757 316 788 390 795 236 275 3 9 19 36 44 95 18 47 51 65 62 119 60
+4768 11315 8993 14915 11848 13912 9248 7755 4364 3331 3556 4411 2640 3211 6849 9164 2971 2236 1899 1767 2290 2596 1906 2579 1317 504 230 29 32 90 103 176 87 38 50 58 53 54 57 40
+1026685 4261507 1631720 6364076 2605844 8346599 2521598 3061623 807531 1345845 590068 1490521 340451 1123100 786024 2236895 611284 686597 290878 546771 312328 797457 255438 528743 167080 205361 48422 57310 39247 83946 59677 95853 45840 37848 25901 32450 29391 33177 20841 30870
+406 228 511 198 430 126 333 210 477 195 535 246 677 231 808 374 421 263 552 253 633 262 621 412 643 157 285 5 12 30 67 88 74 19 56 55 52 46 94 28
+4784 11258 9458 14055 11505 12083 8842 7446 4615 3621 3325 4073 2794 2808 7498 11179 3019 2571 1902 2113 3536 3671 2787 3408 1110 402 209 33 23 130 128 135 88 52 45 51 51 49 55 60
+1123346 4697490 1832756 6845751 2834889 8588172 2684674 3335396 905343 1487562 660333 1651328 403373 1228233 958069 2789496 673197 803358 332236 643783 394920 975148 320304 706398 191283 217737 52555 55898 38855 86896 61459 98396 46945 38771 26404 33636 29960 34196 21726 32704
+375 202 471 165 376 98 294 177 444 190 441 192 597 174 719 372 387 256 483 265 791 313 765 421 459 102 232 7 6 59 95 50 73 34 44 40 47 36 85 58
+4806 10471 8935 11923 10391 12478 9741 7219 4156 3051 3018 2913 2397 2485 4389 10354 4171 2992 2293 3059 3839 4111 3427 3731 908 393 213 47 29 127 176 186 107 45 44 48 51 55 60 46
+1218156 5058964 2015405 7167541 3029740 8839519 2866659 3578797 988980 1585757 720996 1731219 454576 1307214 1046346 3258256 763012 938979 382557 793094 483195 1169211 399910 893239 209718 228818 56687 55431 38626 89485 64423 103919 48508 39209 26869 34566 30515 35523 22717 33567
+347 168 400 122 308 100 306 152 358 137 356 110 438 134 363 279 484 254 509 322 697 301 754 353 326 91 219 15 10 53 147 85 98 25 41 34 46 44 93 33
+4789 9415 7469 9703 8275 12865 8568 5913 2944 1953 2347 2110 1701 1500 3367 6624 4198 3292 2421 4031 4146 2726 2216 2121 584 278 196 43 19 80 113 145 69 23 29 27 22 27 33 30
+1310165 5333872 2156019 7333634 3165637 9099562 3014117 3727357 1039547 1610602 762992 1756983 486710 1320940 1106294 3469730 851276 1084894 434893 993164 577114 1266539 446573 969955 219411 232169 60282 54747 38147 89031 65703 106592 49060 38269 26939 34150 30315 35050 22993 33396
+319 133 306 82 218 99 245 106 223 62 244 60 271 55 249 141 428 247 466 344 648 160 408 155 176 45 183 13 4 21 70 50 41 6 17 11 8 10 28 14
+4848 9077 5713 8528 5346 8823 6040 2698 1492 1134 1249 1390 882 864 2042 3299 2665 1886 2276 1967 1926 1781 1611 997 332 192 242 93 11 57 63 44 24 15 15 17 14 13 16 17
+1401385 5571520 2248236 7417572 3223269 9095673 3093271 3669483 1051735 1583639 775872 1736966 497106 1294768 1130875 3464236 898149 1135674 482216 1054424 611940 1299969 476606 973013 222420 230035 64963 57176 37475 87191 65673 102899 48448 36894 26649 33145 29916 33745 22828 32436
+299 119 209 65 114 49 148 24 80 21 97 27 106 19 122 40 232 107 387 126 250 79 259 41 70 22 225 58 1 11 21 4 5 3 4 4 3 2 6 4
+4532 10204 7294 6298 4803 6332 4239 2393 1337 1120 560 872 547 898 1135 1019 1113 808 822 576 1297 1248 1133 671 243 154 202 76 8 39 57 21 15 16 15 17 17 14 15 14
+1482250 5864150 2378565 7359469 3265582 8938977 3124411 3596342 1059657 1557434 770818 1686325 498679 1272255 1131657 3318995 904179 1117177 491190 1026549 629818 1298648 493670 955859 223079 225694 68504 58414 36743 84355 65490 98015 47622 35663 26367 32200 29603 32580 22641 31350
+256 132 267 35 95 26 82 19 64 22 20 11 43 21 41 4 58 21 96 12 142 39 156 19 37 14 168 37 0 5 17 1 2 3 4 4 5 3 6 3
+4577 10787 7694 6154 4787 5455 3748 2768 1319 948 834 422 320 628 933 1423 983 355 269 381 896 914 953 757 187 135 153 80 10 18 29 15 15 14 15 17 13 16 21 15
+1562247 6175041 2515865 7296005 3306429 8737801 3142223 3550629 1066921 1522234 772894 1611076 494410 1234505 1127256 3207288 906735 1071959 485804 988366 636999 1276886 505707 945017 222290 220446 70704 59824 36081 80399 64596 93055 46816 34383 26092 31312 29196 31608 22612 30390
+242 130 268 34 92 20 65 27 62 16 45 2 15 10 28 8 45 4 10 5 77 21 117 25 22 11 108 40 1 1 4 0 2 2 5 4 3 4 12 4
+4644 10487 7196 5773 4415 3773 2622 1938 917 731 1173 287 167 363 907 1183 1393 441 402 803 884 936 787 518 159 69 99 71 9 12 22 13 15 14 15 16 17 18 16 17
+1641960 6448848 2637008 7212941 3336747 8445357 3130807 3456665 1063727 1475814 783584 1532047 486336 1182739 1122301 3087539 919708 1034738 483952 978400 643694 1257781 513200 920142 220805 211458 71469 60596 35410 76312 63545 88270 46030 33180 25824 30416 28901 30817 22456 29611
+230 117 232 30 78 10 32 14 30 10 86 1 4 3 27 6 87 7 24 26 73 23 84 12 16 3 47 31 1 0 2 0 2 3 5 4 5 5 7 5
+5223 10823 6912 6080 4127 1741 1269 1344 1089 910 932 214 190 170 1568 1885 651 470 372 480 543 750 808 566 127 66 66 34 9 12 15 16 14 14 18 18 12 14 13 17
+1734484 6726870 2747867 7153722 3358946 8045618 3085090 3331845 1065010 1443176 787847 1453275 479052 1122221 1134366 3018103 913390 1001531 481380 949188 641505 1228395 521043 899709 218539 202825 71372 59049 34756 72470 62342 83957 45238 32049 25639 29696 28486 29828 22227 28878
+249 115 209 35 68 2 7 7 42 17 54 0 5 0 78 17 19 8 21 10 28 15 86 15 10 3 21 7 1 0 1 0 1 3 7 5 2 3 4 5
+5769 11453 6707 6447 3888 697 1030 746 1178 768 499 314 371 434 1340 1981 607 507 618 594 449 473 481 254 80 59 42 22 15 10 11 16 16 15 16 18 16 15 17 16
+1838655 7026917 2850718 7120603 3374482 7605722 3034405 3177774 1068536 1403772 780935 1385373 476576 1081554 1140302 2958731 906105 972590 485160 928732 636968 1183754 520331 861333 215128 194280 70663 56857 34271 68736 61066 79902 44517 31047 25407 29020 28183 28959 22106 28128
+261 117 191 39 60 0 5 2 49 12 16 2 21 6 57 20 17 10 58 15 19 6 31 3 4 2 8 3 3 0 0 1 2 3 5 6 5 4 8 5
+6518 12603 6832 6314 3537 2848 1978 1107 650 433 390 339 317 247 843 901 1116 590 312 277 252 454 484 286 52 24 12 7 13 14 15 17 15 10 16 15 20 18 16 15
+1959372 7379615 2954197 7081300 3380658 7324371 3009218 3055126 1058477 1346151 771409 1323081 472782 1031838 1133385 2836569 912013 950485 481024 890028 627508 1140624 519714 827226 211086 184098 69205 53875 33747 65472 59925 76152 43789 29798 25181 28200 27990 28327 21963 27361
+282 126 187 39 49 7 19 6 15 4 10 2 16 2 23 4 58 15 15 3 6 6 31 4 1 0 0 0 2 1 1 1 2 1 5 4 8 6 7 4
+7384 13866 7060 4989 3267 4234 2206 1335 729 584 216 204 299 261 956 1044 710 441 154 157 392 462 355 295 69 28 10 10 8 9 8 14 13 9 16 14 15 19 17 14
+2099212 7788748 3060921 6962950 3379778 7145052 2990488 2953844 1050688 1301264 757673 1256232 468622 985965 1129529 2730522 907395 920552 472952 846274 621863 1100573 515814 795718 207580 174772 67733 51257 33109 62096 58633 72443 43028 28563 24961 27368 27674 27794 21849 26579
+309 135 185 25 42 17 25 9 19 8 3 1 14 2 29 6 23 8 3 1 15 7 17 5 3 0 0 0 1 0 0 0 1 1 6 4 4 7 8 4
+8092 13535 5835 2137 2090 2148 1109 927 793 427 191 139 199 121 396 491 275 186 176 157 350 296 287 232 65 26 8 8 6 15 12 11 7 8 12 14 15 15 11 10
+2253660 8152998 3133667 6676481 3348832 6848334 2944184 2833573 1044730 1249425 743641 1189400 462010 934243 1111454 2596863 891773 876748 465644 805145 615286 1052726 510273 762230 204059 165883 66246 48673 32435 59291 57476 68772 42132 27340 24644 26586 27366 27047 21584 25598
+321 124 138 5 17 4 6 4 23 4 2 0 6 0 5 1 3 1 5 1 12 3 11 3 3 0 0 0 0 1 0 0 0 1 3 4 4 4 3 2
+7706 10212 3409 2627 2321 1347 1390 639 424 510 176 313 269 213 385 405 373 159 206 244 286 167 300 242 76 17 8 8 12 7 7 10 11 11 10 12 11 12 11 13
+2394385 8291237 3142582 6437304 3324564 6520206 2906219 2702824 1029488 1205795 729576 1137268 457352 891276 1093549 2465939 879046 833913 459285 771829 607237 999825 505203 731365 200907 156974 64796 46244 31932 56163 56220 65260 41361 26375 24284 25728 26964 26161 21326 24860
+284 75 53 8 22 2 10 2 6 7 2 3 12 2 5 1 7 1 7 3 8 1 12 4 4 0 0 0 2 0 0 0 1 2 2 3 2 3 3 4
+7509 7978 2327 1350 1334 1363 1129 620 417 277 122 302 224 225 370 588 553 226 256 262 223 294 209 125 33 7 7 7 8 12 5 11 13 13 11 12 12 12 12 11
+2526561 8283931 3123616 6134022 3275672 6212748 2862530 2578752 1014448 1150468 714481 1087588 451660 851625 1075708 2354113 871238 797765 454363 741617 597778 957900 497933 695164 196735 147986 63357 43899 31339 53530 54944 62020 40660 25591 23958 24921 26597 25328 21100 24044
+258 46 25 2 7 2 7 2 6 2 1 3 8 2 4 2 15 3 11 4 5 3 6 1 0 0 0 0 1 1 0 0 2 3 3 3 3 3 4 3
+8853 9177 2140 1277 1279 727 579 716 388 229 43 212 315 229 159 394 433 254 216 245 126 106 123 106 21 11 10 10 11 14 13 13 10 13 17 11 12 13 11 9
+2689794 8350727 3100343 5844451 3226594 5884663 2805872 2468022 999042 1095511 697744 1035360 448437 814598 1052919 2237078 860557 765506 448542 712173 586076 906940 488647 659968 192360 139783 62031 41879 30837 51178 53904 59097 39900 24854 23794 24101 26239 24607 20854 23154
+294 61 21 2 7 0 1 3 5 1 0 1 17 3 0 1 9 4 8 4 1 0 2 1 0 0 0 1 2 1 1 1 1 4 7 3 3 4 3 2
+6736 5718 2059 1642 1338 362 627 471 371 502 285 233 221 245 199 309 261 300 217 136 170 97 103 72 41 53 37 16 15 11 12 19 15 11 13 10 13 18 16 11
+2794836 8201003 3075581 5594678 3180249 5553837 2751855 2348883 983586 1060624 687611 987555 442891 780776 1031721 2121842 845746 738008 442891 677799 575791 858485 479081 624795 188606 134652 61428 40349 30450 48783 52865 56718 39287 24038 23532 23269 25916 24236 20742 22440
+199 24 20 4 8 0 2 1 5 9 6 2 8 3 1 0 3 6 8 1 3 0 1 0 1 4 8 3 4 1 1 2 2 3 4 2 3 8 7 3
+4373 5025 4645 5220 5388 3093 1361 1711 1260 830 1434 862 342 402 315 478 809 814 581 324 274 352 491 366 197 303 297 206 59 26 53 130 52 29 27 27 47 20 24 19
+2836852 8017686 3117541 5579714 3238589 5410646 2717949 2313075 991241 1047982 707102 981263 440577 758629 1014017 2023903 845313 743739 446686 657038 568421 828603 479672 609794 188933 145188 67486 50584 31198 47453 52900 61301 39635 24377 23634 23531 26470 24010 20837 22260
+103 19 97 43 116 16 11 24 63 25 139 30 21 11 3 2 35 47 60 9 8 7 37 14 34 120 282 237 60 6 21 98 33 20 18 18 48 10 17 10
+4475 7281 6927 12201 11217 12207 5614 2692 2121 1179 1528 1176 773 695 601 1203 1797 1348 782 658 609 744 1078 1031 425 445 374 181 78 27 86 161 59 37 29 31 46 32 35 28
+2880426 7983970 3216788 5994543 3444477 5835988 2793607 2339685 1020714 1057540 728509 994640 449338 755812 1004066 1976383 870146 781934 455525 658043 569798 824598 495254 636549 195080 163816 75361 58669 32413 46264 53777 67514 40153 25187 23785 24023 26985 24535 21211 22644
+105 41 169 165 286 171 155 60 146 50 146 55 99 33 14 15 142 108 99 39 43 31 145 98 132 179 336 169 96 7 55 123 42 32 21 24 44 24 36 21
+6940 9173 7183 8574 17835 18785 9979 7494 4203 2016 3027 2377 958 956 870 2519 2921 2815 1392 880 649 1576 2171 1971 834 927 686 161 76 47 141 188 91 40 25 35 52 32 31 26
+2985924 8068517 3320101 6161650 3814398 6639946 2978957 2659722 1102672 1117948 787700 1081000 462609 769199 1001240 2012565 923092 907966 479736 672627 572163 871949 538386 719450 211529 210940 91015 65040 33546 46375 56038 75013 41476 26133 23830 24731 27640 25028 21473 22882
+188 65 169 92 441 252 299 243 327 122 321 161 133 60 29 66 251 245 213 67 49 116 331 211 291 332 559 128 88 21 125 137 92 35 15 29 55 24 28 18
+7480 8348 5852 6128 12824 13040 20631 10852 4687 3206 3696 3007 1390 1484 1299 3335 4414 3105 1582 1140 1068 1732 1838 1739 778 1015 813 223 54 45 143 264 170 54 32 23 37 35 29 31
+3102592 8097305 3386811 6168455 4046991 7042710 3431972 3166864 1194957 1247842 862515 1200885 486592 814222 1009451 2096710 1012881 1044253 508200 702309 585180 926043 571929 783124 226136 260643 109525 74838 34088 46357 58294 86731 44786 27882 24053 24660 27895 25676 21678 23413
+197 52 121 49 280 142 585 306 344 204 363 196 209 118 65 104 375 238 236 99 117 122 254 158 246 295 557 176 44 19 121 186 211 59 25 12 27 27 24 24
+7618 8679 6371 7082 10719 15442 26813 15281 7507 4663 4427 3752 1712 1794 2005 4453 5092 2878 1421 1209 1385 2307 2104 1278 590 959 696 186 30 97 237 98 143 69 41 63 56 52 48 54
+3219875 8144702 3465123 6233463 4219968 7568882 4031705 3915686 1357024 1459457 954148 1359348 518208 875589 1035504 2244493 1117760 1158417 531837 734450 605975 1012218 611435 814655 235572 303923 124582 81775 34003 49535 62897 87548 47323 30448 24500 27051 28630 27330 22363 25325
+192 56 133 63 216 165 656 357 516 270 399 225 257 140 132 149 405 193 197 101 164 165 279 92 162 235 411 122 13 80 226 33 157 83 40 82 61 56 63 67
+7958 10254 6762 6251 9376 14376 28341 19576 12879 5848 5182 6090 2672 3149 3733 6995 4569 2755 1611 1361 1424 2992 2174 1130 711 700 526 357 51 149 315 166 130 71 62 62 59 97 72 44
+3342922 8286019 3551475 6243516 4354297 7997993 4655526 4883454 1652367 1731179 1062794 1651944 573574 1016522 1105079 2539584 1206651 1258175 559741 774001 627248 1135308 651744 835202 247866 328694 134917 98801 34457 55717 69379 92493 49464 32983 25473 29237 29423 31649 23645 26508
+192 76 140 49 174 139 614 385 747 287 434 319 397 248 284 234 330 164 220 112 163 208 270 71 197 139 274 236 39 136 295 84 130 80 86 72 65 138 120 42
+7925 10381 7201 7098 9624 15350 26329 24353 19915 7339 5216 7353 3874 4862 4999 8460 3333 2670 1821 1364 1580 3283 2311 1408 746 534 435 409 87 247 395 244 161 68 56 53 60 84 66 56
+3462054 8426660 3646893 6305003 4491612 8461198 5212343 6086646 2120195 2078202 1169597 2004581 658284 1254242 1205279 2906976 1261728 1346725 592317 811363 651978 1268891 694549 871595 260748 341780 142668 118001 35820 67549 77744 101934 52344 35181 26268 30739 30222 34910 24741 28358
+181 77 148 62 173 145 501 387 932 308 399 320 527 341 368 252 211 143 242 104 179 206 270 97 200 85 204 228 103 223 348 135 165 68 69 49 65 100 99 61
+7964 9528 6764 6175 7069 10910 17113 28160 20848 8987 7327 7279 4786 7337 6317 7430 2883 2244 1722 1442 1530 3424 2919 1924 1448 805 423 402 93 349 666 397 306 103 50 51 55 72 61 57
+3579209 8506457 3728758 6306094 4560187 8623829 5519676 7451542 2600194 2505653 1327696 2331514 764193 1629757 1336669 3189045 1303927 1403790 621549 851276 674813 1403122 751827 937507 291253 370731 149919 135619 37303 84937 92827 120208 58859 39398 26890 32028 30873 37238 25682 30158
+175 63 130 47 108 82 286 355 806 326 513 273 564 405 431 188 167 103 213 105 163 191 325 140 405 143 186 194 109 273 530 215 330 120 53 43 53 70 83 58
+8180 9594 6741 5869 6841 8680 12217 28934 20810 11343 8031 6960 6323 10588 8656 5047 2775 1775 954 1206 1335 3148 3063 1759 1346 914 465 315 144 482 680 483 362 116 52 47 47 78 65 54
+3698961 8585521 3807992 6288320 4621222 8639697 5694183 8782100 3067239 3052205 1499844 2619233 906747 2182475 1524570 3307785 1342312 1428617 630419 874295 692093 1512343 811356 989327 318389 404641 158063 146835 40052 109453 107892 142670 66643 44160 27547 32993 31303 39795 26702 31666
+173 64 126 42 101 53 177 317 672 340 496 224 632 455 529 101 152 66 87 73 129 154 313 114 338 151 198 125 191 304 463 233 361 126 55 35 38 74 88 49
+7598 8708 6389 5283 5457 6939 11145 25232 18052 12675 8454 7268 8487 13788 10701 3403 2263 1456 1009 722 936 2720 3133 1486 1336 944 524 318 262 543 590 510 448 99 47 51 53 108 102 49
+3800846 8605408 3876250 6235610 4645355 8547651 5836931 9805386 3452124 3647800 1678508 2908613 1101060 2898632 1760055 3318397 1366651 1432356 640474 866197 698742 1588716 871189 1021265 344592 438360 167511 157562 45749 136246 120280 165443 76431 47592 28060 34146 31876 44042 28642 32776
+151 53 113 35 66 33 150 237 500 310 460 207 729 454 568 47 109 44 94 27 70 114 294 83 306 141 216 116 353 283 350 212 410 88 44 39 47 113 163 38
+6523 6932 5937 5248 3421 4571 8225 17252 14166 11454 7653 7462 10134 14811 11120 2116 1142 1185 659 736 747 1413 2192 1279 1083 843 533 248 282 464 531 443 401 86 48 48 59 134 104 56
+3872708 8514989 3931250 6183912 4616840 8315643 5901473 10277006 3728065 4132646 1832236 3192549 1332624 3634671 2000373 3249303 1361727 1419221 641331 859445 700394 1580208 905474 1038570 363674 463851 176954 163345 51815 156578 130851 182733 84773 50020 28586 35046 32588 49632 30585 34250
+117 33 99 35 26 15 92 133 344 238 369 189 732 373 516 19 30 30 41 28 44 34 177 61 221 109 207 71 340 200 286 154 324 63 45 34 56 135 157 48
+6470 7329 6013 4649 3915 5676 9573 13373 13393 10106 8268 8022 12244 14301 9668 2280 1348 1027 732 666 389 812 1348 884 739 693 421 185 255 396 559 378 318 57 48 52 60 96 124 53
+3941421 8454385 3986820 6098515 4601665 8165443 5998862 10482014 3977358 4505584 1997847 3493855 1612344 4295217 2197575 3194430 1362191 1397167 644032 848797 692853 1535286 917328 1030569 373486 478597 183298 164910 57039 171512 141873 194992 90785 50520 29099 36138 33308 52552 32991 35451
+112 39 98 28 34 24 115 86 298 185 369 182 728 302 405 23 41 23 50 23 12 11 82 29 125 74 142 39 273 143 282 110 225 27 43 38 57 72 187 41
+6476 7597 5695 4594 3779 5763 9820 15422 12855 9201 7821 9988 14367 16092 6348 2467 1348 750 726 748 504 933 1193 695 504 537 353 220 330 321 375 221 155 66 47 49 51 97 89 45
+4008572 8413883 4032873 6014862 4583392 8029600 6100133 10800607 4206675 4800546 2147898 3897869 1939350 5026166 2304986 3154338 1362644 1359418 646513 843826 688440 1500493 924924 1011436 377046 482874 187745 168532 64050 180943 147917 196870 92480 51543 29574 36980 33780 55358 34442 36088
+111 42 88 29 32 26 115 105 271 149 323 211 707 293 232 28 41 13 49 30 21 16 64 18 63 44 105 53 334 95 160 40 74 35 41 33 40 67 111 28
+6227 6888 5205 4346 4172 5169 8022 14094 11044 8051 7666 9041 12907 14460 4772 1898 892 529 617 535 471 706 782 673 342 407 311 158 246 237 283 223 150 65 49 49 44 80 82 58
+4067682 8332252 4065251 5920992 4575621 7865414 6152915 11018497 4383974 5007158 2290241 4219462 2220872 5612994 2369430 3081693 1351429 1310356 646145 826067 683294 1453841 921824 992099 376376 478907 191008 168127 68739 184647 151458 198758 94005 52443 30088 37771 34061 56951 35678 37486
+101 34 75 26 40 21 82 87 213 116 295 172 559 225 153 17 18 7 35 16 18 10 28 18 29 25 82 27 218 52 101 40 68 33 43 32 29 43 94 45
+6448 7234 5528 4783 5105 4981 7767 12406 9402 7467 7613 9176 10630 10922 3664 1185 807 615 397 641 468 772 898 608 422 468 293 136 229 290 380 312 110 47 54 59 55 100 105 46
+4130965 8276776 4105078 5859602 4591894 7699528 6197861 11119607 4514873 5165494 2427676 4530055 2437160 5947248 2403942 2969602 1338321 1269521 640163 815886 678199 1414043 921767 969929 377767 478926 193729 166395 72876 191385 157390 206001 94470 52184 30717 39129 34616 59677 37471 38063
+106 38 83 33 59 20 76 66 167 98 276 160 408 141 100 7 15 10 15 24 18 12 37 15 44 34 72 21 187 72 151 72 36 17 51 44 45 63 131 27
+6467 7514 5923 5039 5456 5995 8708 13465 8366 6654 8107 9428 8821 8432 3085 1378 414 438 211 510 528 530 526 534 372 362 259 120 206 298 422 253 95 43 45 47 46 110 97 45
+4193154 8241831 4154008 5817623 4616733 7605892 6265739 11279713 4616022 5264382 2574308 4837495 2601807 6108468 2422791 2876094 1315494 1220262 629576 798267 674765 1361765 912202 944543 377845 472431 195513 163784 76322 198210 164248 209185 94540 51694 31100 39668 34927 62854 39015 38544
+104 42 92 36 67 30 91 77 138 78 278 154 304 90 73 10 4 5 4 15 24 6 12 12 34 20 55 16 154 71 166 46 27 14 35 27 31 70 111 26
+6032 6828 5728 5219 5742 6459 9509 15725 8611 5999 8202 10164 7464 6555 2791 1848 1001 397 334 703 696 430 746 866 432 324 234 143 160 230 441 286 123 54 44 40 40 125 82 61
+4242671 8166836 4196731 5789222 4648263 7546381 6352398 11569062 4720909 5317095 2719708 5171707 2727655 6144697 2433654 2817071 1308242 1171439 622397 793563 675712 1306480 908499 941077 379455 463992 196613 162742 78506 200448 171420 214205 95324 51910 31448 39745 35077 66762 40137 39979
+92 35 85 40 73 36 103 97 139 63 264 156 234 56 60 19 25 4 11 30 41 4 26 33 46 17 44 24 101 41 167 56 44 23 33 19 23 82 80 45
+6146 7168 6245 5576 5628 6395 8970 14539 7512 4826 7020 8283 5843 5433 2659 1953 1240 559 485 715 674 445 553 736 416 402 292 239 111 270 489 337 166 36 23 27 31 122 59 34
+4293866 8117229 4251604 5784458 4676092 7486509 6423116 11768186 4795084 5294579 2831263 5370304 2808925 6109820 2440872 2768041 1307281 1135499 619257 789878 676073 1255434 899955 929832 380616 460851 199169 167661 79383 205009 179640 222057 97187 51007 31250 39019 34993 70251 40643 39669
+93 38 97 46 70 35 91 81 112 41 205 109 162 38 54 23 38 10 23 32 39 5 14 24 42 27 68 63 50 55 181 72 78 10 9 9 14 74 41 14
+6228 7446 6410 5463 5532 6405 7304 9854 4618 3998 4924 5911 4941 4289 2110 1191 800 548 469 589 612 507 498 581 246 341 343 278 81 158 421 322 165 39 26 28 32 83 40 29
+4345879 8087678 4309325 5773037 4700772 7430843 6449481 11667528 4793430 5222544 2886454 5411255 2865109 6006751 2433876 2675137 1295096 1101039 615787 778673 674840 1211259 890219 909739 377402 454151 202965 174681 79471 202416 185917 228516 98978 50342 31134 38398 34937 71135 40650 39070
+94 41 100 44 67 36 62 37 45 28 121 58 124 25 34 9 16 10 22 22 32 7 12 15 15 20 88 79 26 19 140 63 74 12 11 10 15 33 19 10
+5946 6787 5727 4898 4894 4744 4565 5484 2401 2283 3478 4486 3900 2986 1011 792 721 596 337 467 498 379 278 570 246 244 238 147 37 149 350 240 85 41 28 18 33 84 56 28
+4389385 8019413 4348146 5727589 4708527 7276470 6405172 11304428 4735145 5049465 2903304 5362201 2893279 5829812 2398961 2563293 1281196 1071596 609029 760645 670723 1161871 875102 890176 374269 441893 203982 173231 78432 199425 190222 229550 98679 49840 31072 37200 34908 72027 41066 38446
+85 36 81 35 52 20 25 12 12 10 65 34 82 12 8 4 13 12 11 14 21 4 3 15 15 10 43 22 5 17 101 34 20 14 13 4 16 33 36 10
+5648 5810 4052 2733 2290 1380 1977 3460 1216 747 1092 1597 2103 1414 494 731 460 245 306 352 390 810 615 505 218 129 128 42 40 69 235 172 57 18 17 18 23 40 30 17
+4424188 7895219 4343180 5551856 4649524 6924683 6295814 10838763 4648023 4792403 2858741 5138597 2874810 5566910 2351702 2454412 1260971 1022355 601647 736634 663948 1141925 868977 867793 370498 423305 202161 165417 77496 191699 191480 226344 97672 47955 30730 36073 34624 70163 40807 37183
+76 26 41 11 11 1 4 5 3 1 6 4 24 3 2 4 5 2 10 9 13 21 19 13 12 3 12 2 6 4 47 18 9 2 5 4 7 8 10 3
+6008 6798 3870 2424 1966 1147 1516 3231 1495 615 399 1133 1732 1071 303 402 361 248 298 346 315 417 318 470 160 73 82 39 21 30 131 108 27 11 14 14 18 22 16 12
+4467324 7839177 4333686 5367682 4583712 6579687 6177401 10386968 4570208 4542654 2797576 4899902 2847319 5298708 2300741 2331851 1238720 976252 594245 713695 655425 1099031 855413 844603 365339 402392 199210 157888 76098 182040 190048 219399 95923 45753 30320 34768 34219 67305 40197 35689
+85 37 37 10 8 1 2 5 5 0 0 2 17 2 0 1 3 2 9 9 9 5 5 12 6 1 5 1 1 0 15 7 2 1 3 2 4 2 3 2
+5602 6699 3745 3053 2351 1584 1371 2803 1646 887 552 783 1371 922 338 250 369 262 166 182 290 307 272 451 138 66 83 31 15 26 122 88 31 9 14 11 13 14 10 10
+4499005 7780415 4321234 5233202 4529384 6282238 6058238 9935984 4498196 4324600 2741849 4654025 2811286 5037443 2251946 2207305 1217229 933775 583654 682056 646476 1051953 841012 821637 359746 382304 196358 150319 74581 172715 188422 211642 94320 43560 29920 33357 33697 64126 39449 34162
+74 36 36 16 12 3 2 4 6 1 1 1 11 1 1 0 3 3 3 2 7 3 4 11 5 1 5 1 1 0 13 5 2 0 3 1 2 1 1 1
+5338 6532 3931 3600 2910 2144 1374 1542 1862 1263 549 546 920 1041 439 323 222 126 110 152 305 308 300 330 118 33 28 15 14 10 31 31 20 15 16 15 18 15 13 14
+4523147 7714918 4313847 5140397 4490702 6037041 5942126 9434585 4433503 4142730 2687436 4408339 2764624 4799165 2206951 2094716 1192517 885491 571895 650472 638134 1007761 827686 792615 353781 361394 192171 142221 73077 162966 184510 200848 92476 41868 29582 32277 33315 61200 38796 32972
+67 35 39 24 20 6 2 1 8 4 1 0 5 2 1 1 1 0 1 2 8 3 5 6 3 0 0 0 0 0 0 0 1 2 4 3 5 1 2 3
+5525 6500 3972 3319 2821 2224 1584 747 1688 1219 495 572 622 728 711 331 246 91 98 205 163 195 203 315 80 20 19 9 9 9 59 53 24 13 13 14 15 9 11 10
+4551466 7651385 4307693 5035896 4450710 5811470 5834281 8914426 4365977 3969068 2633001 4178991 2711509 4555953 2170032 1989373 1169036 837954 560123 624039 626370 959278 812213 764412 346994 340939 187859 134241 71482 153741 181412 192053 90780 40154 29175 31200 32866 58081 38108 31608
+70 35 40 21 19 7 3 0 7 4 1 0 2 1 4 1 1 0 1 4 2 1 2 6 1 0 0 0 0 0 3 2 1 2 3 3 3 0 1 1
+5516 6334 3992 3700 2991 1917 1563 906 1447 1052 649 839 543 712 961 299 162 116 151 157 101 128 149 232 26 34 38 12 8 10 30 36 23 10 13 15 12 13 14 9
+4578848 7581465 4302203 4961073 4416062 5580571 5728591 8435244 4293976 3795565 2583862 3979807 2657701 4326350 2140426 1888385 1143994 794805 550000 596243 613315 909587 795746 732802 338996 322572 184140 126924 69902 145131 177650 182742 89101 38359 28779 30249 32352 55394 37514 30264
+70 34 40 27 22 5 3 0 5 3 2 2 1 1 8 1 0 0 2 2 1 0 1 3 0 0 1 0 0 0 0 1 1 1 3 4 2 1 2 1
+5248 6159 4823 5484 6686 6135 3423 1487 1614 859 685 731 556 1086 1319 869 422 136 84 140 91 168 239 296 63 143 135 21 9 11 34 43 33 22 15 16 15 16 20 20
+4598696 7504989 4318093 5000343 4476733 5622669 5673086 8020507 4228041 3620615 2536870 3785939 2605568 4133500 2120710 1828475 1126223 755474 538417 569071 600330 865335 781990 707021 332143 312004 182994 120599 68387 137099 174084 174419 87720 37409 28444 29417 31927 53053 37088 29676
+62 32 59 59 102 58 17 1 6 2 3 1 2 3 17 9 5 1 0 2 0 1 3 6 1 7 17 0 0 0 1 1 3 6 4 4 3 1 5 7
+5777 6800 5582 6868 10494 9390 6906 3421 2207 1449 1576 1654 815 907 1390 1710 810 480 296 354 215 438 825 821 214 483 383 23 18 18 97 113 81 28 24 24 26 28 31 28
+4631571 7472482 4352988 5122287 4633231 5862220 5708001 7749473 4178911 3492409 2513827 3660409 2561357 3941223 2103301 1823829 1118814 739637 532542 556677 590839 840326 783557 715041 329321 322958 188216 114776 67139 129979 172217 170896 87600 36884 28347 29126 31794 51590 36954 29615
+75 40 77 86 187 117 72 9 12 7 18 9 4 2 19 36 21 16 11 15 5 10 43 52 15 78 119 1 1 0 10 13 22 10 11 10 11 6 13 14
+6243 7321 5985 7330 11235 8839 8278 4486 2319 1758 1748 1853 1421 1699 1874 1596 760 501 336 310 264 623 1054 798 473 702 409 30 26 12 108 134 117 36 29 29 27 28 26 29
+4675538 7473935 4397314 5265299 4804764 6053547 5777116 7560132 4133870 3390880 2495756 3554637 2533741 3809141 2098699 1812457 1110312 726040 527836 542323 582838 828184 790939 721167 333191 346710 193972 109732 66127 122917 170678 168875 88404 36882 28380 29160 31690 50214 36696 29619
+85 46 86 93 196 100 97 17 14 12 22 12 14 9 35 32 19 18 15 12 7 22 69 48 71 129 126 2 3 0 12 19 46 17 16 15 12 6 9 15
+5738 6301 5376 7114 9043 7249 6554 3347 1616 1359 1542 1437 1380 2155 1555 863 560 275 231 198 202 441 671 699 457 480 186 22 20 17 70 86 85 20 20 27 29 27 27 28
+4705498 7412634 4424966 5386460 4915982 6135709 5800436 7312173 4071983 3270929 2472870 3429653 2505766 3712999 2086058 1756734 1096909 699374 520564 521949 573452 805588 788346 720843 336555 355398 193884 104499 64987 116586 168206 164026 88369 35897 28182 29069 31640 48860 36470 29562
+71 35 69 85 141 68 62 10 7 7 17 7 14 15 24 10 10 6 7 5 4 11 28 37 65 65 29 1 2 0 5 8 24 5 7 13 14 6 10 14
+4594 3928 3433 6376 5939 4783 2581 636 953 882 1136 1392 1207 1401 824 434 117 99 134 79 75 198 308 232 205 164 65 9 7 13 39 41 44 13 17 17 18 22 17 13
+4705466 7209220 4402259 5455011 4945078 6061437 5721614 6912534 3994693 3128869 2440177 3309403 2474067 3576302 2055046 1677998 1072517 663495 510994 495486 561053 769419 776539 691847 333393 344150 190705 98782 63543 110389 165003 156703 87287 34541 27913 28369 31310 47280 35994 28587
+46 14 29 68 70 30 10 0 2 3 9 8 10 6 6 2 0 0 2 0 0 2 6 4 13 8 3 0 0 0 1 2 6 2 5 5 5 4 4 3
+4421 3059 2029 5054 5106 3126 2519 1712 627 526 513 536 697 789 826 198 84 65 55 30 26 91 256 171 71 58 24 17 11 11 20 38 30 13 13 19 14 17 12 14
+4701013 6964621 4344230 5438228 4952154 5889819 5643175 6602979 3910999 2973460 2392375 3143777 2430122 3410206 2024859 1589486 1047890 627680 499643 467601 547711 728846 763697 660843 326885 327065 186557 93899 62237 104441 161395 149635 85874 33267 27548 27834 30886 45487 35402 27731
+42 9 10 42 52 13 9 3 1 1 2 1 3 2 7 0 0 0 0 0 0 0 4 2 1 1 0 0 0 0 0 1 3 2 3 7 3 2 2 3
+5006 3361 1138 1992 1676 1575 1138 854 312 213 442 457 375 428 442 198 62 36 32 21 37 59 127 84 45 42 19 14 11 11 11 21 20 9 14 17 14 8 9 11
+4711625 6753252 4264873 5234331 4871374 5633208 5531392 6259284 3821342 2808145 2343951 2983234 2379043 3231897 1985610 1506285 1023315 592232 487988 440836 534984 688741 747878 626354 319875 310022 182385 89125 60964 98850 157647 141947 84241 31823 27218 27208 30472 43249 34748 26743
+54 12 3 7 5 3 2 0 0 0 1 1 1 0 2 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 1 1 4 6 3 0 1 2
+4867 3524 851 752 647 1335 1773 860 326 235 313 294 222 319 230 122 61 42 20 25 25 47 108 82 24 39 17 17 17 8 8 20 29 13 9 12 16 10 12 11
+4718419 6564579 4180161 4966484 4766306 5377248 5438632 5936578 3734281 2654100 2293439 2822309 2325328 3057589 1941921 1423406 999328 559279 476317 415922 522268 650305 731969 593812 312503 293817 178266 84822 59876 93410 153916 134659 82879 30712 26768 26312 30120 41268 34187 25814
+51 14 1 1 0 3 5 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 3 2 1 3 4 1 2 2
+3630 2704 1118 662 1248 1995 1049 670 424 323 327 279 327 349 261 171 73 69 22 23 32 53 91 65 29 43 21 24 13 15 20 15 19 21 17 12 14 12 12 16
+4693423 6336847 4104388 4709178 4679224 5177194 5329680 5621560 3651899 2514704 2244545 2670118 2275638 2895582 1900115 1348510 976247 529962 464988 392380 510048 614544 716022 562178 305443 278830 174352 81207 58713 88727 150584 127501 81295 30159 26534 25470 29725 39529 33640 25248
+28 8 3 0 3 7 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 1 0 0 0 1 7 6 3 3 1 2 5
+2550 1840 955 840 1169 764 794 469 322 337 272 351 348 351 208 173 48 50 29 13 33 48 88 50 46 56 36 29 22 25 29 31 17 17 28 15 18 16 17 15
+4641443 6069696 4026340 4478246 4592297 4913513 5216929 5313094 3568966 2384532 2195466 2531482 2227726 2743418 1857998 1278231 953103 501237 454121 369636 498159 580621 700397 531520 298993 265541 170919 78116 57809 84939 147566 121755 79700 29393 26587 24863 29443 38140 33234 24654
+14 4 2 1 3 1 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 1 1 3 3 2 1 1 1 5 16 5 6 3 4 5
+2078 1656 969 399 469 616 733 464 255 236 208 270 202 377 382 182 74 92 43 15 21 41 67 54 54 49 44 84 29 17 41 32 22 29 33 16 13 32 23 18
+4578695 5807269 3950599 4234075 4489646 4656559 5105434 5022828 3486390 2255965 2145976 2396187 2177278 2601981 1821380 1212721 931201 476816 443883 348380 486260 548304 684625 502947 292909 252619 167777 78589 57107 80887 144930 116415 78272 29411 26766 24354 29040 37817 32992 24280
+10 3 2 0 0 0 1 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 1 1 2 29 5 1 2 2 2 15 23 6 3 13 8 7
+1756 1424 1025 563 475 284 347 300 153 138 168 354 228 208 195 107 67 119 57 21 54 40 46 46 46 55 41 75 40 44 59 47 32 32 36 26 21 64 31 24
+4509283 5546334 3878180 4014630 4389711 4394625 4986855 4739901 3403268 2129090 2096699 2274170 2128754 2458647 1780896 1146534 909667 455519 434259 328768 475501 517864 668710 475597 286772 240841 164636 78481 56703 78737 142820 112317 77136 29612 27018 24490 28851 39480 32960 24297
+7 3 3 0 0 0 0 0 0 0 0 1 0 0 0 0 0 2 0 0 0 0 0 0 0 1 1 23 11 8 5 4 4 19 27 16 8 51 15 14
+2085 1820 1064 697 413 128 377 373 249 178 319 337 208 131 121 126 85 197 98 38 59 55 64 58 33 111 60 55 100 109 94 74 62 120 97 88 47 79 46 38
+4450013 5325383 3808566 3816583 4290686 4138822 4872003 4478434 3324675 2012285 2052512 2158429 2080930 2319182 1739531 1085485 889131 440292 425923 311377 465139 490172 653652 450625 280456 233210 162059 77151 57843 80709 141657 110124 76795 35207 28823 28427 29332 41964 33313 25173
+10 5 3 1 0 0 0 0 0 0 1 1 0 0 0 0 0 7 1 0 0 0 0 0 0 7 4 13 66 47 13 12 16 169 151 133 41 70 33 33
+2586 2206 1372 1165 1291 1146 798 605 341 352 582 405 226 298 204 108 97 219 184 57 74 82 90 63 33 144 70 49 120 95 82 86 80 125 110 120 62 88 56 37
+4405030 5141404 3748563 3659172 4216577 3960910 4770780 4246908 3250396 1913178 2016151 2053810 2034760 2198345 1701321 1026993 869414 427330 419994 296197 455419 465800 639635 427459 274298 228064 159802 75532 59466 81703 140216 108800 76922 40774 30915 34093 30184 44852 33912 25935
+16 9 6 4 4 3 1 0 0 1 3 1 0 0 0 0 0 9 6 1 0 1 0 0 0 12 5 10 89 35 10 17 27 154 168 174 69 78 48 30
+3289 2963 2049 1929 2381 2511 2112 1460 963 635 1446 1266 598 206 111 76 75 122 84 44 41 58 82 136 59 127 76 44 65 57 45 54 38 62 60 68 35 64 41 36
+4379140 5014971 3707363 3558143 4172181 3877534 4705673 4081802 3193871 1837404 2002784 2008366 1999251 2079105 1661687 970045 849627 409186 411656 281129 445098 441416 625763 410168 268958 222183 157755 73703 59643 80302 137866 105589 75973 42136 31677 36225 30325 46092 34113 26590
+26 17 13 13 15 19 9 5 4 5 22 17 3 0 0 0 0 3 1 0 0 0 0 3 1 10 7 8 26 13 3 7 6 43 61 65 22 40 25 28
+4339 4218 3271 4891 6569 7425 6054 2875 2349 2050 3867 3464 1375 186 95 96 42 99 44 20 22 69 145 221 149 95 139 176 31 32 32 22 28 25 30 32 29 34 42 41
+4380737 4973228 3698429 3645153 4235949 4101064 4742958 4013536 3174187 1853111 2051637 2100688 1984491 1965791 1622634 917742 829491 390718 402504 265490 434549 419171 613848 399136 266052 214689 157370 80093 58946 77450 135242 100605 74792 41143 31653 36017 30309 45415 34335 27513
+46 35 35 80 105 133 79 23 25 51 134 111 20 0 0 0 0 2 0 0 0 0 2 10 10 6 24 115 6 4 1 1 3 7 15 14 15 11 26 34
+5479 5405 4336 5668 7907 13310 12780 5616 4744 4311 8239 6002 2175 612 307 138 65 72 26 31 25 94 374 518 256 156 190 225 39 119 61 37 40 33 35 28 23 30 30 24
+4411435 5006916 3716942 3774680 4332328 4672743 4951246 4117766 3216217 2006785 2211030 2343399 1990549 1885448 1589975 871158 810445 371699 393120 251465 424340 399796 608084 407013 265954 211392 158298 89111 58471 80114 133425 96842 73947 40701 31757 35576 30140 44533 34244 27336
+73 57 61 97 136 254 226 84 95 161 334 212 50 4 1 0 0 1 0 0 0 1 14 57 31 17 44 144 9 57 6 3 7 13 20 11 9 9 13 11
+6287 5583 4526 6549 9558 16637 13127 11729 8389 7648 10271 7241 2111 748 365 253 133 166 64 54 51 219 517 684 358 279 331 289 114 391 174 64 96 48 62 50 34 50 55 53
+4462022 5049518 3739850 3950562 4468504 5414525 5163205 4591310 3350373 2356256 2418387 2647669 1994820 1818280 1559614 834434 793613 359596 384942 239695 415050 389264 606119 424616 268466 215850 162807 101519 59925 99329 134542 94963 74555 41208 32549 36513 30256 44933 34795 28952
+92 60 66 115 174 281 221 219 206 287 397 232 47 7 2 3 1 7 0 1 0 11 28 89 60 53 118 173 80 263 49 12 42 27 62 35 20 25 45 53
+6232 4948 3607 6654 9677 12666 18121 16489 8726 9013 11141 7976 1618 731 427 279 179 205 100 100 109 233 591 646 347 342 331 317 245 439 236 172 231 55 58 51 53 51 68 63
+4509940 5050551 3738694 4122343 4604323 5867834 5497532 5328885 3489794 2768622 2642807 2978840 1986382 1754098 1531595 801511 778377 350615 377888 231457 407475 380224 606095 438828 270634 223911 167203 114903 64691 120340 137216 99832 78598 42114 33219 37455 30855 45370 35664 31085
+89 47 42 111 169 180 307 286 204 290 395 226 28 7 3 4 2 12 2 5 2 13 37 76 55 73 113 167 230 244 86 79 174 34 53 35 49 26 67 68
+6516 5099 3594 5173 7598 10980 19062 16637 10643 9246 12637 7657 1713 844 449 246 190 189 154 165 148 288 531 632 277 315 276 360 349 472 205 244 302 77 67 40 55 42 35 53
+4563921 5060800 3737235 4192828 4683607 6190362 5847567 6031300 3674738 3170562 2899866 3270543 1980583 1700709 1504838 768535 763802 341190 372391 227707 401086 375105 604538 451327 270958 229829 170084 130126 71997 142118 139030 108833 84355 44317 34102 37665 31490 45228 35668 32476
+95 50 41 71 117 134 303 244 247 252 411 188 31 10 3 4 2 10 6 16 4 21 30 69 35 59 81 174 323 227 64 123 229 61 69 21 51 17 17 46
+6639 4653 3272 4752 6862 9542 21250 14812 9538 9640 12594 8156 1508 662 370 288 124 152 109 134 135 242 577 626 299 327 337 570 500 536 304 278 321 84 63 30 28 30 37 32
+4619699 5043033 3727581 4233219 4742098 6405192 6244795 6579448 3826819 3572593 3149409 3575402 1969689 1639342 1476730 740118 747904 330057 365881 222277 394524 367467 604196 462708 271837 236129 174452 157338 82980 166521 143330 119382 90454 46818 34861 37248 31419 44357 35723 32493
+96 41 34 59 98 103 319 189 203 225 369 180 24 6 2 5 1 7 3 11 4 15 35 65 41 61 109 259 429 224 122 132 229 67 59 12 13 9 19 16
+6544 4368 3273 4959 7149 11608 20717 14869 9446 10397 15266 8323 1664 774 325 281 187 195 122 125 140 239 560 590 279 381 496 767 631 588 285 294 306 84 61 23 34 33 36 29
+4671656 5008822 3718194 4283904 4806465 6734062 6618482 7098211 3972752 3997011 3461026 3872230 1963054 1588538 1448173 712976 734013 322234 359865 216620 388254 360103 603428 471194 272182 245369 182776 195020 97038 192655 147037 130281 96018 49169 35550 36426 31503 43723 35751 32325
+92 37 35 63 101 128 284 168 194 217 409 167 30 10 2 6 2 13 4 10 4 15 33 55 35 77 180 290 476 211 107 130 200 62 54 7 19 11 18 13
+6522 4442 3272 5596 6932 12181 21777 15616 9313 10660 17831 9943 2023 876 353 306 204 182 131 140 133 270 552 599 299 401 733 1063 690 613 261 332 358 87 65 46 47 60 57 61
+4721754 4981210 3709015 4370684 4863678 7078404 7009936 7631743 4111642 4412123 3830432 4250778 1965762 1547048 1421045 688999 720904 314082 354230 212224 381961 355085 602474 479724 273030 255283 196950 248627 112253 218757 150038 142861 102772 51564 36324 37066 31918 44785 36315 34133
+90 38 35 76 95 129 281 164 183 204 436 193 45 13 2 7 3 11 4 13 4 20 32 55 40 80 271 326 451 188 90 140 226 61 59 29 37 36 45 57
+6629 4722 3688 5858 6888 12644 21345 15812 9260 9696 18373 10960 2595 1004 445 298 219 198 166 153 155 283 543 565 309 482 789 1159 765 691 292 419 504 101 57 49 59 110 79 67
+4773336 4972457 3710699 4468354 4918338 7430532 7380575 8145306 4245710 4743104 4204471 4669096 1983024 1515912 1396946 665969 708505 307402 349630 208891 376388 351167 601314 485653 274113 269579 212202 304916 129006 248085 153756 160032 113089 54675 36874 37852 32629 48856 37427 36201
+91 43 44 81 92 127 259 156 175 165 415 198 72 18 4 7 3 14 8 17 6 23 31 48 43 102 271 296 445 190 104 170 309 74 44 32 56 101 83 63
+6687 5016 4012 5810 7143 12831 21611 16618 9414 9610 20448 11780 3035 1225 641 376 253 264 196 156 164 343 726 590 332 491 874 1177 790 719 340 389 409 114 53 44 41 121 98 70
+4825113 4982292 3720624 4557215 4978152 7773022 7748762 8677575 4380368 5048943 4622215 5112695 2011103 1500221 1378459 649112 697285 305178 345912 205942 371184 351170 604861 492763 275756 283570 229246 358933 145979 277374 158608 174329 120720 58398 37308 38284 32862 53358 38997 38329
+91 49 52 77 96 120 247 155 172 149 430 194 94 28 9 13 5 26 11 18 6 34 56 51 49 98 280 249 413 178 127 136 222 83 38 25 27 105 113 64
+6721 5473 4370 6060 7636 12718 21883 17412 9894 10119 20158 10689 3618 1051 529 344 300 342 269 170 186 470 717 666 331 448 858 1015 744 710 386 392 358 83 34 43 49 119 133 84
+4876467 5019614 3739452 4656104 5049075 8088021 8114710 9226691 4523934 5367704 5022117 5462651 2053384 1474782 1357570 631301 687547 307879 344153 204030 366672 358976 608089 504115 277333 294080 245455 399756 161353 304352 164515 187953 126857 59993 37245 38628 33294 57467 41423 41190
+90 59 61 80 105 112 240 157 179 148 388 155 121 21 6 11 7 43 21 22 9 61 54 63 48 80 254 178 348 157 146 123 177 43 15 23 38 92 165 81
+6737 5832 4522 5934 7879 13192 21259 17455 9094 9039 18551 12817 4334 962 468 423 313 346 259 211 214 461 848 851 375 475 661 604 625 568 300 260 184 67 52 51 52 150 113 61
+4926948 5076752 3761696 4741319 5124439 8413242 8455572 9745503 4643466 5600988 5370957 5922349 2112912 1445401 1335643 619412 678384 310664 342182 204752 362989 365761 614586 526152 279995 305618 256224 412879 173301 320988 168076 192650 128393 60509 37644 39443 33792 63234 43277 42466
+88 65 65 76 109 116 226 144 153 116 325 180 153 19 5 18 8 43 20 33 12 56 73 93 61 83 173 75 260 104 96 57 59 27 36 32 42 118 124 41
+6885 6293 4646 5304 8038 12929 16018 14636 9190 6255 13176 13455 6766 1136 376 378 234 317 217 206 250 437 1095 1062 512 537 469 275 392 588 242 208 146 65 42 65 38 94 114 57
+4979952 5158785 3786554 4782715 5201987 8702793 8653952 10059995 4762468 5649233 5573691 6393664 2233122 1428473 1311912 605472 667430 311500 339187 205123 360318 370664 627234 559830 286093 320273 261816 405002 178995 337854 170066 193870 128920 60872 37777 41069 33919 65215 45110 43420
+90 72 67 60 110 106 151 107 151 61 202 172 261 27 3 15 4 36 14 32 17 49 110 123 104 95 101 16 133 102 63 36 36 25 23 49 22 49 119 35
+6882 6409 4724 5605 8040 12569 14919 12852 7311 6085 10437 13101 8173 2644 756 523 385 439 305 336 279 709 1386 1139 717 573 385 186 278 473 321 398 406 43 29 32 27 67 101 38
+5031556 5243023 3812786 4840120 5277650 8952854 8819287 10246014 4830468 5684139 5701348 6814952 2386298 1505208 1298487 601276 660610 319781 338516 213459 358455 391984 647005 596218 297279 336261 265121 392130 181633 346643 174026 206690 136080 59861 37575 40570 33762 65418 46565 43149
+88 73 69 65 106 98 132 83 105 56 142 150 307 119 14 29 13 66 28 77 21 108 151 126 164 98 70 8 73 66 101 109 195 11 11 12 11 24 94 15
+7049 6570 4748 5917 8889 12043 12375 11467 6244 3931 5081 7881 5491 5277 1618 746 502 529 398 347 398 1161 1535 1035 1016 907 334 145 161 294 275 383 416 56 29 29 30 38 78 32
+5086141 5332098 3838976 4913250 5373127 9155596 8915463 10335781 4869495 5584615 5688906 6890260 2467091 1739104 1307432 611033 656951 333095 340239 221970 359680 439794 670092 624034 315829 371809 267040 377511 181214 343907 176711 217819 143316 59709 37378 39917 33686 63827 47396 42526
+92 73 69 70 122 88 96 67 79 24 39 62 177 251 65 58 22 87 48 77 43 187 166 102 242 169 52 5 25 26 76 94 191 19 11 10 14 8 56 11
+7197 6334 4339 6476 9552 11221 10361 8886 4440 2552 1973 3029 3048 6733 2999 1847 929 845 631 557 620 1809 1788 1181 1235 732 252 91 110 439 544 388 304 60 28 36 29 42 43 36
+5143147 5401330 3854057 5016336 5483168 9295672 8957755 10261592 4861433 5406340 5597326 6662955 2483418 2048420 1351456 687847 664298 365024 347875 242873 366549 524547 699069 659151 339514 394473 266815 360452 179502 350244 186205 228588 147509 59812 37160 39733 33586 62577 47312 42186
+94 67 57 81 131 75 70 40 40 10 6 10 69 282 168 206 76 157 109 144 97 272 193 115 283 113 30 2 11 56 199 89 118 22 10 16 13 10 17 14
+7372 5798 4065 7834 10072 11887 6405 4558 2431 1850 1468 1213 1330 1814 2750 3700 1925 1142 796 772 1140 2302 1658 977 762 528 165 79 104 317 414 283 196 51 38 26 25 34 36 31
+5203203 5433478 3861758 5196669 5603755 9468261 8897866 9925953 4802217 5195632 5495123 6337717 2455421 2036967 1388015 873896 696922 413285 359538 275730 386539 634504 724000 679627 350517 403244 264372 343679 177679 348705 192139 232260 148836 59356 37203 38946 33386 60911 47051 41559
+96 56 49 106 137 84 26 11 12 6 3 1 13 34 144 358 213 199 147 197 217 299 166 80 143 61 13 1 10 29 130 47 53 16 19 8 9 7 12 11
+7491 5404 3586 9294 9442 11632 4014 1786 1289 1286 2012 1168 1007 1307 1479 2763 1775 1691 1367 945 1163 2590 1401 897 580 503 269 75 68 228 323 238 144 56 29 19 17 21 23 17
+5264802 5439490 3857022 5455881 5705227 9614829 8778353 9440147 4715287 4962915 5409377 6029228 2419866 1995052 1391172 991216 724897 492379 385506 317245 406618 755558 741739 693960 356593 409953 264648 327666 174981 341791 195599 232947 148801 59235 37015 37776 32987 58546 46464 40109
+96 49 38 128 122 77 10 1 3 3 6 1 7 18 48 215 182 265 275 219 208 279 125 66 91 53 34 1 4 15 84 33 28 20 11 4 4 2 5 3
+7487 5097 3592 10867 9826 8235 2831 1706 1162 1126 1393 1116 756 954 678 1061 1024 1826 1610 1144 1131 2302 1386 995 764 706 367 120 56 200 347 231 89 54 30 16 15 17 15 17
+5324761 5426280 3852558 5796182 5813982 9543900 8631583 8978573 4627281 4734330 5309949 5736053 2378783 1933965 1373775 996930 732977 575022 417038 368495 425378 851655 758652 713454 367220 428731 267422 315379 172044 333571 199586 233162 147361 58998 36857 36492 32546 56077 45687 38746
+95 43 39 145 125 39 5 1 3 2 3 1 4 10 10 44 76 251 303 233 189 205 119 76 135 93 63 5 3 12 92 31 11 18 12 3 3 2 2 3
+7359 4884 4047 11601 9722 3967 3273 2282 1298 1313 1199 1064 629 561 671 781 579 1308 1586 1408 1090 1798 1689 1137 751 722 467 131 44 210 349 177 85 39 19 15 14 17 17 14
+5379951 5400777 3859836 6161161 5917363 9215011 8499775 8580080 4544948 4530949 5208044 5457273 2335479 1852398 1356633 985099 729480 620882 447169 432890 442622 911022 782888 740502 377250 447366 272683 304505 168873 326459 203524 230047 145855 57854 36422 35224 32091 53756 44981 37281
+90 40 49 148 121 9 7 3 3 4 2 1 3 3 10 24 24 146 274 246 170 133 152 90 126 89 95 6 2 14 90 18 10 10 5 3 3 2 2 2
+7078 5071 4766 11771 8912 2313 4065 2460 1319 1462 1336 1104 661 300 385 458 447 847 1622 1831 1239 1993 1870 1149 761 909 539 98 41 181 329 179 66 29 23 16 15 15 19 17
+5426580 5388293 3885312 6514687 5997458 8804237 8391503 8216432 4465207 4348924 5112185 5197676 2294074 1759689 1332608 954133 722696 635668 477468 519409 463244 978807 811146 766665 387285 476371 279654 292256 165705 317992 206853 227242 143901 56164 36100 34093 31673 51452 44343 36088
+82 44 68 137 103 3 12 4 4 5 3 2 3 1 3 8 15 69 260 278 191 140 166 86 123 118 116 3 1 11 79 19 6 5 7 3 3 1 3 4
+6974 5359 5352 11851 8003 1654 3087 2005 708 1061 1107 521 562 330 208 431 501 854 1296 1988 1374 1802 2004 1564 861 1228 689 101 24 211 466 184 40 26 25 19 16 21 17 13
+5469387 5394252 3925131 6851917 6052317 8377621 8260934 7846648 4371838 4153184 5012865 4917837 2251172 1674386 1304658 923366 717461 649997 498677 610383 486802 1030791 842124 816755 399626 523235 290285 280926 162181 311876 213601 224912 141331 54391 35837 33214 31291 49655 43670 34721
+79 49 83 129 84 1 7 3 1 3 2 0 2 1 1 8 19 67 186 261 206 113 173 127 141 164 160 4 0 16 132 21 2 5 9 5 4 3 3 2
+6777 5495 5617 12675 9047 1650 908 950 926 583 911 536 441 319 284 376 296 474 1148 2204 1210 1657 2441 1849 1132 1258 654 110 21 161 387 135 49 34 25 19 17 15 18 12
+5506089 5408209 3970731 7219539 6132494 7976356 8077923 7434233 4286373 3939821 4911014 4655709 2206248 1593525 1279349 891066 707117 640120 515574 709169 505580 1070747 883499 881349 418586 569130 299756 270829 158669 303055 218161 219711 139055 53216 35581 32388 30944 47597 43039 33375
+74 50 89 130 101 2 0 0 2 0 1 0 1 1 2 6 6 21 152 248 167 94 211 145 193 154 142 5 0 9 96 11 3 8 9 5 5 2 3 2
+6409 5070 5012 12638 9766 2666 3000 2029 1364 440 507 570 473 392 201 422 489 826 1473 2476 1302 1760 2414 1783 1290 1183 519 149 23 173 355 127 57 19 17 15 17 17 17 16
+5532468 5395217 3999727 7562831 6229049 7661586 7952958 7112853 4214237 3730473 4801378 4411397 2163263 1522000 1252550 863530 701965 652461 540357 818739 526241 1114634 923151 938013 441112 607664 305540 263734 155296 295501 221789 214331 137040 51190 35127 31366 30606 45785 42399 32355
+65 43 71 124 112 5 7 3 4 0 0 0 2 2 1 9 19 63 203 238 177 98 197 125 214 130 97 10 0 11 80 11 5 2 4 3 5 2 3 4
+6090 4448 4295 12871 10292 3806 4252 2626 784 453 441 778 392 148 97 344 416 729 2183 3088 1465 1851 2340 1704 1288 1165 456 90 24 155 291 113 51 42 38 30 33 30 25 30
+5550034 5344791 4009671 7899841 6336640 7435740 7863117 6847434 4129076 3534484 4692792 4194522 2119281 1439776 1223761 832854 695075 658102 582670 959335 550553 1161479 959922 986424 463024 642780 309569 253439 152032 287294 223691 208414 134922 50699 35221 31327 30685 44881 41979 32256
+59 34 52 118 116 12 14 7 1 0 0 1 1 0 0 6 14 48 308 257 198 101 179 109 201 117 76 4 0 10 53 9 4 14 21 15 19 9 7 14
+5585 3613 3559 11617 8285 2489 2397 1298 1199 820 1012 630 356 161 111 407 540 1138 2409 2698 1236 1550 2167 1650 1106 934 419 51 26 156 233 97 68 35 25 17 19 18 15 17
+5554252 5246090 4000553 8139589 6390241 7142531 7728100 6516350 4056649 3372802 4601513 3981566 2075476 1363284 1196049 807889 691527 688533 629704 1067535 568404 1187021 991353 1028612 479737 661597 312551 241366 148901 279641 224062 201869 133292 49807 34980 30491 30404 43294 41314 31365
+50 23 36 97 81 5 4 1 4 2 2 1 1 0 0 9 24 102 319 194 151 71 155 98 156 77 63 1 0 10 34 7 7 10 9 5 6 3 2 4
+4775 2500 3831 9116 5157 3359 2828 1669 717 581 1176 643 516 505 281 507 423 955 1908 1848 811 1276 1256 1140 844 814 331 45 14 119 162 92 48 23 16 12 11 14 14 11
+5537660 5084931 3998615 8211297 6362544 6920365 7607471 6227924 3973709 3206137 4516705 3782186 2036855 1312516 1173374 790566 685077 705895 662757 1117022 574945 1194196 998711 1036936 489335 671912 313209 229649 145541 270174 222609 195409 131191 48231 34515 29398 29926 41556 40640 30158
+36 12 42 62 32 11 6 3 1 1 3 1 2 6 2 16 15 72 225 106 75 48 62 48 101 57 39 1 0 6 16 7 3 4 3 2 2 2 2 2
+3870 2030 3943 7869 5921 3970 1746 1496 339 680 1108 383 296 305 166 480 524 1108 1897 1023 630 1095 969 627 711 565 185 27 13 51 88 70 48 25 17 15 16 18 16 16
+5498348 4904565 3999588 8202090 6355068 6749066 7462194 5946174 3883177 3055554 4432276 3578794 1993574 1252506 1148325 772623 681369 731615 694704 1112853 576696 1189821 998549 1013243 495294 666311 310119 217529 142240 257097 219301 187985 129143 46873 34087 28555 29587 40168 40034 29331
+24 8 44 46 42 16 2 3 0 2 2 0 0 2 0 15 23 88 210 34 45 35 37 15 73 28 12 0 0 1 5 4 3 5 4 4 4 3 3 4
+3253 1658 3326 6431 6279 4815 3015 1086 499 645 860 537 350 403 159 457 614 1623 1004 748 626 1235 1018 627 605 357 113 16 18 54 60 56 47 20 18 18 16 15 17 16
+5444245 4712166 3984765 8105089 6356930 6639960 7352983 5656139 3798995 2911855 4343615 3397067 1952754 1202118 1123723 754344 680055 787432 703026 1092039 578301 1194309 999644 990972 498395 648267 305265 205460 139149 244989 215360 180146 127120 45289 33696 27947 29257 38679 39469 28554
+17 6 31 31 47 25 8 1 0 2 1 1 1 4 0 14 32 142 79 19 45 45 40 15 53 11 4 0 0 1 2 3 3 4 5 6 4 2 3 5
+2884 1224 1304 1151 2178 4466 3583 1487 958 637 476 740 400 366 175 436 755 947 591 541 407 857 1006 409 489 258 53 10 17 62 48 79 63 14 12 14 11 15 14 11
+5382060 4504647 3918625 7689518 6253914 6515958 7261018 5408142 3728648 2776286 4247351 3238715 1914231 1152479 1100144 735871 682378 798369 700583 1059757 574268 1175305 1000405 956644 498453 625223 298999 193747 136109 234099 211210 174191 125557 43431 33161 27130 28807 37279 38841 27516
+14 3 5 1 5 22 11 3 2 2 0 2 1 4 1 13 48 55 28 10 19 22 39 7 34 6 1 0 0 2 1 6 7 2 2 4 2 3 2 2
+2409 1151 2225 4145 3129 2902 1986 1378 1023 494 417 484 303 158 287 781 579 409 531 377 292 456 389 198 243 204 53 13 16 66 36 74 67 17 15 14 9 10 11 12
+5309285 4305093 3877679 7482824 6177780 6303308 7130525 5168327 3661718 2640065 4151982 3074135 1874190 1093040 1080017 739703 680144 775596 696667 1019336 567396 1132805 985375 911412 492221 600244 292889 182921 133120 224108 206857 168286 124135 41869 32716 26362 28317 35656 38152 26602
+10 3 14 14 12 10 3 3 3 1 0 1 1 0 2 44 28 10 22 5 10 6 6 1 8 4 1 0 0 2 0 6 8 3 3 4 1 1 1 3
+1788 859 1354 1526 1418 2241 1272 729 435 381 771 512 249 374 473 778 388 166 295 245 157 229 243 144 101 133 47 12 12 31 35 47 41 7 11 11 10 10 11 11
+5222453 4099572 3815490 7127626 6059809 6062806 6985038 4903027 3581428 2505075 4068043 2921150 1833768 1050437 1065147 743121 673083 739260 686816 973230 557245 1078908 966988 865576 482515 572402 286778 172683 130103 212566 202587 161076 122084 39787 32180 25456 27865 34131 37480 25681
+5 2 5 2 2 6 1 1 0 1 1 1 0 5 8 43 13 1 7 2 3 1 2 1 1 2 0 0 0 0 0 2 3 0 2 2 2 1 1 2
+1715 795 939 1457 772 1628 1221 696 340 299 621 404 236 348 332 275 151 110 323 260 138 207 173 118 106 80 18 7 11 33 26 36 35 10 9 9 8 11 9 8
+5135922 3902450 3744245 6789500 5928269 5799072 6841879 4651617 3500714 2373146 3982365 2770708 1794023 1008793 1047044 715430 660140 701664 677927 930812 546861 1026893 947271 820893 473179 542974 280079 162752 127136 201839 198194 153623 119931 38014 31606 24481 27373 32759 36774 24631
+5 1 2 2 0 3 1 1 0 0 1 0 0 4 4 5 2 0 8 3 2 1 1 0 1 0 0 0 0 0 0 1 2 1 1 1 1 1 1 1
+2101 889 741 1109 796 741 945 604 274 79 243 230 162 185 163 132 110 140 197 296 111 135 170 51 75 57 25 14 11 15 11 16 14 9 10 11 9 10 9 9
+5061418 3722930 3669717 6450280 5800627 5496667 6695239 4409639 3420328 2235616 3889163 2618602 1753379 959633 1025072 680615 646472 668167 666039 893151 536046 973575 927969 774774 463284 513898 273726 153847 124243 190650 193527 145388 117294 36286 31072 23688 26919 31407 36086 23706
+8 2 1 1 0 0 0 0 0 0 0 0 0 1 1 1 1 1 3 4 1 0 1 0 0 0 0 0 0 0 0 0 0 1 1 3 1 1 1 2
+1843 928 1145 1460 922 542 437 140 64 75 127 225 134 111 114 55 54 177 143 113 127 70 79 19 38 27 15 16 11 11 17 18 15 10 7 8 12 13 9 8
+4982179 3556577 3607377 6152977 5679392 5200179 6539274 4153672 3336581 2106092 3795322 2475315 1713034 908876 1002396 643158 631714 638953 653067 846506 525910 919463 906823 729456 452690 484724 267276 145599 121422 179887 189130 137770 114749 34723 30475 22758 26553 30321 35415 22775
+6 3 4 2 1 0 0 0 0 0 0 0 0 0 0 0 0 3 1 0 2 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 3 3 1 1
+1581 1131 761 653 758 530 309 244 141 41 95 111 75 107 92 41 62 96 65 82 53 54 99 20 12 19 12 9 10 14 18 15 8 10 6 8 10 8 8 9
+4898221 3412676 3536777 5823932 5556991 4920742 6383930 3919452 3256892 1982250 3703006 2333621 1672188 860919 979724 607089 617529 606515 638425 800755 514136 867615 886716 686919 441696 456809 260910 137416 118646 169954 184868 130425 112088 33254 29867 21884 26145 28993 34735 21961
+5 4 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 2 1 0 2
+1385 1082 393 364 255 344 168 131 137 43 40 65 23 25 51 30 22 45 35 54 38 31 65 23 12 18 11 8 10 12 13 14 9 13 9 6 7 7 9 10
+4811348 3274399 3458533 5496873 5424788 4646643 6228860 3692342 3179091 1865961 3611588 2197602 1631032 810801 956570 572508 602675 572890 623382 756029 502272 817464 866242 647118 430976 430507 254677 129662 115939 160494 180585 123459 109519 32057 29351 20939 25671 27683 34097 21257
+4 4 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 1 1 1 1 1 2
+650 793 558 200 202 258 104 94 98 35 40 55 34 19 31 28 19 17 26 64 22 25 37 26 8 12 13 10 13 17 12 14 10 11 11 10 10 7 8 7
+4707856 3126662 3386460 5179361 5294530 4383706 6076025 3476585 3102235 1756158 3522453 2069130 1591185 763322 933483 539879 588115 539562 608484 714601 490295 769954 845563 609889 420421 405414 248651 122496 113377 151909 176383 116911 107040 30809 28899 20297 25285 26452 33450 20411
+0 2 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 2 3 2 1 1 1
+376 722 438 91 44 40 48 53 42 20 32 28 13 13 8 12 15 24 15 36 14 19 11 18 11 13 7 10 13 15 13 13 14 10 8 9 12 8 8 8
+4599943 2983427 3313118 4874202 5163485 4123151 5925573 3271254 3025866 1652021 3435338 1946707 1551795 718323 910384 508224 573817 508664 593677 673938 478413 724925 824736 574403 410207 381888 242622 115760 110879 143716 172312 110695 104725 29574 28382 19632 24960 25356 32819 19677
+0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 2 3 1 1 2
+301 365 247 67 29 37 34 25 7 14 21 10 8 8 13 8 15 19 19 21 15 11 11 9 14 9 7 9 11 14 14 12 16 13 7 6 8 7 5 6
+4492807 2826853 3236725 4585878 5035328 3878045 5778519 3076522 2950509 1553763 3350117 1830523 1513261 675716 887990 478223 559876 479312 579342 634793 466853 682107 804428 540493 400324 359528 236744 109367 108392 135953 168368 104790 102519 28598 27852 18822 24541 24264 32127 18865
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 0 1 1 1 0 1
+163 87 75 74 37 50 44 14 13 16 26 18 17 14 20 16 17 16 25 23 13 12 14 13 3 7 12 12 17 17 12 14 10 10 8 10 9 8 8 5
+4384818 2662593 3157842 4315282 4910575 3648443 5635392 2892798 2877186 1461524 3267151 1721801 1475919 636034 866334 450513 546334 451537 565518 598120 455531 641919 784704 508863 390407 338387 231140 103542 106120 128840 164471 99362 100215 27496 27361 18307 24158 23299 31529 18040
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 1 3 2 1 1 0
+33 35 20 19 40 61 36 16 5 8 8 12 13 8 9 11 11 14 14 11 7 14 9 9 9 8 8 13 12 16 11 12 9 9 8 9 9 7 7 9
+843 2150 511 1167 1022 3747 920 983 127 491 204 737 332 491 230 675 281 860 357 675 178 860 230 552 230 491 204 798 306 983 281 737 230 552 204 552 230 430 178 552
+179 168 88 72 227 297 200 53 6 14 15 31 39 14 19 26 28 41 45 26 11 41 19 18 19 14 15 36 33 53 28 31 19 18 15 18 19 11 11 18
+22 21 15 13 33 30 31 15 12 7 7 6 7 6 5 4 7 8 13 8 7 4 5 13 12 7 6 6 11 13 11 11 8 11 10 7 6 7 8 5
+1384 3311 881 1895 1840 5365 1689 1845 430 891 377 1061 502 830 352 880 452 1299 680 1126 352 1054 352 1317 531 891 352 1118 579 1722 555 1368 428 1194 454 948 377 834 378 826
+91 64 47 30 160 96 149 40 33 10 11 7 11 7 5 3 11 12 37 13 11 3 5 33 32 10 8 7 27 31 27 23 14 24 22 10 8 10 14 5
+18 15 9 9 22 21 24 15 7 4 6 4 7 5 7 5 7 5 9 13 10 7 11 10 11 9 6 9 12 10 9 8 9 10 7 8 10 8 5 5
+1809 4033 1089 2334 2356 6333 2260 2655 598 1083 520 1243 668 1087 522 1134 619 1528 893 1857 598 1420 624 1852 798 1390 496 1603 871 2233 771 1777 647 1736 621 1382 623 1275 496 1083
+58 29 16 13 80 43 94 36 10 3 8 3 10 5 11 5 10 4 17 30 22 9 26 18 26 15 8 15 30 17 17 11 17 18 10 12 22 12 5 5
+27 27 16 14 20 27 24 13 7 9 9 5 10 8 5 6 6 7 10 12 7 7 6 9 5 4 6 10 9 14 15 10 11 10 8 10 8 8 8 6
+2454 5449 1470 3054 2808 7611 2817 3294 762 1570 737 1475 906 1513 636 1434 756 1866 1126 2482 762 1764 761 2293 905 1552 636 2121 1079 2959 1135 2284 912 2246 809 1913 811 1690 688 1386
+111 79 49 29 62 62 87 24 10 15 17 4 21 12 5 6 7 8 20 23 10 8 7 13 5 3 7 17 16 30 45 16 25 17 13 17 13 11 14 7
+22 39 27 7 7 26 27 15 9 8 7 5 8 9 9 13 10 12 15 8 4 9 4 3 4 5 6 9 9 11 12 13 11 11 14 11 10 9 6 5
+2955 7518 2123 3300 2916 8751 3436 4017 973 1967 897 1693 1087 1975 850 2146 992 2491 1481 2824 845 2211 844 2339 984 1766 773 2546 1282 3457 1413 2945 1170 2787 1146 2474 1046 2141 824 1610
+73 120 115 7 7 51 99 29 16 11 10 4 13 14 17 29 20 23 43 9 3 13 3 1 3 4 7 13 16 17 27 25 24 18 39 19 20 14 7 4
+16 31 22 14 14 19 13 11 12 10 13 12 10 5 4 8 7 4 12 9 6 13 10 6 5 8 6 7 7 9 9 11 10 11 9 7 7 8 6 6
+3290 8971 2632 3962 3201 9393 3682 4451 1255 2463 1206 2328 1315 2163 931 2508 1146 2587 1750 3207 977 2877 1078 2567 1087 2151 907 2823 1428 3802 1607 3444 1396 3295 1347 2755 1198 2504 956 1882
+37 72 77 26 28 26 23 15 28 16 33 24 19 4 3 10 9 2 26 11 7 26 20 5 5 11 7 7 9 11 15 17 19 17 15 7 9 10 7 6
+15 17 6 16 16 6 11 8 7 11 8 6 6 6 3 7 4 4 5 5 5 5 5 7 8 11 6 4 7 8 8 9 11 9 10 7 7 7 6 6
+3591 9477 2719 4707 3530 9198 3871 4675 1402 2991 1380 2556 1435 2401 984 2787 1219 2677 1834 3321 1080 3011 1178 2843 1264 2697 1037 2899 1571 4065 1771 3790 1642 3650 1568 3019 1347 2783 1085 2137
+31 20 5 30 36 2 16 7 9 18 12 5 6 5 1 7 3 2 4 3 5 3 5 7 12 19 7 2 9 8 11 11 22 11 18 7 9 7 7 6
+7 9 8 18 14 19 19 10 3 6 8 9 9 8 7 7 7 11 7 8 7 5 7 8 5 6 7 6 9 8 11 13 9 9 7 7 6 8 8 6
+3680 9461 2855 5530 3799 9813 4260 5008 1443 3180 1550 2955 1629 2748 1138 3049 1367 3192 1967 3613 1231 3137 1327 3163 1360 2903 1190 3093 1761 4312 2007 4361 1831 3983 1707 3267 1466 3107 1262 2377
+6 5 9 35 26 25 46 11 1 5 12 12 15 10 9 7 9 17 8 8 9 3 9 9 4 5 9 5 14 8 21 21 14 10 9 7 6 9 12 6
+10 9 6 17 10 7 8 7 4 10 6 3 6 8 7 4 5 7 7 6 5 8 8 5 5 5 5 3 4 7 10 10 9 11 6 7 5 6 6 5
+3843 9446 2937 6242 3959 9654 4358 5137 1509 3603 1664 2962 1741 3074 1288 3111 1460 3430 2096 3764 1328 3440 1498 3280 1453 3036 1288 3091 1819 4483 2212 4713 2015 4419 1817 3501 1557 3289 1383 2541
+13 5 5 29 13 3 8 5 3 13 6 1 6 9 9 2 4 7 8 4 4 9 12 3 4 3 4 1 2 6 17 12 14 15 6 6 4 5 7 4
+6 5 3 8 7 4 6 5 3 5 6 5 4 5 4 3 4 8 6 4 5 7 7 7 5 7 7 6 6 6 7 9 9 10 5 5 6 5 7 5
+3900 9186 2940 6358 4039 9320 4402 5135 1548 3694 1775 3091 1799 3196 1358 3108 1525 3715 2197 3783 1422 3663 1639 3513 1544 3283 1434 3274 1926 4582 2335 4983 2194 4768 1899 3598 1671 3398 1527 2695
+4 1 1 6 6 1 4 2 1 3 6 3 2 3 3 1 3 8 6 2 4 6 9 6 4 7 9 5 6 4 8 9 13 12 4 3 6 3 9 3
+3 3 7 13 16 9 11 6 4 6 8 5 3 4 5 5 5 6 6 7 7 7 5 6 8 7 8 9 12 11 8 8 8 9 8 9 9 9 8 6
+3879 8819 3045 6775 4347 9313 4573 5195 1611 3840 1935 3212 1830 3249 1451 3228 1614 3860 2295 3986 1565 3873 1725 3670 1709 3516 1602 3630 2184 4982 2481 5175 2343 5034 2056 3935 1859 3747 1693 2901
+1 0 7 15 32 5 14 4 3 4 11 3 1 2 4 3 4 4 6 6 9 6 4 4 11 6 12 11 24 14 10 7 10 9 11 10 14 11 11 5
+10 9 3 6 7 10 8 5 4 4 2 6 10 7 9 6 7 7 7 10 10 7 6 5 5 8 7 6 5 8 8 9 8 10 10 13 9 8 8 6
+4037 8842 3045 6737 4417 9368 4663 5190 1673 3855 1937 3387 2039 3484 1644 3402 1752 4058 2416 4361 1781 4070 1835 3756 1794 3796 1740 3780 2257 5174 2623 5417 2489 5346 2260 4497 2042 4013 1855 3095
+13 6 1 3 6 7 7 2 2 2 0 5 17 6 15 5 9 6 8 12 18 6 6 3 4 8 9 4 4 7 10 8 10 11 16 21 14 8 11 5
+8 9 3 9 6 7 7 4 4 5 5 3 8 7 4 7 5 4 6 7 6 7 6 5 6 4 8 9 7 8 8 9 7 7 9 8 6 9 6 5
+4140 8864 3045 6885 4460 9236 4725 5124 1733 3930 2016 3368 2192 3705 1705 3627 1836 4060 2509 4529 1889 4255 1942 3837 1902 3813 1901 4106 2379 5355 2762 5644 2605 5455 2433 4718 2144 4325 1962 3216
+8 6 1 7 4 3 5 1 2 3 4 1 10 6 2 6 4 2 5 6 6 6 6 3 6 2 11 10 8 7 10 8 7 5 13 7 6 10 6 3
+1 2 4 12 8 7 5 8 6 4 4 6 5 6 5 2 3 5 4 6 7 8 5 6 5 5 5 8 7 9 10 7 7 6 7 9 8 9 6 4
+4062 8455 3071 7209 4553 9111 4734 5308 1843 3939 2067 3534 2265 3851 1790 3532 1866 4123 2548 4625 2020 4491 2021 3975 1982 3891 1981 4351 2498 5586 2948 5735 2718 5496 2551 4987 2294 4618 2066 3268
+0 0 2 12 7 3 3 7 6 2 2 5 4 4 4 0 1 3 2 4 8 7 4 4 4 3 4 8 8 8 15 5 7 3 7 9 10 9 6 2
+2 5 7 11 7 3 5 4 3 2 4 4 7 6 7 2 5 6 4 4 7 6 5 9 4 5 5 7 6 8 10 8 6 5 9 8 8 7 6 5
+4011 8254 3173 7452 4618 8748 4743 5235 1873 3825 2117 3567 2387 3988 1924 3442 1947 4244 2586 4593 2148 4590 2098 4289 2034 3964 2059 4520 2589 5742 3130 5882 2803 5473 2717 5179 2441 4770 2167 3379
+0 2 7 10 5 0 3 1 1 0 2 2 8 4 8 0 4 4 2 1 8 4 4 10 2 3 4 6 5 6 14 6 5 2 12 7 10 5 6 3
+5 2 7 10 8 8 5 6 3 2 3 2 6 4 4 6 3 2 2 5 5 8 5 8 6 6 6 8 7 7 6 8 7 6 5 5 6 7 6 5
+4038 7881 3272 7619 4707 8714 4752 5289 1902 3718 2140 3475 2480 3994 1978 3604 1975 4112 2572 4624 2222 4806 2173 4523 2136 4094 2160 4740 2703 5827 3205 6020 2911 5513 2776 5175 2533 4913 2266 3483
+3 0 7 8 7 4 3 4 1 0 1 0 5 2 2 5 1 0 0 3 4 7 4 7 6 4 6 7 7 5 5 6 7 3 3 2 5 5 6 3
+8 6 6 9 8 10 6 6 4 3 3 2 6 5 4 5 4 3 4 6 5 7 4 6 7 8 8 9 5 4 6 7 7 8 6 7 6 7 6 7
+4141 7776 3343 7714 4793 8805 4786 5340 1956 3679 2163 3389 2571 4061 2030 3694 2027 4049 2610 4715 2294 4947 2221 4620 2261 4339 2310 5008 2763 5723 3278 6088 3017 5673 2860 5294 2623 5048 2362 3704
+8 3 5 6 7 7 4 4 2 1 1 0 5 3 2 3 2 1 2 4 4 5 2 4 8 8 10 9 3 1 5 5 7 6 5 5 5 5 6 6
+5 6 8 5 5 5 5 3 5 6 3 6 5 4 3 7 6 4 5 6 6 5 5 6 6 4 5 8 4 4 6 6 6 7 7 6 7 6 6 6
+4165 7678 3464 7558 4801 8583 4794 5203 2034 3826 2185 3554 2634 4063 2056 3902 2129 4051 2672 4800 2390 4957 2293 4711 2357 4324 2380 5199 2796 5625 3349 6091 3095 5762 2967 5344 2736 5113 2456 3850
+3 3 9 2 2 1 2 1 4 4 1 5 4 2 1 6 6 2 3 4 5 2 4 4 6 2 4 7 2 1 5 3 5 5 7 4 7 4 5 4
+5 3 9 7 7 6 7 4 5 3 4 8 7 6 4 5 4 6 5 4 5 4 3 8 9 4 5 8 6 6 7 6 7 7 7 4 7 6 6 5
+4188 7401 3607 7534 4860 8436 4853 5136 2111 3780 2232 3832 2747 4187 2106 3975 2178 4176 2733 4757 2458 4905 2312 4919 2528 4310 2448 5378 2879 5656 3444 6094 3196 5846 3071 5269 2846 5174 2548 3926
+3 0 11 4 5 2 5 1 4 1 2 8 7 4 2 3 2 4 3 1 4 1 1 7 13 2 4 7 5 3 6 3 7 5 7 1 7 4 5 3
+5 3 9 7 6 4 6 5 4 6 4 5 5 5 4 3 4 8 4 3 4 6 5 7 9 6 5 7 8 8 7 6 6 8 8 5 7 6 7 5
+4211 7141 3747 7512 4892 8175 4885 5135 2160 3921 2278 3909 2806 4242 2155 3920 2225 4416 2767 4655 2498 4979 2382 5053 2694 4420 2514 5485 3011 5808 3536 6096 3269 5986 3198 5260 2953 5232 2663 3997
+3 0 11 4 4 1 4 2 2 4 2 3 3 3 2 1 2 8 2 1 2 4 4 5 12 4 4 5 9 6 6 3 5 6 9 2 7 4 7 3
+8 4 4 9 8 12 8 6 5 9 5 5 4 5 3 6 5 5 7 5 4 7 6 7 8 9 8 8 8 6 6 10 8 7 7 6 5 5 6 5
+4310 6958 3755 7614 4974 8421 4967 5195 2233 4238 2348 3981 2838 4294 2177 4053 2297 4458 2876 4682 2537 5110 2475 5179 2831 4707 2655 5647 3140 5828 3601 6344 3391 6056 3297 5313 3007 5225 2749 4064
+8 1 2 6 7 11 7 4 4 10 4 3 2 3 1 4 4 3 7 3 2 5 5 5 9 9 10 6 9 3 5 9 9 5 7 4 3 2 5 3
+4 5 7 5 4 6 5 4 3 5 3 4 3 5 3 5 4 5 6 4 3 5 5 6 5 10 6 8 8 7 5 4 5 6 6 6 6 5 4 5
+4304 6847 3840 7464 4952 8284 4970 5129 2253 4290 2366 3987 2843 4343 2199 4117 2341 4497 2957 4646 2550 5110 2541 5236 2888 5038 2742 5799 3266 5908 3638 6209 3434 6061 3368 5362 3085 5218 2782 4127
+2 2 6 2 1 2 2 1 1 3 1 2 1 3 1 3 2 3 5 1 1 2 4 4 3 11 5 6 9 5 3 1 3 3 5 4 5 2 2 3
+2 1 4 5 6 6 4 3 3 6 3 3 7 6 3 4 4 5 5 6 5 6 5 5 3 5 7 9 7 7 7 3 6 8 8 8 7 5 5 4
+4247 6497 3846 7323 4981 8155 4948 5005 2273 4401 2383 3932 2950 4451 2220 4115 2384 4534 3010 4735 2614 5172 2605 5229 2892 5042 2852 6004 3363 5983 3726 6020 3501 6188 3488 5531 3186 5212 2840 4125
+0 0 2 2 4 2 1 1 1 4 1 1 7 4 1 2 2 3 3 4 4 4 4 2 1 2 7 8 7 5 6 0 5 6 9 6 7 2 3 2
+3 1 2 6 5 4 3 5 2 3 3 4 11 6 6 6 3 5 4 4 4 4 4 5 4 5 7 8 7 8 7 5 7 6 6 6 8 6 6 4
+4217 6168 3801 7252 4984 7911 4901 5011 2267 4321 2400 3941 3157 4552 2317 4236 2401 4569 3037 4696 2650 5107 2642 5222 2922 5046 2959 6135 3457 6115 3811 5966 3592 6185 3554 5567 3310 5267 2922 4123
+1 0 0 3 2 1 1 2 0 1 1 2 18 4 6 4 1 3 2 1 2 1 2 2 2 2 7 6 6 6 6 2 6 3 5 3 9 4 5 2
+5 4 3 2 3 4 3 5 3 2 4 3 5 5 4 5 3 5 6 5 6 4 5 7 7 7 6 5 3 6 9 7 9 7 7 7 9 5 5 4
+4239 6043 3782 6939 4936 7682 4855 5017 2287 4184 2442 3888 3206 4586 2361 4289 2417 4602 3114 4721 2737 5046 2703 5338 3027 5173 3038 6074 3447 6116 3945 6038 3732 6243 3644 5663 3457 5258 2976 4121
+3 1 1 0 1 1 1 2 1 0 2 1 3 3 2 3 1 3 5 3 5 1 3 5 7 5 5 2 1 3 10 5 11 4 6 5 11 2 3 2
+6 4 3 4 3 4 3 3 4 2 3 7 6 3 1 2 5 4 5 3 6 4 5 6 5 8 5 5 6 7 8 7 9 8 6 6 5 6 6 7
+4286 5926 3764 6768 4889 7466 4810 4900 2332 4055 2457 4084 3279 4495 2327 4154 2484 4571 3164 4622 2822 4989 2763 5386 3079 5354 3089 6016 3514 6179 4051 6105 3868 6359 3706 5691 3498 5311 3055 4303
+4 1 1 1 1 1 1 1 2 0 1 6 5 1 0 0 4 1 3 1 5 1 3 4 3 7 3 2 5 4 8 4 10 6 4 3 3 4 5 6
+5 6 3 5 4 3 4 3 4 3 4 2 3 5 3 2 7 7 6 5 4 6 5 7 4 6 6 5 7 7 5 7 6 7 6 6 5 6 4 4
+4306 5939 3746 6669 4869 7202 4792 4790 2376 3996 2497 3961 3273 4532 2345 4027 2600 4726 3238 4651 2853 5058 2821 5492 3104 5401 3165 5962 3605 6238 4077 6168 3924 6407 3766 5718 3538 5360 3080 4290
+3 3 1 2 1 0 1 1 2 1 2 0 1 3 1 0 7 5 5 3 2 4 3 5 2 3 5 2 6 4 3 4 4 4 4 3 3 4 2 2
+4 3 2 3 6 8 6 1 3 3 7 4 5 5 4 2 3 5 6 4 4 5 5 6 6 10 9 6 5 7 7 5 5 7 7 10 6 6 5 5
+4300 5766 3703 6453 4900 7261 4825 4564 2393 3940 2613 3969 3319 4567 2388 3908 2611 4749 3310 4617 2884 5061 2878 5531 3179 5691 3316 5972 3642 6293 4154 6105 3953 6452 3850 5989 3603 5407 3130 4339
+2 0 0 0 4 5 4 0 1 1 7 2 3 3 2 0 1 3 5 1 2 2 3 3 5 10 11 3 3 4 6 2 3 4 6 10 5 3 3 3
+3 2 3 4 3 4 6 4 2 3 4 6 4 4 7 6 2 4 8 8 5 6 6 7 9 5 6 6 7 3 5 5 6 7 7 6 6 6 5 7
+4269 5542 3687 6311 4854 7071 4857 4535 2384 3887 2650 4099 3338 4538 2507 4042 2596 4709 3431 4831 2939 5125 2959 5629 3329 5656 3386 5982 3730 6099 4178 6045 4007 6494 3932 5998 3666 5451 3179 4508
+1 0 1 1 1 1 4 1 0 1 2 4 2 1 8 4 0 1 9 7 3 4 5 5 11 2 5 3 6 0 3 2 4 4 6 3 4 3 3 6
+5 5 6 6 2 3 6 4 2 2 4 5 9 6 6 5 5 4 5 7 5 7 7 5 7 5 6 7 7 3 5 7 7 8 9 4 5 6 5 4
+4290 5516 3748 6300 4783 6831 4889 4508 2375 3776 2686 4160 3484 4634 2597 4106 2659 4672 3473 4971 2993 5247 3064 5598 3424 5623 3454 6053 3815 5917 4201 6112 4085 6595 4063 5883 3702 5492 3227 4483
+3 2 4 3 0 0 4 1 0 0 2 3 11 4 5 3 4 1 3 5 3 5 7 2 7 2 5 5 6 0 3 4 6 6 10 1 3 3 3 1
+3 2 4 3 4 3 4 4 3 4 5 4 4 4 3 4 4 5 5 5 5 4 5 7 5 5 5 7 6 5 8 9 6 6 6 5 5 6 5 4
+4259 5307 3756 6106 4765 6605 4869 4483 2392 3795 2746 4156 3499 4601 2608 4105 2694 4698 3514 4979 3046 5177 3115 5692 3466 5592 3495 6119 3873 5869 4300 6298 4136 6567 4114 5837 3737 5531 3274 4459
+1 0 2 0 1 0 1 1 1 2 3 2 2 1 1 2 2 3 3 2 3 1 3 5 3 2 3 4 4 2 8 8 4 3 4 2 3 3 3 1
+4 6 6 3 5 5 3 1 2 2 3 3 2 6 4 2 4 5 3 5 5 3 4 4 4 4 3 5 8 6 8 8 7 6 7 6 6 5 4 3
+4254 5357 3815 5923 4773 6515 4824 4275 2383 3690 2754 4090 3462 4693 2645 3981 2728 4723 3502 4987 3097 5050 3139 5596 3481 5502 3484 6059 3980 5885 4397 6411 4211 6541 4190 5855 3797 5506 3294 4375
+2 4 4 0 2 2 1 0 0 0 1 1 0 4 2 0 2 3 1 2 3 1 2 1 2 1 1 2 8 3 8 6 6 3 6 3 4 2 2 1
+2 3 4 4 4 5 3 1 3 2 2 3 2 3 2 2 2 3 3 4 3 3 4 5 4 4 4 5 5 4 5 6 5 5 4 5 5 4 3 3
+4198 5219 3822 5813 4756 6431 4780 4079 2400 3591 2736 4028 3426 4595 2630 3865 2711 4623 3491 4933 3096 4931 3162 5567 3496 5417 3499 6002 4008 5777 4415 6394 4233 6455 4187 5810 3830 5421 3288 4296
+0 1 2 1 1 2 1 0 1 0 0 1 0 1 0 0 0 1 1 1 1 1 2 2 2 1 2 2 3 1 3 3 3 2 2 2 3 1 1 1
+6 4 5 4 3 4 3 2 6 6 5 5 4 3 3 3 4 3 4 7 3 4 5 6 6 4 6 7 7 5 6 5 8 5 4 7 7 7 5 3
+4246 5151 3854 5709 4713 6290 4737 3957 2493 3744 2795 4093 3442 4503 2641 3817 2745 4529 3506 5067 3095 4880 3210 5601 3562 5337 3565 6071 4086 5737 4458 6317 4331 6374 4184 5891 3913 5525 3333 4222
+4 1 3 1 1 1 1 0 5 4 3 3 2 1 1 1 2 1 2 5 1 1 3 3 5 1 5 5 6 2 4 2 8 2 2 5 6 5 3 1
+6 6 2 3 3 5 2 2 6 7 4 6 8 4 4 4 5 3 2 6 5 3 4 4 3 4 6 7 7 6 6 6 8 6 5 6 8 7 4 3
+4293 5210 3808 5550 4672 6219 4669 3842 2584 3949 2827 4216 3560 4478 2677 3833 2804 4441 3469 5131 3145 4771 3232 5510 3549 5262 3629 6136 4162 5761 4500 6306 4427 6360 4207 5906 4019 5623 3352 4153
+4 4 0 0 1 2 0 0 5 6 2 4 9 1 2 2 3 1 0 4 3 1 2 1 1 1 5 4 6 3 4 3 8 3 3 3 8 5 2 1
+5 3 3 4 3 3 7 6 9 5 3 3 5 5 4 5 3 3 4 4 7 4 6 5 3 4 5 4 4 5 4 8 9 6 7 6 5 6 5 3
+4313 5081 3789 5462 4632 6030 4731 3980 2749 4019 2833 4147 3598 4516 2712 3910 2810 4358 3484 5068 3245 4730 3304 5486 3537 5192 3666 6013 4160 5722 4489 6419 4546 6347 4280 5920 4046 5654 3396 4088
+3 1 1 1 1 0 5 4 12 3 1 1 3 3 2 3 1 1 2 1 7 1 5 2 1 1 3 1 2 2 1 6 10 3 6 3 3 3 3 1
+6 4 4 2 2 3 6 7 6 5 5 6 3 3 3 3 4 3 5 4 5 6 5 7 4 6 6 6 5 6 6 7 7 7 7 6 6 5 5 3
+4358 5021 3796 5257 4567 5852 4766 4171 2833 4085 2890 4266 3584 4429 2720 3859 2842 4280 3524 5009 3291 4814 3349 5586 3550 5249 3727 6020 4183 5747 4530 6463 4611 6396 4352 5933 4098 5621 3439 4027
+4 1 2 0 0 0 4 6 5 3 3 4 1 1 1 1 2 1 3 1 3 4 3 5 2 4 4 3 3 3 4 4 6 4 6 3 4 2 3 1
+2 5 7 2 4 3 3 4 3 4 3 5 3 3 4 5 5 3 4 4 4 5 6 6 3 4 4 7 7 7 8 7 6 5 5 5 5 6 5 3
+4300 5026 3880 5064 4555 5685 4723 4166 2838 4085 2894 4317 3571 4347 2754 3934 2898 4207 3538 4954 3311 4832 3418 5619 3538 5179 3736 6088 4257 5832 4621 6505 4649 6319 4371 5884 4123 5652 3480 3969
+0 2 6 0 1 0 1 2 1 2 1 3 1 1 2 3 3 1 2 1 2 2 5 3 1 1 2 5 6 5 7 4 4 2 3 2 3 3 3 1
+5 7 9 4 4 4 3 3 1 2 3 4 4 3 4 5 5 5 2 3 4 5 6 5 6 5 5 6 5 8 6 6 5 6 7 7 4 5 4 4
+4320 5154 4013 5005 4543 5589 4681 4100 2792 3962 2898 4303 3584 4270 2787 4005 2953 4261 3500 4841 3330 4849 3486 5589 3603 5175 3770 6091 4278 5973 4659 6483 4660 6308 4440 5961 4122 5620 3495 3976
+3 5 10 1 1 1 1 1 0 0 1 2 2 1 2 3 3 3 0 1 2 2 5 2 5 2 3 3 3 6 4 3 3 3 6 5 2 2 2 2
+2 2 7 6 5 5 3 2 3 4 4 5 4 5 5 3 4 5 3 5 5 6 4 4 4 4 5 5 6 7 6 5 6 5 4 5 6 5 6 4
+4263 4967 4091 5073 4557 5560 4640 3976 2798 3970 2927 4352 3596 4320 2845 3949 2981 4312 3489 4857 3374 4926 3501 5499 3615 5110 3803 6032 4324 6044 4696 6401 4697 6236 4431 5910 4172 5590 3561 3983
+0 0 6 4 3 2 1 0 1 2 2 3 2 3 3 1 2 3 1 2 3 4 2 1 2 1 3 2 4 5 4 2 4 2 2 2 4 2 5 2
+4 2 4 2 3 7 4 2 3 3 4 4 4 4 7 3 4 8 6 5 5 5 4 3 3 5 5 6 7 6 4 7 9 6 5 6 6 4 4 3
+4258 4791 4091 4891 4519 5656 4626 3860 2804 3916 2956 4336 3608 4306 2952 3896 3008 4544 3555 4872 3417 4937 3515 5353 3601 5110 3835 6038 4394 6049 4681 6447 4809 6230 4448 5924 4221 5500 3574 3928
+2 0 2 0 1 5 1 0 1 1 2 2 2 2 7 1 2 7 5 2 3 2 2 1 1 2 3 3 6 3 1 4 9 3 3 3 4 1 2 1
+3 3 3 2 4 5 5 7 6 4 7 6 4 5 5 4 3 4 7 7 5 7 5 6 4 3 4 5 8 9 6 5 7 7 9 8 6 4 4 4
+4228 4687 4065 4720 4508 5623 4638 4058 2887 3926 3061 4444 3620 4354 3006 3907 3009 4517 3645 5009 3459 5070 3555 5400 3613 4987 3841 5982 4488 6239 4717 6367 4867 6286 4567 6060 4269 5415 3587 3938
+1 1 1 0 1 2 3 6 5 2 7 4 2 3 3 2 1 1 6 5 3 5 3 3 2 1 2 2 7 8 4 2 5 4 9 6 4 1 2 2
+6 7 3 4 7 7 4 4 4 4 3 4 5 5 4 2 3 4 3 3 4 5 4 3 5 5 8 6 6 7 4 5 5 4 8 7 5 5 5 4
+4275 4835 4040 4682 4574 5715 4624 4060 2917 3936 3061 4423 3657 4399 3033 3795 3010 4491 3630 4892 3474 5072 3568 5260 3650 4994 3949 5991 4529 6294 4701 6292 4873 6154 4657 6126 4290 5397 3625 3947
+4 5 1 1 6 5 1 2 2 2 1 2 3 3 2 0 1 1 1 1 2 2 2 1 3 2 8 3 4 4 1 2 2 1 7 4 3 2 3 2
+7 5 4 7 6 5 8 5 5 4 4 3 4 5 4 3 3 3 2 2 3 4 5 4 4 5 6 6 7 6 5 6 6 4 4 4 3 4 4 4
+4347 4852 4041 4831 4613 5679 4713 4123 2971 3945 3086 4341 3667 4442 3059 3751 3011 4405 3590 4721 3463 5013 3606 5190 3661 5001 4003 6000 4594 6285 4711 6283 4904 6030 4642 6004 4259 5318 3636 3955
+6 2 2 5 4 2 7 3 3 2 2 1 2 3 2 1 1 1 0 0 1 1 3 1 2 2 4 3 6 3 3 3 4 1 1 1 1 1 2 2
+8 7 4 4 4 6 5 2 2 4 5 2 4 5 4 3 6 4 3 4 4 4 5 5 5 8 8 5 6 5 4 5 6 4 6 6 6 5 5 4
+4442 4990 4042 4786 4600 5706 4723 3998 2947 3954 3136 4203 3677 4482 3084 3710 3089 4386 3577 4683 3478 4957 3643 5185 3697 5192 4107 5947 4632 6215 4695 6213 4934 5913 4679 6012 4306 5306 3673 3963
+8 5 2 1 1 3 3 0 0 2 3 0 2 3 2 1 5 2 1 1 2 1 3 2 3 7 8 2 4 2 1 2 4 1 4 3 4 2 3 2
+4 4 2 4 5 4 6 6 2 2 1 1 2 4 3 3 4 3 3 3 3 3 3 6 5 4 6 4 5 5 4 6 5 4 7 4 5 5 4 4
+4433 4936 3992 4744 4612 5609 4758 4126 2924 3839 3083 4012 3636 4458 3083 3671 3114 4307 3564 4586 3467 4843 3628 5242 3732 5126 4157 5835 4644 6149 4680 6208 4938 5803 4741 5897 4326 5294 3683 3970
+2 1 0 1 3 1 4 4 0 0 0 0 0 1 1 1 2 1 1 1 1 1 1 4 3 1 4 1 3 2 1 3 2 1 5 1 3 2 2 2
+5 1 3 4 3 3 6 7 4 3 2 3 3 5 3 3 3 5 6 5 5 5 4 6 5 3 6 5 6 7 7 5 5 5 6 5 5 5 4 4
+4450 4701 3969 4705 4573 5456 4792 4308 2953 3792 3057 3955 3621 4497 3082 3635 3112 4355 3628 4618 3508 4859 3639 5296 3766 5002 4206 5792 4681 6210 4742 6142 4942 5762 4776 5850 4345 5283 3693 3977
+3 0 1 1 1 0 4 6 2 1 0 1 1 3 1 1 1 3 5 3 3 2 2 4 3 1 4 2 4 4 5 2 2 2 4 2 3 2 2 2
+6 5 3 4 7 8 7 6 3 3 2 3 4 5 4 7 5 5 5 6 6 5 4 5 3 7 9 5 5 6 6 5 6 6 7 6 6 4 5 3
+4492 4726 3946 4668 4637 5620 4851 4418 2955 3748 3031 3902 3632 4534 3107 3846 3162 4400 3665 4709 3573 4874 3650 5285 3748 5131 4331 5751 4691 6206 4776 6080 4972 5784 4835 5867 4389 5211 3728 3922
+4 3 1 1 5 6 5 4 1 1 0 1 2 3 2 6 3 3 3 4 5 2 2 2 1 5 10 2 3 3 4 2 4 3 5 3 4 1 3 1
+5 4 3 3 4 5 5 6 4 3 3 3 4 4 6 8 4 6 5 5 4 4 6 8 8 6 5 7 6 6 6 5 5 8 5 6 5 5 7 4
+4507 4688 3924 4572 4623 5590 4857 4521 2983 3707 3032 3852 3643 4507 3182 4106 3185 4504 3701 4733 3586 4827 3712 5459 3858 5191 4350 5836 4727 6202 4810 6022 4975 5928 4842 5883 4407 5205 3813 3932
+3 1 1 1 1 2 2 4 2 1 1 1 2 1 5 8 2 4 3 3 2 1 4 7 8 4 3 5 4 3 4 2 2 6 2 3 3 2 6 2
+6 6 4 4 7 8 6 8 8 5 7 3 3 4 5 6 5 8 6 5 6 6 5 7 8 7 5 5 7 10 7 5 7 7 5 6 5 6 8 6
+4547 4775 3928 4543 4686 5746 4889 4741 3113 3791 3135 3805 3628 4482 3230 4228 3233 4725 3761 4756 3649 4906 3747 5561 3966 5309 4369 5793 4787 6444 4868 5967 5029 6002 4848 5898 4424 5261 3922 4064
+4 4 2 1 5 6 4 7 9 3 7 1 1 1 3 4 3 7 4 3 5 4 3 5 8 5 3 2 5 9 5 2 5 5 2 3 3 4 8 4
+6 6 4 6 5 3 2 4 6 5 4 4 2 4 5 4 7 7 7 4 5 6 6 6 5 8 6 5 6 6 7 8 5 9 7 8 6 6 5 5
+4586 4857 3932 4639 4696 5585 4818 4702 3188 3870 3158 3822 3588 4458 3277 4220 3331 4871 3846 4716 3685 4980 3806 5595 3994 5481 4413 5752 4820 6426 4925 6100 5031 6194 4905 6035 4466 5313 3951 4127
+4 4 2 4 3 0 0 1 5 3 2 2 0 1 3 2 7 5 6 1 3 4 4 3 3 7 4 2 4 3 5 6 2 8 5 6 4 4 3 3
+6 5 5 3 3 4 5 7 4 3 2 4 5 4 5 4 2 3 5 6 7 8 4 9 5 5 7 9 6 6 6 6 7 6 7 8 7 6 7 6
+4624 4872 3961 4544 4655 5495 4825 4849 3210 3822 3130 3838 3626 4436 3323 4212 3298 4763 3877 4801 3771 5172 3813 5812 4022 5459 4481 5959 4853 6409 4955 6102 5084 6191 4961 6164 4533 5362 4031 4248
+4 2 3 1 1 1 2 5 2 1 0 2 3 2 3 2 0 1 3 4 6 7 2 8 3 2 6 8 4 3 4 3 5 3 5 6 6 4 6 4
+4 6 4 3 3 5 4 6 5 4 5 6 7 7 7 7 4 5 5 5 7 8 7 7 5 9 10 9 7 7 8 7 6 7 4 7 8 7 6 5
+4610 4948 3964 4455 4615 5472 4806 4926 3257 3838 3179 3976 3714 4599 3418 4389 3317 4784 3908 4820 3855 5353 3896 5893 4049 5684 4624 6154 4910 6454 5035 6165 5110 6249 4939 6224 4624 5470 4083 4300
+1 4 2 1 1 2 1 4 3 2 3 4 6 6 7 6 2 2 3 2 6 7 6 5 3 8 12 8 5 4 7 4 4 4 1 4 7 5 4 3
+7 8 3 4 5 5 5 4 4 3 4 3 4 3 7 4 5 7 5 3 6 5 5 7 10 9 11 9 8 8 10 7 5 6 7 8 9 9 7 3
+4673 5142 3941 4433 4627 5450 4813 4876 3277 3792 3201 3921 3723 4507 3511 4371 3362 4927 3938 4715 3912 5339 3926 5969 4203 5895 4789 6337 4991 6558 5164 6225 5110 6242 4994 6342 4738 5694 4160 4226
+5 7 1 2 3 2 2 1 2 1 2 1 2 1 6 2 3 5 3 1 4 2 3 5 12 8 14 8 7 6 11 4 2 3 5 6 9 8 6 1
+3 6 4 7 7 6 4 3 8 4 3 3 5 3 8 6 4 6 11 7 5 5 7 8 9 6 9 11 11 8 6 6 8 7 4 6 7 8 7 5
+4633 5202 3944 4597 4690 5491 4795 4767 3399 3810 3197 3870 3757 4420 3627 4477 3380 5000 4120 4862 3942 5325 4006 6102 4328 5909 4899 6632 5147 6656 5188 6220 5186 6297 4971 6330 4798 5843 4235 4279
+1 4 2 6 5 3 1 1 9 2 1 1 3 1 8 4 2 4 15 5 3 2 6 6 10 3 9 11 13 6 4 3 7 4 1 3 5 6 6 3
+5 3 5 6 8 4 4 6 8 6 6 4 5 5 5 6 3 4 4 4 3 5 5 6 10 9 5 8 9 9 7 6 6 7 7 6 6 6 5 6
+4645 5074 3973 4689 4777 5407 4777 4849 3518 3950 3270 3883 3791 4461 3664 4577 3372 4945 4119 4816 3920 5312 4033 6104 4475 6107 4904 6725 5248 6809 5237 6215 5209 6349 5025 6318 4831 5861 4257 4390
+3 1 3 4 7 1 1 4 9 4 5 2 3 3 3 4 1 1 2 1 1 2 3 3 12 8 2 6 9 7 5 3 4 4 5 3 4 3 3 4
+12 8 9 5 9 4 7 8 9 5 3 2 4 7 7 6 6 9 3 4 7 7 11 6 11 7 5 8 4 8 8 9 5 6 9 6 8 7 6 6
+4835 5261 4103 4714 4887 5328 4836 5049 3660 4020 3265 3772 3798 4623 3751 4671 3441 5201 4092 4772 4001 5423 4213 6106 4644 6170 4909 6813 5219 6891 5310 6395 5206 6336 5129 6307 4914 5939 4304 4495
+17 7 10 3 9 1 5 7 11 3 1 0 2 5 6 4 5 9 1 1 6 5 15 3 14 4 2 6 1 5 7 7 2 3 9 3 7 5 4 4
+8 6 4 7 6 4 10 7 4 2 3 3 5 5 3 8 7 7 6 8 9 8 5 4 7 8 7 8 9 9 7 9 6 11 8 6 7 10 9 7
+4918 5313 4102 4861 4918 5254 4970 5176 3670 3901 3260 3730 3831 4652 3734 4882 3534 5319 4143 4977 4131 5589 4235 5985 4707 6291 4965 6895 5318 7030 5356 6564 5229 6631 5205 6297 4970 6197 4426 4655
+7 4 2 5 4 1 11 5 2 0 1 1 3 3 1 7 6 5 4 7 10 6 3 1 5 6 5 5 9 7 5 7 4 11 7 3 5 10 10 5
+6 10 7 8 8 5 8 7 3 4 3 4 5 4 8 9 6 4 5 5 7 10 8 8 7 6 6 8 10 8 7 7 7 9 6 9 8 6 9 5
+4948 5608 4178 5060 4999 5245 5050 5295 3655 3912 3255 3751 3863 4618 3845 5142 3599 5245 4167 4985 4206 5868 4333 6117 4768 6282 4994 6972 5440 7099 5401 6600 5277 6786 5228 6472 5050 6193 4545 4682
+4 10 6 7 7 2 7 5 1 2 1 2 3 1 8 9 5 1 3 2 6 10 8 6 5 3 4 5 11 5 5 4 5 7 4 7 7 3 10 3
+4 5 4 5 6 7 6 5 7 6 3 6 13 5 8 10 5 5 4 6 6 5 9 9 9 8 7 6 8 9 6 8 7 8 9 9 12 6 7 5
+4926 5578 4175 5063 5027 5360 5077 5284 3742 4045 3250 3894 4098 4648 3953 5447 3636 5237 4165 5054 4254 5823 4454 6302 4879 6396 5048 6922 5508 7226 5419 6695 5324 6870 5327 6636 5230 6190 4610 4708
+1 2 2 2 4 5 4 2 6 4 1 4 22 3 8 11 3 2 2 4 4 2 10 8 9 6 5 3 7 7 3 6 5 5 9 7 16 3 6 3
+10 10 11 5 5 6 8 7 6 3 5 6 8 7 3 2 4 7 6 7 6 5 5 11 9 7 10 7 9 10 8 9 7 11 11 10 12 7 8 6
+5058 5857 4351 5066 5029 5407 5154 5397 3801 3986 3296 4028 4200 4799 3931 5243 3647 5352 4214 5180 4301 5780 4470 6599 4987 6442 5177 6936 5600 7406 5488 6846 5370 7133 5475 6852 5406 6248 4699 4794
+11 10 15 2 2 3 7 5 4 1 3 4 8 5 1 0 2 5 4 5 4 2 3 11 9 4 11 4 8 8 7 7 5 10 13 9 15 4 7 4
+11 8 7 3 4 4 8 8 5 3 4 6 6 3 4 6 9 9 5 11 7 8 5 8 13 10 7 9 7 7 7 6 8 11 10 8 8 10 8 6
+5212 5997 4421 4946 5005 5328 5229 5564 3833 3931 3315 4154 4248 4695 3935 5297 3786 5583 4236 5545 4372 5924 4486 6694 5194 6669 5226 7072 5639 7391 5529 6803 5440 7380 5593 6932 5475 6487 4786 4874
+13 6 6 1 1 1 7 6 3 1 2 4 4 1 2 4 11 8 3 13 6 6 3 6 19 9 5 7 5 4 5 3 7 10 10 5 7 9 7 4
+4 5 4 6 8 5 10 6 2 4 4 3 5 5 8 6 5 6 7 10 5 9 8 9 7 8 8 16 12 9 8 7 5 8 11 11 9 8 8 10
+5184 5944 4412 5017 5084 5315 5354 5598 3788 3940 3334 4089 4269 4720 4041 5347 3819 5616 4309 5826 4390 6121 4578 6845 5243 6760 5300 7630 5804 7500 5595 6824 5432 7428 5734 7191 5568 6589 4871 5195
+1 2 2 4 7 2 11 3 0 2 2 1 3 3 8 4 3 3 6 10 3 8 7 7 5 6 7 22 15 7 6 4 2 5 12 10 8 6 7 11
+5 7 8 6 4 2 3 4 4 6 7 6 3 5 6 4 7 5 6 11 9 9 5 9 6 9 7 9 9 9 10 13 9 12 9 7 6 7 7 8
+5182 6017 4506 5084 5059 5118 5297 5507 3795 4072 3429 4212 4239 4743 4093 5271 3902 5586 4354 6152 4510 6306 4591 6987 5265 6907 5346 7725 5889 7602 5710 7213 5526 7719 5820 7189 5582 6623 4928 5374
+2 5 7 4 1 0 1 1 2 4 7 4 1 3 4 1 6 2 4 12 10 8 3 7 4 7 5 6 8 6 10 15 8 12 8 4 3 4 5 7
+3 4 5 5 3 6 6 5 9 9 8 11 3 2 3 5 6 9 7 5 10 11 6 6 11 8 7 9 9 8 10 10 5 10 8 8 7 6 7 5
+5129 5901 4521 5086 5009 5179 5318 5483 3930 4380 3547 4635 4209 4581 4067 5261 3957 5803 4424 6090 4653 6603 4629 6936 5414 6984 5391 7814 5972 7637 5823 7394 5515 7870 5879 7249 5621 6594 4983 5358
+1 1 3 2 1 4 4 2 10 10 9 14 1 0 1 2 4 8 6 2 12 11 4 3 13 5 5 6 8 5 10 8 2 8 6 5 5 3 5 2
+2 2 3 7 9 8 5 3 4 4 5 3 3 5 6 6 3 8 7 6 6 7 7 9 9 8 7 10 8 10 11 10 8 7 8 11 11 6 6 5
+5052 5669 4484 5210 5114 5359 5313 5338 3934 4362 3586 4541 4180 4613 4118 5313 3934 5946 4492 6093 4690 6636 4692 7072 5508 7056 5435 7959 6027 7793 5958 7564 5581 7827 5936 7489 5761 6567 5011 5343
+0 0 1 5 9 7 2 1 2 2 3 1 1 3 4 4 1 6 6 3 4 4 5 7 8 5 5 8 6 8 12 8 6 4 6 10 12 3 4 2
+5 5 8 5 7 7 6 5 3 3 5 7 7 6 8 5 4 4 7 7 5 6 6 8 8 6 8 10 9 11 9 10 8 10 10 9 12 7 6 5
+5053 5636 4576 5204 5165 5467 5333 5324 3912 4284 3624 4698 4254 4704 4219 5301 3938 5835 4558 6157 4700 6606 4728 7139 5575 7001 5503 8095 6106 8001 6039 7724 5646 7971 6043 7592 5923 6603 5039 5329
+2 2 7 2 5 5 4 2 1 1 3 5 6 4 8 2 2 1 6 4 3 3 4 5 6 3 7 8 8 10 8 8 6 8 10 6 15 4 4 2
+6 9 6 3 10 12 13 7 4 4 3 4 7 5 4 3 4 7 9 8 9 6 6 8 10 10 12 10 9 7 8 9 9 8 7 6 7 7 7 7
+5080 5850 4615 5076 5291 5876 5532 5434 3916 4272 3610 4661 4326 4728 4215 5167 3941 5914 4674 6279 4812 6578 4763 7202 5691 7195 5672 8223 6183 7951 6092 7813 5735 7984 6071 7505 5954 6636 5092 5439
+4 8 4 1 11 15 18 5 2 2 1 1 6 3 2 1 2 5 9 6 9 3 4 5 10 9 15 8 8 4 6 6 8 5 5 3 5 4 5 5
+4 4 6 6 6 7 6 6 8 4 5 8 3 9 7 9 6 7 8 9 11 9 8 10 8 7 7 8 9 8 9 8 9 6 7 9 7 9 8 6
+5055 5744 4653 5140 5312 5953 5547 5476 4022 4261 3647 4872 4294 4997 4288 5409 3995 5989 4761 6455 4973 6736 4848 7384 5753 7193 5709 8221 6258 7965 6169 7835 5821 7873 6098 7607 5984 6790 5169 5481
+1 1 4 4 4 5 3 3 8 2 3 7 1 9 6 8 4 5 7 7 14 7 7 8 6 4 5 5 8 5 8 5 8 3 5 6 5 7 7 3
+6 4 4 5 3 3 7 6 8 9 9 6 4 7 4 4 6 6 8 10 6 6 9 10 9 6 8 6 6 9 6 9 10 7 7 8 6 9 8 6
+5082 5645 4639 5138 5256 5780 5587 5516 4126 4558 3786 4948 4289 5127 4283 5330 4048 5998 4846 6682 5002 6700 4957 7555 5839 7130 5770 8096 6255 8040 6168 7917 5931 7830 6124 7642 5987 6935 5244 5520
+4 1 1 2 1 0 5 3 8 9 11 4 2 5 2 1 4 3 7 9 4 3 9 8 8 3 6 2 3 6 3 6 10 4 4 5 3 7 7 3
+8 8 12 10 5 2 10 7 6 7 5 4 4 5 3 5 4 9 8 9 7 9 8 8 8 10 7 8 11 16 12 10 14 12 8 11 9 8 8 8
+5159 5797 4829 5444 5252 5556 5703 5615 4176 4714 3819 4896 4284 5126 4252 5317 4049 6191 4929 6834 5056 6850 5037 7593 5897 7316 5804 8101 6380 8540 6320 8056 6140 8097 6175 7859 6067 7010 5317 5680
+7 6 17 11 2 0 10 5 4 5 3 1 2 2 1 2 2 8 7 7 5 7 7 5 6 8 5 5 11 20 14 8 19 11 6 10 8 5 7 6
+2 8 9 6 6 6 6 5 8 7 8 6 5 4 4 6 5 7 6 9 10 6 8 9 8 7 7 9 15 20 14 8 11 13 13 14 8 9 6 7
+5081 5940 4938 5485 5274 5591 5714 5585 4276 4861 3928 4970 4304 5064 4248 5366 4075 6249 4959 6976 5185 6807 5115 7690 5954 7307 5838 8167 6604 9256 6520 8064 6267 8409 6353 8247 6120 7142 5337 5769
+0 6 9 3 4 3 3 2 8 5 8 4 3 1 2 4 3 4 4 7 11 3 7 6 6 4 5 6 21 29 19 5 12 13 16 15 6 7 4 5
+7 8 5 5 5 4 3 5 7 12 9 5 6 6 7 7 6 4 7 6 5 5 5 12 13 5 8 12 10 9 8 7 10 8 9 10 11 8 8 6
+5133 6075 4942 5463 5270 5501 5648 5557 4348 5306 4060 4978 4349 5128 4320 5474 4126 6119 5014 6926 5183 6705 5115 7965 6137 7175 5896 8414 6694 9253 6561 8010 6366 8395 6424 8366 6248 7204 5408 5791
+5 6 2 2 2 1 0 2 6 16 10 2 4 4 6 5 4 1 5 3 2 2 2 11 17 2 6 11 9 5 6 4 9 5 7 7 12 5 7 3
+10 17 15 10 9 5 10 8 8 9 5 4 5 10 8 6 10 5 9 16 8 9 10 11 7 9 11 13 12 10 10 7 9 7 7 8 10 8 7 6
+5260 6754 5202 5749 5368 5478 5762 5715 4443 5540 4086 4925 4368 5434 4416 5514 4278 6059 5118 7493 5258 6855 5242 8162 6162 7297 6030 8707 6833 9312 6652 7959 6437 8321 6442 8355 6347 7263 5451 5812
+11 27 25 10 9 2 10 6 8 8 3 1 3 11 8 3 12 2 9 22 7 7 11 9 4 7 12 13 13 7 9 4 7 3 4 5 9 5 5 3
+11 14 12 7 9 10 10 9 9 5 7 7 12 10 10 6 4 5 9 7 7 7 9 8 7 8 8 10 14 11 11 9 7 12 10 9 8 8 8 10
+5409 7208 5378 5834 5464 5763 5873 5925 4562 5514 4162 5059 4565 5722 4561 5551 4273 6002 5220 7473 5305 6873 5341 8163 6187 7350 6083 8798 7020 9429 6767 8034 6455 8559 6536 8406 6393 7318 5519 6077
+13 17 16 5 8 10 10 8 9 2 6 5 17 10 12 3 2 2 9 4 5 4 9 5 4 5 6 7 17 8 11 6 4 11 9 6 6 5 7 10
+8 12 16 17 9 9 5 8 10 9 6 4 9 7 9 10 5 7 8 7 6 8 9 12 13 9 10 11 11 8 8 11 10 10 10 8 7 8 9 8
+5478 7512 5652 6528 5557 5970 5854 6061 4703 5736 4211 5001 4681 5808 4677 5832 4294 6071 5294 7454 5325 6952 5437 8410 6364 7461 6186 8945 7125 9354 6802 8227 6549 8659 6628 8393 6412 7370 5611 6203
+7 12 27 28 8 8 2 6 12 8 4 1 9 5 9 10 3 5 7 4 4 5 8 11 16 7 10 9 10 4 6 9 9 7 9 5 4 5 8 6
+11 12 7 8 9 2 2 7 8 8 9 6 13 11 5 7 9 9 6 8 6 5 10 16 11 9 9 13 13 7 10 11 9 10 10 7 6 6 5 6
+5622 7798 5689 6627 5648 5734 5758 6127 4790 5883 4335 5069 4896 6135 4688 5912 4416 6259 5315 7498 5345 6842 5556 8888 6486 7566 6261 9207 7279 9222 6887 8409 6615 8753 6718 8319 6405 7296 5598 6199
+13 12 5 6 8 0 0 4 7 6 10 4 19 12 3 5 10 8 4 5 4 2 10 19 11 7 8 12 15 3 9 9 7 7 9 3 3 3 2 3
+6 10 10 11 6 6 5 6 7 8 11 3 5 7 3 6 7 8 8 9 8 8 10 9 7 11 9 9 10 10 11 7 10 11 10 7 7 10 9 7
+5635 7944 5802 6905 5660 5758 5742 6128 4849 6021 4507 4949 4901 6196 4647 5925 4484 6374 5386 7601 5416 6922 5672 8907 6503 7787 6334 9207 7352 9283 6996 8334 6705 8903 6805 8249 6424 7472 5688 6257
+3 8 10 11 3 3 2 3 5 6 15 1 2 4 1 3 6 6 7 6 7 5 10 6 4 10 8 5 8 7 11 3 9 9 9 3 4 8 8 4
+6 11 6 8 6 5 4 9 10 5 5 7 5 5 6 6 9 8 8 10 9 7 7 6 8 8 7 8 6 7 7 9 8 7 9 10 10 10 8 7
+5647 8143 5810 6982 5672 5719 5700 6313 4983 5966 4522 5082 4906 6131 4684 5938 4602 6483 5456 7759 5510 6936 5709 8741 6545 7811 6354 9146 7321 9156 7000 8386 6742 8798 6865 8368 6519 7638 5750 6311
+3 9 3 5 3 2 1 8 11 2 3 5 2 2 4 3 9 6 7 8 8 4 5 2 6 5 4 4 3 3 4 6 6 3 7 7 9 8 6 4
+7 16 18 12 7 11 13 9 6 3 4 5 2 8 6 4 10 11 11 9 11 8 5 11 9 8 8 10 10 9 7 10 11 9 9 11 11 9 7 5
+5684 8637 6125 7300 5709 6051 5890 6487 5011 5792 4511 5084 4834 6254 4720 5827 4742 6769 5600 7846 5653 7011 5694 8892 6611 7833 6399 9211 7393 9159 7004 8497 6854 8823 6923 8541 6637 7732 5785 6239
+5 19 32 12 5 12 17 7 4 0 1 2 0 6 4 1 12 11 13 6 13 5 2 9 7 5 6 7 8 5 4 7 11 6 7 9 11 6 5 2
+7 5 11 13 8 6 8 5 5 5 7 10 6 8 11 10 10 10 10 7 9 11 7 7 8 5 7 8 13 10 8 8 9 9 9 7 9 7 5 4
+5721 8425 6253 7660 5770 6056 5947 6404 5013 5751 4577 5393 4866 6370 4883 6091 4879 6977 5715 7805 5741 7266 5730 8788 6650 7670 6418 9149 7540 9223 7033 8478 6912 8846 6980 8458 6701 7698 5768 6110
+5 1 12 14 6 3 6 2 2 2 6 11 4 6 14 10 11 9 10 4 8 10 5 3 6 2 4 4 14 7 5 5 7 6 7 3 7 4 2 1
+6 10 6 4 4 3 5 7 9 6 6 12 8 3 6 8 5 6 6 5 9 11 10 9 6 4 11 9 14 11 10 9 13 10 9 8 9 8 8 5
+5731 8533 6250 7446 5728 5876 5926 6449 5117 5774 4616 5806 4949 6172 4914 6217 4885 6927 5725 7643 5827 7505 5842 8813 6637 7455 6538 9153 7709 9345 7113 8522 7071 8929 7035 8442 6763 7727 5828 6050
+3 7 3 1 1 0 2 4 9 3 4 15 7 0 4 6 2 3 3 2 8 10 10 6 3 1 11 5 16 8 9 6 15 7 7 5 7 5 6 2
+2 6 8 5 4 4 5 3 5 3 6 10 8 4 6 5 6 7 8 7 9 11 13 9 11 11 13 14 10 6 8 7 9 8 12 11 13 11 7 5
+5639 8389 6298 7306 5687 5769 5905 6246 5117 5611 4654 6072 5029 6047 4944 6151 4916 6941 5786 7614 5911 7730 6028 8837 6752 7683 6707 9463 7772 9152 7139 8440 7124 8884 7166 8611 6926 7939 5861 5994
+0 2 6 2 1 1 2 0 2 0 4 10 7 1 4 2 4 4 6 4 8 10 17 6 11 10 16 14 8 2 5 3 7 4 13 9 15 10 5 2
+6 5 5 7 6 7 5 2 6 7 9 5 6 6 5 6 7 6 7 5 6 10 13 10 11 9 10 16 11 12 12 11 8 8 9 10 10 10 9 6
+5651 8192 6268 7297 5698 5852 5885 5994 5142 5704 4767 6014 5056 6052 4948 6150 4972 6893 5820 7464 5916 7880 6209 8921 6864 7774 6795 9878 7859 9340 7267 8609 7150 8842 7217 8708 7008 8077 5944 6002
+3 2 2 4 3 5 2 0 4 5 9 2 4 3 2 3 5 3 5 2 3 8 17 7 11 6 9 17 10 10 12 9 5 4 7 7 9 8 8 3
+3 1 4 6 5 5 8 9 8 4 8 5 5 7 8 3 5 6 5 5 15 12 7 11 9 7 9 10 10 7 8 7 8 8 11 10 9 8 6 8
+5586 7761 6213 7227 5683 5808 5942 6187 5218 5607 4852 5960 5057 6118 5028 5965 4975 6848 5802 7323 6151 8144 6232 9061 6922 7737 6855 9899 7918 9209 7290 8522 7176 8803 7318 8799 7063 8083 5948 6133
+0 0 1 3 2 2 6 8 7 1 7 2 2 4 7 0 2 3 2 2 22 11 4 9 7 4 7 6 8 3 5 3 5 4 10 7 7 5 3 6
+4 5 5 9 8 13 10 9 8 4 8 11 5 4 7 8 9 8 7 9 9 12 12 7 6 7 8 11 10 7 12 15 11 8 7 9 9 9 11 7
+5548 7602 6185 7346 5745 6258 6049 6368 5292 5516 4935 6278 5058 5996 5081 6098 5080 6928 5836 7436 6227 8392 6383 8947 6902 7702 6888 9980 7975 9086 7414 8932 7278 8766 7314 8824 7116 8150 6080 6195
+1 2 2 7 6 16 10 8 7 1 7 12 2 1 5 6 9 5 5 7 8 11 14 3 3 4 5 8 8 3 12 16 10 4 4 6 7 6 12 4
+7 16 13 11 6 9 11 9 12 9 9 9 5 4 5 4 7 9 9 8 5 9 7 7 11 6 5 9 9 9 10 11 11 6 8 9 10 9 8 6
+5588 8128 6362 7581 5754 6435 6179 6538 5466 5737 5041 6454 5059 5882 5081 5977 5132 7065 5920 7481 6199 8441 6402 8840 7010 7608 6843 9934 8005 9093 7484 9071 7377 8608 7335 8847 7193 8213 6132 6191
+5 20 16 10 3 7 12 7 15 8 9 7 2 1 2 1 5 7 8 5 2 6 4 3 11 3 2 5 6 6 8 9 10 2 5 6 9 6 6 3
+14 15 11 7 2 6 10 9 4 5 8 9 8 9 4 9 11 9 4 7 7 8 7 9 9 14 14 11 15 8 10 9 11 8 9 8 10 8 9 6
+5806 8561 6484 7556 5661 6417 6280 6698 5431 5699 5119 6619 5137 6082 5056 6171 5285 7194 5874 7462 6223 8426 6421 8862 7065 8011 7030 10013 8188 9038 7552 9079 7474 8583 7381 8807 7269 8211 6208 6188
+20 17 11 4 0 3 9 7 1 2 7 7 7 8 1 8 13 7 1 4 4 5 4 6 7 16 17 8 18 4 8 6 10 5 7 4 8 5 8 3
+3 5 4 5 3 2 4 5 9 10 10 8 10 12 4 8 10 6 3 8 9 10 6 6 9 12 8 8 13 10 6 7 11 11 10 5 7 9 11 8
+5737 8354 6424 7409 5596 6154 6225 6603 5525 5971 5246 6713 5264 6454 5032 6292 5408 7131 5804 7505 6297 8534 6414 8698 7118 8267 7059 9903 8315 9110 7516 8964 7568 8743 7452 8585 7266 8271 6334 6308
+0 1 1 2 0 0 1 2 8 10 11 6 11 14 1 6 11 3 0 5 8 7 3 2 7 11 5 4 13 7 3 3 10 9 8 1 4 6 12 6
+10 13 8 6 9 5 7 5 4 6 6 8 8 7 6 6 8 6 11 7 10 9 16 12 12 11 9 8 11 9 11 8 11 12 8 5 9 11 11 8
+5849 8651 6468 7333 5686 6091 6248 6514 5489 5981 5268 6801 5337 6496 5059 6283 5477 7071 5940 7484 6395 8574 6662 8913 7247 8446 7112 9800 8388 9116 7609 8917 7660 8955 7470 8377 7314 8450 6457 6421
+10 13 6 3 8 2 4 2 1 3 4 6 7 4 4 3 7 3 12 4 9 6 24 10 12 9 7 4 9 6 10 4 10 10 5 1 7 9 11 6
+7 12 6 4 7 5 10 8 9 8 6 4 4 5 9 9 10 6 13 7 5 8 11 13 7 11 10 12 11 13 11 10 10 8 8 10 11 10 10 6
+5881 8869 6459 7138 5722 6032 6347 6614 5582 6113 5289 6638 5306 6413 5162 6458 5595 7015 6124 7465 6363 8551 6776 9176 7245 8615 7190 9949 8459 9367 7700 8996 7724 8909 7488 8488 7412 8557 6551 6404
+5 10 3 1 5 2 9 6 8 6 4 1 1 2 9 7 10 3 17 4 2 5 11 12 4 9 9 9 9 12 10 7 8 4 5 7 10 7 9 3
+8 12 11 6 4 7 7 3 7 7 8 7 6 4 7 5 8 5 7 7 8 8 6 9 9 8 13 11 9 11 12 14 13 9 9 9 10 11 10 6
+5938 9074 6578 7078 5681 6100 6367 6401 5621 6176 5361 6669 5326 6273 5212 6377 5659 6901 6150 7447 6408 8529 6760 9178 7294 8589 7342 10027 8477 9480 7814 9316 7863 8927 7531 8531 7482 8719 6643 6388
+6 10 11 3 1 4 4 0 5 4 7 4 4 1 5 2 6 2 4 4 6 5 3 5 7 5 14 8 6 8 12 14 14 6 7 6 8 9 9 3
+8 11 7 10 11 8 9 7 6 6 6 6 4 7 9 7 8 5 5 7 6 6 7 6 8 9 13 14 13 12 9 11 10 5 9 8 6 8 9 7
+5994 9205 6592 7267 5820 6225 6438 6447 5634 6174 5380 6637 5295 6326 5311 6424 5722 6794 6124 7430 6401 8385 6770 8995 7316 8626 7491 10285 8597 9648 7848 9432 7922 8698 7573 8510 7448 8687 6707 6434
+6 8 4 8 12 6 7 4 3 3 4 3 1 4 9 4 6 2 2 4 3 2 4 2 5 6 14 13 13 10 6 8 8 1 7 5 3 4 7 4
+4 8 5 9 7 3 4 6 7 4 7 7 7 8 4 6 6 5 6 9 11 5 5 7 7 7 9 12 10 7 11 12 11 12 14 12 10 8 9 7
+5946 9144 6555 7383 5853 6035 6379 6428 5672 6049 5424 6668 5341 6437 5280 6407 5732 6693 6124 7537 6522 8189 6728 8885 7312 8538 7534 10405 8638 9499 7933 9603 8005 8913 7741 8736 7517 8657 6769 6478
+1 4 2 7 5 0 1 3 5 1 5 4 5 6 1 3 3 2 3 7 11 2 2 3 4 3 7 9 7 3 10 10 10 10 16 11 8 4 7 4
+4 7 7 13 9 8 7 7 8 7 9 11 7 6 3 5 6 6 6 6 9 5 6 10 8 9 7 9 12 8 8 9 10 10 10 11 10 9 9 8
+5899 9025 6570 7738 5936 6164 6398 6472 5734 6116 5518 6943 5386 6419 5224 6329 5742 6660 6124 7453 6589 8004 6713 8966 7333 8578 7524 10333 8729 9420 7939 9579 8060 8992 7803 8887 7584 8690 6830 6580
+1 3 4 14 8 6 4 4 6 4 8 11 5 3 1 2 3 3 3 3 7 2 3 7 5 6 4 5 11 4 5 5 8 7 8 9 8 6 7 6
+9 8 7 18 11 8 6 7 4 5 12 11 11 6 8 6 9 10 8 8 8 9 9 10 11 9 13 7 9 8 8 13 14 9 8 8 10 8 9 7
+5981 8975 6584 8379 6069 6285 6391 6513 5693 6056 5687 7202 5532 6402 5298 6317 5828 6874 6175 7497 6629 8076 6775 9042 7431 8616 7668 10143 8741 9346 7945 9802 8216 9005 7812 8845 7650 8660 6889 6615
+8 4 4 25 12 6 3 4 1 2 15 10 13 3 7 3 8 9 6 5 6 6 7 7 10 6 14 3 6 4 5 11 15 6 5 4 8 4 7 4
+6 5 4 9 10 8 10 8 4 4 8 12 8 4 8 7 5 7 9 6 7 8 9 11 11 12 11 7 7 7 8 11 11 11 11 10 6 8 7 6
+5985 8743 6521 8429 6173 6399 6487 6613 5653 5938 5749 7507 5598 6263 5370 6368 5810 6891 6250 7415 6642 8082 6835 9175 7526 8836 7757 9964 8701 9215 7951 9889 8292 9140 7898 8928 7612 8631 6895 6586
+3 1 1 6 10 6 9 6 1 1 6 12 6 1 7 4 2 4 8 3 4 5 7 8 10 10 10 3 3 3 5 8 9 8 10 7 3 4 4 3
+8 5 7 12 12 11 7 5 5 5 7 13 12 8 8 6 12 8 5 4 8 10 12 18 11 10 9 9 7 8 10 8 9 7 7 10 8 8 9 6
+6040 8525 6537 8660 6325 6690 6504 6523 5639 5888 5784 7855 5765 6378 5440 6354 5971 6969 6221 7215 6680 8211 6971 9730 7619 8920 7793 9919 8662 9153 8008 9787 8315 9021 7879 9006 7626 8604 6952 6559
+6 1 4 11 14 11 4 2 2 2 5 14 15 6 7 3 14 5 2 1 6 8 13 22 10 7 6 5 3 4 8 4 6 3 4 7 5 4 7 3
+5 8 8 12 5 7 4 7 10 7 11 6 10 6 5 5 6 8 7 9 7 13 12 11 6 7 9 8 8 11 12 8 8 9 9 7 9 9 8 6
+6017 8505 6578 8877 6294 6718 6443 6561 5753 5964 5920 7752 5876 6363 5432 6279 5975 7042 6244 7335 6692 8517 7103 9822 7582 8814 7828 9815 8650 9279 8114 9691 8311 9032 7912 8895 7665 8640 6982 6534
+2 5 6 10 2 4 1 4 10 5 12 3 10 3 2 2 3 5 4 7 4 13 13 8 3 3 6 4 4 8 11 4 5 6 6 3 6 6 5 3
+4 8 9 9 6 9 5 3 5 9 7 6 8 6 4 3 6 7 6 7 6 9 9 14 20 10 8 7 7 10 11 6 7 7 9 9 11 9 10 11
+5969 8486 6643 8897 6290 6867 6409 6351 5737 6159 5951 7655 5933 6349 5398 6086 5979 7049 6241 7324 6678 8558 7155 10092 7903 8899 7837 9656 8613 9336 8192 9478 8282 8920 7944 8914 7754 8674 7063 6817
+1 5 7 6 3 7 2 0 2 8 5 3 6 3 1 0 3 4 3 4 3 6 7 13 33 7 5 3 3 7 9 2 3 3 6 6 10 6 9 11
+8 9 8 6 5 4 11 11 5 4 8 10 7 3 3 5 5 7 5 6 9 16 7 8 10 13 12 9 7 10 9 7 10 11 9 8 8 7 9 7
+6024 8529 6681 8731 6260 6700 6530 6645 5721 6035 6006 7810 5963 6152 5339 6028 5957 7056 6213 7253 6741 9027 7155 9978 7961 9163 7948 9629 8576 9390 8217 9339 8330 9060 7975 8870 7764 8583 7116 6838
+6 6 6 2 2 1 11 11 2 1 6 8 5 0 1 2 2 4 2 3 7 19 4 4 8 12 11 5 3 7 6 3 8 9 6 4 5 3 7 4
+10 9 11 9 7 4 6 8 3 4 7 4 7 9 6 5 6 8 7 6 6 6 13 12 9 11 16 16 17 13 9 9 12 13 14 14 11 9 8 6
+6129 8570 6795 8760 6282 6543 6520 6737 5654 5918 6035 7587 5993 6335 5359 5973 5961 7124 6236 7186 6726 8854 7308 10116 7992 9289 8158 10034 8796 9625 8241 9331 8428 9315 8133 9197 7851 8620 7142 6796
+10 6 11 6 4 1 3 6 0 1 5 1 5 8 4 2 3 5 4 3 3 2 15 9 6 8 20 17 22 11 6 5 11 12 16 14 10 6 5 3
+13 11 13 6 6 8 8 6 5 7 10 5 6 6 7 10 4 6 8 8 8 16 14 11 12 8 7 13 11 9 9 10 11 11 11 9 10 8 7 8
+6308 8731 6957 8603 6278 6641 6561 6701 5640 5992 6139 7438 5996 6323 5404 6229 5914 7065 6284 7246 6762 9305 7483 10184 8099 9223 8133 10230 8857 9600 8265 9385 8498 9431 8211 9198 7910 8594 7142 6879
+16 9 15 2 3 6 6 3 2 5 10 2 3 3 5 10 1 3 6 5 6 18 17 8 11 4 4 11 9 5 6 7 9 8 9 5 8 4 4 5
+13 19 19 11 11 5 7 5 9 11 9 7 7 6 5 7 6 9 6 6 7 15 10 11 12 12 10 12 13 9 10 10 8 10 8 8 11 9 8 9
+6482 9374 7269 8762 6402 6549 6576 6606 5729 6308 6215 7421 6025 6312 5396 6285 5919 7194 6280 7179 6772 9668 7551 10248 8203 9406 8185 10353 8968 9576 8314 9436 8490 9479 8210 9137 7993 8631 7168 7019
+16 26 32 9 11 2 4 2 8 12 8 4 5 3 2 4 3 7 3 3 4 15 8 8 11 10 8 9 12 5 8 7 5 7 5 4 10 6 5 7
+4 4 6 8 7 4 9 8 7 6 5 6 8 9 6 6 8 7 7 7 11 5 5 18 16 10 11 15 15 7 9 10 11 13 11 7 6 6 6 6
+6422 9057 7240 8727 6421 6401 6641 6701 5764 6298 6187 7344 6079 6486 5414 6276 5975 7192 6302 7178 6884 9395 7490 10739 8407 9456 8261 10653 9127 9431 8336 9484 8559 9708 8286 9018 7946 8481 7142 6966
+1 1 3 4 4 1 7 6 5 3 2 3 6 7 3 3 6 4 4 4 11 1 2 20 20 7 9 14 16 3 6 7 9 11 9 3 2 2 3 3
+2 5 6 11 9 5 9 9 7 7 4 9 9 7 5 6 7 8 13 13 11 12 13 14 9 10 10 7 8 8 11 10 13 8 9 10 12 11 6 4
+6312 8820 7212 8879 6490 6324 6705 6851 5799 6350 6134 7456 6157 6526 5406 6268 6004 7251 6476 7546 6993 9568 7635 10954 8427 9503 8310 10443 9103 9356 8409 9529 8677 9617 8309 9091 8054 8647 7117 6793
+0 1 3 9 7 2 7 7 5 4 1 7 8 4 2 3 5 5 16 14 11 10 14 12 6 7 8 3 4 4 9 7 13 4 6 7 11 9 3 1
+10 6 7 9 9 8 10 6 7 12 9 10 14 7 7 6 5 9 13 14 13 13 14 12 8 8 10 9 9 9 10 7 12 13 13 13 9 9 6 5
+6410 8659 7210 8899 6558 6436 6793 6808 5833 6706 6210 7623 6361 6564 5449 6260 5981 7368 6646 7953 7150 9792 7802 11034 8421 9424 8358 10369 9105 9347 8454 9387 8767 9838 8433 9344 8083 8681 7092 6692
+9 2 4 6 7 6 9 3 5 13 8 8 19 4 5 3 2 7 16 16 15 11 16 8 5 4 7 5 6 5 7 3 11 11 13 12 6 6 3 2
+8 6 6 15 15 7 8 5 4 11 9 12 14 6 6 6 5 6 8 9 14 10 11 11 13 9 8 8 8 11 8 6 9 9 10 12 9 9 9 7
+6454 8508 7183 9286 6777 6479 6827 6706 5789 6979 6285 7902 6560 6538 5466 6253 5959 7294 6684 8028 7329 9818 7888 11047 8543 9411 8353 10238 9082 9462 8447 9192 8778 9800 8478 9520 8111 8713 7145 6720
+6 2 3 16 21 4 6 2 1 11 8 12 18 3 3 3 2 3 6 6 17 6 10 7 13 5 5 4 4 8 5 2 6 5 7 10 6 6 7 4
+6 5 5 9 5 8 6 9 7 10 13 10 14 8 6 5 6 11 8 10 9 10 10 12 9 11 9 9 10 8 10 8 11 11 9 5 7 9 10 10
+6446 8304 7131 9281 6735 6581 6809 6856 5823 7174 6460 8042 6754 6637 5482 6185 5963 7532 6721 8160 7376 9843 7946 11121 8559 9522 8374 10176 9110 9385 8491 9132 8840 9887 8496 9256 8087 8743 7222 6931
+3 2 2 5 2 6 3 7 5 9 16 8 18 6 3 2 3 10 6 8 7 6 8 8 6 8 6 5 7 4 7 4 9 8 6 1 4 6 8 9
+3 5 5 7 7 6 5 7 9 8 11 8 7 9 9 4 5 6 7 12 5 9 9 11 18 9 9 9 8 13 13 14 12 10 7 8 10 9 7 9
+6361 8112 7080 9154 6745 6554 6766 6874 5907 7235 6579 8050 6764 6791 5575 6059 5941 7448 6732 8407 7319 9805 7977 11129 8805 9503 8395 10118 9087 9620 8611 9444 8926 9908 8462 9192 8140 8771 7220 7068
+0 2 2 3 4 3 2 4 8 5 11 5 4 7 8 1 2 3 4 11 2 5 6 7 24 5 6 5 4 11 13 14 10 6 3 4 8 6 4 7
+3 7 9 9 13 9 9 6 7 4 7 9 5 9 8 7 9 8 9 11 5 6 10 8 14 10 13 13 9 11 12 9 9 9 9 8 9 7 8 6
+6278 8055 7133 9157 6908 6713 6827 6830 5938 7046 6593 8119 6722 6936 5640 6125 6022 7492 6794 8578 7264 9585 8033 10952 8943 9547 8517 10309 9090 9718 8702 9430 8933 9866 8480 9132 8166 8674 7244 7012
+0 4 7 5 15 7 7 3 5 1 4 6 2 7 6 4 8 5 7 9 2 2 8 3 14 7 13 11 6 8 11 5 6 5 6 4 6 3 5 3
+3 7 4 9 11 10 5 3 5 5 4 6 5 4 5 9 9 6 5 4 6 6 7 7 12 11 13 14 12 10 11 10 7 7 11 12 9 8 10 7
+6197 8001 7057 9160 7016 6924 6784 6604 5917 6930 6530 8000 6682 6765 5627 6310 6101 7411 6752 8309 7236 9378 8011 10724 9026 9650 8636 10550 9169 9749 8765 9478 8888 9704 8549 9321 8192 8645 7318 7021
+0 4 1 5 11 9 2 0 2 2 1 2 2 1 2 8 8 3 2 1 3 2 4 3 10 8 13 12 10 6 9 7 3 3 9 10 6 4 8 4
+10 10 10 7 8 5 9 8 7 5 6 5 6 5 3 5 10 4 6 10 11 8 8 10 14 11 10 9 10 14 10 7 6 10 11 11 7 9 8 10
+6297 8135 7136 9040 7045 6815 6844 6699 5948 6821 6520 7827 6668 6666 5563 6238 6204 7212 6736 8424 7336 9306 8015 10694 9158 9746 8676 10469 9195 10024 8801 9339 8819 9736 8616 9437 8166 8679 7339 7214
+9 8 9 3 5 2 7 6 5 2 3 2 3 2 0 2 10 1 3 7 10 4 5 6 14 8 7 5 7 13 7 3 2 6 9 8 3 6 5 8
+5 9 10 8 5 5 10 10 6 6 4 5 5 6 5 5 7 7 9 9 10 8 7 10 6 9 11 11 11 11 10 9 7 9 11 12 8 10 11 11
+6267 8199 7213 8989 6996 6713 6928 6911 5952 6780 6459 7664 6629 6634 5551 6170 6228 7209 6797 8471 7408 9239 7993 10666 9082 9714 8740 10516 9246 10098 8836 9331 8777 9704 8682 9608 8166 8772 7436 7456
+2 6 8 4 2 2 9 9 3 3 1 2 2 3 2 2 4 4 7 6 8 4 4 6 2 5 9 7 8 8 7 5 3 5 9 10 5 7 10 10
+4 6 10 8 6 6 7 7 9 4 4 6 8 7 5 8 9 7 12 11 7 9 13 12 8 8 11 10 8 8 9 13 15 11 10 13 9 11 9 8
+6212 8075 7288 8941 6974 6678 6933 6926 6033 6618 6400 7572 6668 6666 5540 6291 6302 7206 6934 8638 7402 9237 8125 10763 9059 9622 8803 10499 9219 9983 8845 9569 8941 9797 8720 9830 8192 8921 7480 7500
+1 2 8 4 3 3 4 4 8 1 1 3 6 4 2 6 8 4 13 9 4 5 13 9 4 4 9 6 4 4 6 12 16 8 7 11 6 9 7 5
+6 2 5 4 8 9 6 5 7 6 10 13 14 11 6 6 8 9 12 12 10 9 12 14 6 11 19 9 7 8 11 11 11 14 10 11 10 9 12 9
+6210 7713 7233 8650 7004 6830 6913 6817 6061 6589 6495 7916 6859 6941 5555 6282 6349 7326 7067 8856 7472 9235 8228 10977 8986 9720 9068 10422 9167 9875 8905 9670 8998 10069 8757 9916 8243 8938 7600 7602
+3 0 2 1 5 7 3 2 5 3 9 14 18 11 3 3 6 7 13 10 8 5 11 12 2 8 26 5 3 4 9 8 9 13 7 8 8 6 12 6
+4 4 3 5 7 12 13 7 3 8 9 8 11 7 6 6 4 5 7 8 7 6 5 8 8 12 15 10 5 11 12 11 8 8 8 12 10 8 7 7
+6157 7495 7129 8438 7008 7157 7072 6838 5986 6685 6562 7932 6968 6954 5569 6273 6292 7193 7069 8816 7464 9049 8150 10809 8966 9874 9225 10411 9065 9958 8989 9765 8977 9956 8742 10058 8292 8893 7589 7575
+1 1 0 1 4 13 15 4 0 6 7 5 11 4 3 3 1 2 4 4 4 2 2 4 4 9 16 6 1 8 10 8 4 4 4 9 8 4 4 4
+5 5 9 6 4 12 12 5 5 5 5 6 8 6 5 4 8 6 5 5 6 8 7 9 7 7 10 10 9 11 12 11 11 9 10 10 9 7 8 6
+6131 7352 7181 8300 6935 7464 7202 6734 5964 6591 6526 7824 6998 6905 5557 6142 6339 7130 7020 8594 7431 8997 8125 10713 8921 9711 9250 10400 9068 10036 9071 9854 9034 9911 8779 10068 8315 8789 7604 7489
+2 2 7 2 1 12 12 2 2 2 2 3 5 3 2 1 6 3 2 1 3 4 4 5 3 3 7 6 6 8 10 8 9 5 7 6 6 3 5 3
+8 6 15 14 5 6 8 5 4 6 10 8 9 12 9 4 7 9 9 11 11 10 11 10 8 7 9 11 8 8 10 9 9 9 10 11 11 9 7 5
+6182 7279 7385 8662 6889 7384 7226 6637 5917 6564 6618 7846 7053 7227 5648 6019 6359 7255 7074 8754 7526 9071 8203 10684 8902 9558 9249 10451 9046 9925 9100 9815 9038 9869 8815 10139 8388 8814 7593 7346
+6 3 19 15 2 3 5 2 1 3 9 5 7 12 8 1 4 7 7 9 10 7 9 6 4 3 5 7 4 4 7 5 6 5 7 8 9 6 4 2
+12 10 5 4 7 10 6 3 3 8 8 6 6 11 7 6 9 6 9 9 7 7 9 7 9 8 10 8 9 8 8 8 10 7 12 9 9 8 9 8
+6334 7456 7328 8388 6895 7555 7198 6423 5845 6661 6657 7743 7030 7469 5685 6026 6430 7188 7127 8781 7517 8956 8228 10473 8909 9476 9273 10315 9050 9821 9077 9717 9068 9706 8901 10083 8408 8776 7633 7396
+14 8 2 1 4 8 3 0 0 6 6 3 3 10 5 3 7 3 7 6 4 3 6 3 6 4 7 4 6 4 4 4 7 3 10 5 6 4 6 5
+5 6 9 7 7 6 5 5 2 5 9 6 6 4 6 8 8 6 5 5 9 10 12 11 11 16 10 10 11 3 5 6 10 6 10 10 12 10 9 7
+6303 7377 7375 8314 6901 7470 7146 6344 5750 6568 6720 7647 7007 7266 5696 6155 6473 7125 7076 8561 7559 9033 8329 10520 8967 9890 9297 10310 9105 9416 8978 9502 9097 9492 8934 10092 8504 8863 7672 7382
+2 3 7 3 4 3 2 2 0 2 7 3 3 1 3 6 6 3 2 1 7 7 11 7 9 17 7 6 8 0 1 2 7 2 7 6 11 7 6 4
+13 7 8 12 14 6 5 7 3 4 6 8 10 9 6 5 6 6 9 12 14 8 10 9 6 10 8 11 11 7 8 10 10 9 9 7 12 9 6 6
+6477 7364 7395 8552 7086 7390 7095 6393 5683 6419 6705 7679 7087 7382 5707 6092 6464 7066 7129 8784 7728 8982 8376 10441 8896 9910 9269 10367 9158 9281 8958 9546 9125 9475 8941 9916 8598 8884 7633 7307
+16 4 5 11 17 3 2 4 0 1 3 5 9 7 3 2 3 3 7 11 16 4 7 5 2 6 4 7 8 3 4 7 7 5 6 3 11 6 3 3
+17 11 6 15 17 5 6 8 3 4 7 8 12 7 8 7 7 8 13 9 9 7 7 13 12 10 8 8 10 9 8 9 9 6 7 9 12 7 7 4
+6749 7597 7363 8960 7343 7253 7071 6500 5617 6279 6716 7709 7216 7369 5769 6156 6481 7133 7283 8809 7765 8873 8345 10613 8980 9929 9242 10236 9185 9277 8938 9526 9127 9275 8896 9874 8690 8781 7621 7114
+27 10 3 16 25 2 3 6 0 1 4 5 12 4 6 4 4 5 15 6 6 3 3 10 10 6 4 4 7 5 4 5 6 2 3 5 11 3 4 1
+5 3 5 10 9 12 7 5 7 7 12 8 10 11 7 6 6 6 7 8 7 10 12 6 14 13 7 7 17 10 8 11 14 11 12 11 9 9 9 8
+6708 7325 7307 9036 7389 7555 7073 6417 5655 6332 6855 7737 7291 7602 5803 6155 6472 7073 7280 8771 7750 8955 8443 10344 9113 10131 9190 10051 9390 9334 8919 9630 9257 9394 8980 9957 8703 8807 7660 7178
+2 0 2 7 7 12 4 2 5 4 13 5 8 10 5 3 3 3 4 4 4 7 11 2 14 11 3 3 20 7 4 8 14 8 10 8 6 6 6 5
+6 8 7 5 4 6 6 9 6 6 5 3 6 8 5 5 6 5 8 10 8 12 10 7 12 10 8 15 13 9 10 14 10 12 12 12 6 7 7 8
+6693 7377 7303 8801 7306 7470 7049 6584 5667 6320 6811 7457 7262 7637 5785 6092 6463 6955 7302 8859 7761 9154 8487 10153 9192 10137 9165 10369 9487 9326 8951 9912 9281 9567 9062 10096 8639 8708 7647 7238
+3 5 4 1 1 3 3 7 3 3 2 0 3 5 2 2 3 2 5 7 5 10 7 3 10 6 4 14 12 5 7 13 7 10 10 9 2 3 4 5
+7 10 12 9 6 4 7 6 3 5 7 5 6 4 7 9 7 10 6 10 9 7 9 9 9 11 12 9 12 8 9 11 8 7 9 15 12 12 9 8
+6704 7548 7427 8825 7276 7267 7051 6557 5602 6248 6819 7316 7234 7424 5819 6279 6480 7152 7273 8941 7797 9034 8505 10096 9192 10204 9242 10299 9556 9257 8957 9993 9253 9423 9065 10411 8730 8922 7686 7295
+4 8 12 6 3 1 4 3 0 2 4 2 3 1 5 8 4 9 3 7 6 3 6 5 5 8 10 5 10 4 6 8 4 3 6 14 11 10 6 5
+4 9 9 9 3 7 9 4 3 5 5 6 3 3 5 11 10 8 6 6 10 8 9 11 14 13 13 11 10 10 11 9 9 11 11 10 11 12 9 7
+6638 7648 7471 8848 7171 7261 7105 6409 5538 6180 6776 7245 7130 7162 5801 6578 6573 7214 7244 8773 7857 8983 8522 10166 9320 10390 9343 10356 9573 9315 9014 9946 9252 9533 9119 10400 8793 9123 7724 7287
+1 6 7 6 0 4 7 1 0 2 2 3 0 0 2 11 9 5 3 2 8 4 6 8 14 11 12 7 7 7 9 5 5 8 8 6 9 10 6 4
+9 6 12 8 5 7 3 2 6 5 4 5 5 4 4 4 7 5 11 7 6 7 6 11 11 12 8 12 12 8 10 10 8 9 12 9 11 7 7 7
+6702 7557 7591 8808 7119 7255 7004 6147 5553 6116 6709 7117 7079 6978 5758 6429 6587 7088 7344 8676 7814 8874 8462 10231 9368 10503 9314 10471 9640 9247 9044 9963 9225 9513 9198 10328 8854 9005 7710 7279
+7 3 12 4 2 4 0 0 3 2 1 2 2 1 1 1 4 2 10 3 3 3 2 8 8 9 4 9 10 4 7 6 4 5 10 5 9 3 4 4
+8 4 7 7 10 5 3 4 6 5 5 4 5 5 5 6 3 4 6 7 6 6 9 11 10 9 11 11 12 15 12 7 6 6 8 7 7 9 10 5
+6739 7349 7580 8709 7196 7126 6905 6023 5567 6056 6669 6935 7030 6866 5742 6411 6499 6908 7314 8585 7772 8710 8480 10292 9389 10425 9362 10518 9706 9613 9124 9795 9148 9310 9172 10138 8811 9017 7773 7149
+6 1 4 3 9 2 0 1 3 2 2 1 2 2 2 3 0 1 3 3 3 2 6 8 7 5 8 7 10 15 10 3 2 2 4 3 3 6 8 2
+6931 5720 3892 2415 2299 2927 3305 2951 2135 1496 1641 1954 2221 2615 2662 1914 785 1057 1266 1415 1742 1674 2443 3645 3992 4795 3782 4309 3996 2034 3779 2724 4320 4458 3031 2528 967 1158 1681 748
+183744 358330 106880 156558 65784 186526 91216 186963 60003 97603 48450 126567 63628 167113 73646 123617 26403 71432 39493 95003 52107 111033 70717 233614 111200 304392 105805 274621 111611 134000 105496 176563 119349 282639 86422 164843 33309 79620 50549 52675
+3335 1463 2978 1311 2666 1350 2934 1358 2645 1210 2427 1255 2633 1340 2839 1251 1794 1116 2201 1172 2422 1199 2686 1348 2944 1430 2921 1402 2937 1253 2927 1324 2993 1417 2824 1311 1887 1120 2398 993
+66898 53522 34543 22039 25397 32620 34444 27284 15530 9017 13471 11629 7029 17903 25703 22901 15725 18009 20080 18562 14129 7196 13375 23940 17926 33994 26964 33219 25119 16945 37213 24840 28288 40446 27915 17558 18093 25207 21145 6251
+1889237 3625088 987217 1501185 713352 2179426 969413 1852006 455490 645728 391592 833429 241718 1257002 728840 1523180 427714 1173574 551802 1229705 411978 546475 410850 1690411 566657 2374635 792430 2299036 750929 1167018 1054119 1692077 839481 2750582 797841 1233672 494979 1623498 589806 433560
+3627 1517 3452 1474 3513 1536 3493 1468 3185 1336 3224 1330 2577 1426 3481 1507 3441 1527 3524 1513 3206 1230 3041 1408 3069 1479 3345 1493 3296 1441 3533 1464 3306 1535 3439 1423 3447 1554 3508 1309
+20957 18357 20655 19266 18286 19740 18739 17800 17687 16882 15993 14626 13374 11850 10275 8440 6261 4609 3283 3451 4766 6294 7623 8407 8231 7807 6703 5167 3315 4957 7775 8022 7113 6927 4674 3626 5575 6016 3830 3564
+2377788 4535399 1490566 2594772 1162980 3261442 1424228 2834477 896243 1644173 790637 1682010 577556 1909619 973300 1950325 577083 1386327 621948 1367946 523524 900375 595456 2105495 762916 2711805 943993 2478547 816922 1401545 1226553 2083408 1000350 3011131 897403 1382427 625133 1895700 672986 626510
+887 384 1388 745 1569 591 1317 618 1905 1007 1962 842 2234 581 994 387 1012 284 464 200 823 633 1210 358 1019 248 646 161 342 307 598 343 651 186 456 211 828 266 508 505
+35404 41195 37619 23532 19140 20217 15779 25870 24513 12959 6096 6048 10300 11425 10757 12098 11816 18133 18179 26452 29965 26074 12727 8414 8277 11690 19728 23472 18297 19095 25227 23057 20041 12497 14049 9518 11296 20852 22749 10208
+3223445 6794203 2414992 3884839 1623214 4307845 1792025 4253803 1500483 2341695 926729 1952667 826432 2496969 1223978 2576581 864723 2417196 1071122 2911017 1276435 2448276 905925 2496105 955451 3267307 1424724 3771901 1264245 2490604 1840799 3374973 1487675 3598254 1234128 1884246 898281 3063057 1237707 1216074
+1120 609 1625 590 1164 454 848 605 1647 533 592 259 1183 432 852 446 1300 750 1671 924 2394 1088 1337 301 802 321 1385 606 1450 770 1358 673 1345 308 1122 461 1190 674 1857 810
+15341 15822 16828 10712 8957 9343 8793 11814 9906 3609 2809 2347 3702 8026 9710 7959 6181 6354 3667 8072 8289 8690 3761 2465 4935 5638 9678 9428 5394 5942 8691 7265 4005 3363 4950 3367 4370 6423 6019 3467
+3535131 7358631 2784871 4309876 1811656 4623395 1972061 4724407 1716247 2422926 975399 1979705 900433 2840254 1441635 2910973 1001138 2662544 1138120 3232286 1456458 2835277 979450 2497788 1057750 3417661 1636551 4124828 1370569 2706235 2017010 3618826 1552915 3588981 1329854 1978055 987564 3273894 1360670 1356116
+404 175 593 210 449 161 401 215 535 97 224 59 346 243 642 233 557 195 269 205 532 269 321 44 413 113 552 184 348 175 387 152 205 39 326 113 380 147 400 204
+501 452 124 121 67 0 0 0 0 0 0 0 0 0 0 0 0 0 47 43 55 31 0 0 0 0 0 0 30 43 36 0 0 0 0 0 0 0 0 29
+3459688 6944900 2718520 4058727 1768143 4346002 1922831 4440954 1673403 2277556 951049 1860927 877955 2669845 1405646 2736321 976146 2502797 1110910 3040998 1421505 2667071 954999 2347926 1031344 3212609 1595697 3877348 1337121 2546509 1967578 3401705 1514148 3373650 1296656 1859376 962911 3077468 1326703 1276534
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+12779 9069 4351 1376 1508 3094 4676 5851 6577 7097 8433 9916 9986 8428 7244 6313 4012 4736 6954 7062 5025 4228 2313 2883 7098 12271 13578 13192 15154 7543 3421 4059 6502 3276 2842 4622 5259 8073 5627 4359
+3699985 7085399 2761878 3899751 1762552 4275340 1994360 4533978 1799753 2576930 1142876 2358490 1111305 3027456 1555731 2960003 1054334 2643603 1260939 3292417 1514470 2766811 990285 2384180 1187040 3773759 1902950 4455200 1691116 2857147 2005909 3446985 1642557 3372508 1336935 2031782 1073306 3388812 1437424 1467751
+307 79 108 5 30 24 181 78 314 235 699 391 862 238 422 167 323 131 513 167 283 102 170 66 558 286 678 264 868 223 114 62 348 42 158 175 438 192 346 246
+53514 49702 32754 16454 15534 26299 34213 27312 26561 34468 39867 37225 44390 34148 23475 43895 30229 20166 22714 24660 12211 15254 10776 23646 33790 37742 39174 41384 54465 26440 21219 21585 39191 33155 27429 24641 25928 39080 24374 34242
+4975572 9713858 3530206 4676668 2115640 5634574 2819143 5939931 2433790 4539948 2133446 4503997 2218282 4943783 2116974 5479209 1800743 3723941 1810088 4609928 1788807 3537976 1241025 3693886 2021164 5866116 2856831 6730428 3041161 4310132 2498245 4566302 2603373 5207126 2004714 3423761 1709297 5586469 2024601 3483432
+1137 530 935 329 717 462 1255 456 1116 777 1921 853 2081 711 1116 836 1677 516 1239 532 645 401 843 622 1693 663 1427 623 1877 613 858 462 1572 648 1369 712 1512 727 1197 997
+9954 8258 14481 20659 29877 38659 38879 53106 48349 42253 45034 39350 37748 44899 58865 45202 33669 23671 22148 16277 13497 21257 24451 23590 19466 30145 38921 27696 24279 39977 18058 28544 39873 35268 33924 30202 41803 51945 42987 32611
+5105814 9638401 3812250 5665316 2826557 7671625 3742612 8846249 3608955 6863480 3231369 6651333 3127839 7405650 3568863 7927567 2616454 4954800 2331060 5333361 2089169 4631683 1835073 4921572 2468308 7366196 3780432 8028191 3585875 6507621 2897487 6046007 3557637 7061488 2821851 5073879 2735216 8442665 3072915 5277972
+154 39 346 344 1088 502 1056 636 1376 627 1442 598 1244 615 1703 576 1333 473 963 277 621 449 1320 471 791 392 1047 328 665 619 614 465 1143 495 1244 606 1599 646 1457 628
+15683 16073 18191 20099 19521 23953 28171 20223 15031 18477 59911 76120 25050 19055 32800 66053 37414 35900 18977 19638 26110 29405 24572 19437 18968 19527 25937 20777 26568 26199 17853 24204 26181 37098 32292 25199 32369 33100 39342 39250
+5379252 10047605 4182090 6560243 3255002 8682958 4369304 9557946 3903093 7586868 4682178 10928891 3690098 8132020 4318222 11510063 3507533 6863130 2757968 6219881 2704453 6160363 2417385 5820450 2891560 8123932 4349074 8823006 4175503 7726780 3281523 7170294 4138078 8917024 3576872 6317622 3494368 9969706 4001884 7372728
+265 124 411 279 584 251 651 182 350 206 1369 753 663 201 776 613 1086 523 686 290 987 474 1039 310 649 207 594 209 630 319 524 314 626 416 905 385 931 323 1002 530
+23018 14886 9611 24319 24643 21629 18611 33737 50387 38825 37019 37840 50142 34998 35919 41408 43271 13532 19859 13273 21215 22153 21796 18485 16441 20793 27088 29963 38325 20067 20542 22967 27747 30180 26991 29020 34869 26540 35295 30882
+5833365 10359331 4323372 7660742 3803682 9490833 4735975 11057209 5093676 9516985 5511593 12597979 4879735 9794308 5128604 13363491 4526088 7282731 3196765 6662163 3179249 7151781 2914199 6606909 3239649 8913985 4932943 10134498 5050951 8496058 3724709 8151128 4744060 10236208 4177538 7721496 4298475 11002099 4804211 8827695
+379 103 180 292 633 200 376 298 1043 409 695 287 1086 354 725 299 992 144 609 159 655 282 750 247 485 205 551 279 787 205 527 255 586 282 641 361 829 221 754 343
+19556 20950 16196 14814 23915 29211 41435 81091 62551 35769 40093 44854 42565 28869 53941 34067 21921 17647 29130 27084 25373 21594 30099 27359 23132 21204 21178 30702 30727 20651 21618 39359 31129 25600 34983 30562 29888 29271 32340 35404
+6187644 11024912 4629456 8111251 4320056 10716057 5676931 15375831 6565480 11143547 6398882 14597848 5845988 10980312 6379443 14654705 4973457 7929972 3861598 7926422 3748481 8049372 3610856 7891379 3750088 9681888 5351162 11412706 5710321 9255061 4184337 10080198 5421367 11194860 4967505 9135877 4955182 12140336 5510972 10473188
+289 161 327 142 549 256 755 549 1004 311 640 297 756 246 884 202 428 185 746 319 666 241 832 323 602 194 380 250 543 198 500 397 581 204 730 323 614 221 599 333
+30064 18709 13604 27581 44628 53834 63664 53703 62368 58880 32460 33816 45570 39334 37642 26663 9319 10893 22046 27002 22965 23377 32083 32120 28163 17989 17928 21179 25175 26790 24588 28416 31314 23465 32032 36778 29887 33577 32778 35010
+6801690 11512878 4861641 9319103 5353016 13380546 7162626 17752696 7995865 14092401 7068903 15799583 6864935 12738102 7182414 15413566 5087519 8123431 4328750 9109791 4241949 9002653 4340838 9391288 4376389 10206197 5675863 12029156 6211308 10345690 4708412 11221218 6086495 11964826 5662317 10847295 5595470 13474832 6211287 11995749
+427 127 248 279 873 409 930 303 819 424 451 183 681 300 524 137 140 87 495 278 530 238 750 335 644 142 290 142 390 243 518 234 517 165 569 331 534 222 528 277
+33417 27237 11395 15532 32540 40289 44583 61135 40293 70004 45489 50900 45848 28303 33059 34562 12772 16152 31425 28643 26721 20961 32470 28819 23086 24407 19870 16708 22872 26997 24655 25123 34578 24993 34354 30141 26214 34700 33982 33059
+7486118 12495506 5031562 9714226 6051190 15053001 8123475 20443559 8826251 17547762 8055251 17978815 7865552 13712712 7848187 16612192 5287001 8628383 5023991 10322980 4819111 9750307 5062490 10598400 4857275 11093355 6042101 12333933 6640918 11383602 5221117 12091466 6818455 12782473 6399140 12048271 6125883 14798256 6924897 13307095
+433 188 189 121 539 250 556 309 467 414 580 271 596 177 412 180 207 152 641 264 555 186 657 252 465 192 304 95 321 212 465 181 501 161 539 230 415 207 483 226
+20884 21576 13962 16573 27759 30749 49270 70358 53164 55367 46024 52861 49322 22794 31452 28473 9262 12314 27976 29787 24597 21837 31841 25963 20414 23886 21686 20256 22030 28440 27887 37052 31348 23573 30310 30241 25324 31264 32303 35937
+7833086 13071381 5262860 10149599 6609721 16038999 9180149 23539614 9964922 19896549 9030653 20147777 8929994 14290389 8456261 17364810 5391779 8867242 5613711 11533665 5327570 10506923 5750048 11557623 5257853 11895276 6445618 12838405 7038280 12447896 5803641 13642389 7449576 13463822 7014195 13183335 6620305 15831178 7577773 14716581
+236 130 235 126 403 161 561 296 559 273 531 258 574 119 366 130 130 98 496 241 458 181 557 202 374 169 311 118 285 199 473 252 410 137 416 201 362 164 417 216
+22212 17975 14214 25041 36217 49348 68027 64223 66689 49582 32062 37144 41595 34141 41605 25208 8529 9638 18203 29273 23196 21235 29267 33487 26340 19899 19054 22696 25706 26395 27801 28592 31205 27825 31197 31189 26166 32833 33394 41646
+8205339 13391469 5494826 11079104 7370517 18108515 10689921 26072995 11420901 21748998 9624802 21220994 9770343 15530538 9308691 17871680 5475204 8927363 5938887 12640133 5787523 11181158 6354645 12921551 5799915 12404133 6771781 13462517 7519690 13322695 6369425 14580499 8061287 14365523 7636570 14308541 7123908 16898523 8242240 16392248
+245 93 227 198 484 261 683 233 619 214 327 153 433 194 454 108 110 60 282 207 389 158 450 238 444 125 252 129 318 167 424 166 377 160 392 187 344 168 400 230
+46285 26079 18373 33617 30887 25048 36101 54169 44177 66742 36770 32993 38396 25468 42169 51104 30808 18964 30371 30406 24780 17278 32759 31950 19938 24829 23478 17936 24772 36004 25448 23407 32592 25212 32772 30391 24446 33771 32741 35025
+9183666 14190242 5827316 12479729 7976072 18560934 11345896 27836686 12265071 24544572 10324467 21974793 10507939 16163433 10154259 19939124 6126053 9556843 6566991 13749824 6276485 11571832 7033413 14109217 6164794 13185346 7202890 13756741 7965207 14735361 6860937 15143772 8693183 15052588 8283669 15317210 7570972 17959458 8873427 17560601
+523 146 290 247 373 101 306 174 356 278 357 121 367 122 419 247 500 167 455 195 377 113 455 201 301 154 302 89 286 217 352 113 365 132 384 166 300 158 359 175
+13354 19024 20834 18367 29814 23412 31413 52551 72326 32470 21971 41597 39298 34072 31256 43658 25928 16160 19318 17019 23740 21588 20621 19649 11806 16996 26061 22181 22352 21916 23961 17423 20790 35051 24773 28553 27854 32342 46795 24414
+9295772 14507649 6214415 12859398 8539082 18885698 11865658 29395154 13807726 25066833 10628367 23211974 11250180 17286964 10699755 21425063 6635910 9976285 6896873 13970472 6726656 12203863 7384959 14469883 6312690 13438449 7689265 14294115 8337741 15197739 7302167 15305608 9007616 16302915 8710140 16152439 8093992 18868945 9848113 18006942
+102 89 312 101 336 84 242 154 536 94 177 159 348 168 279 191 372 125 247 77 330 141 248 94 146 81 317 114 241 101 304 67 205 192 262 145 323 141 495 96
+23329 23204 19309 17932 36198 67150 64529 38617 32607 43495 35244 58940 43147 40324 40898 45705 31398 15572 17167 11863 18384 24626 34451 24774 37677 19041 14521 21161 37803 23466 15566 26664 29162 32133 29955 29431 24443 27824 31139 32670
+9660065 15062821 6552868 13189562 9251229 21878130 13218973 30004048 14296554 26235108 11263971 25440438 12072282 18727194 11478108 22947612 7272866 10334437 7163535 13861110 7028676 12984620 8081259 15123777 7118222 13802006 7868508 14736581 9095941 15727604 7517786 16025478 9528209 17298951 9258430 16991499 8516762 19446291 10398261 18933732
+220 117 265 93 395 313 503 96 198 137 305 223 357 201 355 181 418 114 204 37 227 158 415 123 524 94 145 101 415 107 167 131 291 155 316 144 264 112 285 143
+11604 18183 11218 8539 18651 89226 81712 29436 36299 39261 27881 28840 41892 55268 47265 30605 13255 14437 19781 18125 15204 20699 18604 32185 47339 11951 11639 18567 22206 25482 17450 29059 25324 24432 45106 34973 26630 23840 27257 24044
+9715544 15276206 6676046 12922835 9497053 26047317 14977745 30012352 14867556 27073163 11695492 25685931 12841781 20999135 12399787 23451105 7430141 10601369 7490360 14143031 7241868 13477269 8355088 16193753 8150630 13708158 7969605 14993132 9436516 16349536 7776182 16849300 9937697 17762097 10180330 18120704 8984884 19744231 10835442 19274957
+74 73 124 23 163 349 574 58 219 118 215 76 314 259 386 100 136 94 230 86 169 112 190 171 594 40 100 81 212 122 187 141 232 103 453 166 280 81 228 88
+20215 29650 30102 13420 27258 70133 74209 38214 30164 34718 24174 28412 43888 44532 56282 47653 34973 28436 23347 18315 19499 25507 25674 39225 46934 25446 16840 19285 20897 28103 18941 28012 25685 22110 40114 30071 25041 25287 27096 22685
+9989757 16181292 7278871 12971987 9956757 28793337 16500817 30559456 15267478 27581826 12021480 25890400 13643093 22475172 13528955 24971777 8138656 11712349 7900182 14419711 7559529 14235752 8802808 17632053 9146913 14449040 8201129 15278402 9735128 17095181 8066241 17559370 10346191 18054797 10951608 18880993 9400701 20113195 11257594 19512216
+172 153 398 57 255 227 461 85 164 88 172 67 311 175 416 168 422 220 266 83 226 141 274 204 531 141 170 81 185 134 204 127 230 81 366 126 247 90 220 79
+22519 23349 20057 15027 7663 8682 7926 4822 7888 5306 5568 11610 16116 14676 25144 22859 19349 19229 13976 15890 19375 17703 18240 24718 19098 14180 10077 6755 6871 7386 5470 11665 13476 10395 9104 6200 7097 7933 11388 12646
+10316020 16644958 7609872 13116920 9904088 27599207 16291509 29022214 15087987 26252971 11863715 25050328 13714480 22028373 13833970 24877931 8430096 12191018 8060228 14530805 7866091 14469269 9049320 18092784 9406767 14453316 8253994 14776745 9667746 16523289 8004707 17222519 10432395 17610196 10910939 18129092 9347444 19393836 11267672 19118469
+194 101 229 68 31 5 13 1 14 2 11 12 73 26 144 48 200 119 133 64 215 78 172 98 173 50 76 10 27 10 23 24 87 19 37 6 30 9 55 25
+1449 703 1206 1444 628 1549 1035 1398 1298 1405 1254 1670 1256 1657 1634 1530 1668 1789 1761 1817 1857 1931 1961 2096 2129 2104 2256 2299 2344 2425 2561 2549 2622 2730 2793 2900 2938 3081 3137 3234
+10095537 15689491 7450732 12418652 9672901 26038488 15911275 27366841 14744519 24764176 11599611 23649970 13404226 20808526 13530396 23479315 8262290 11569498 7904033 13770624 7717196 13719783 8873546 17136034 9226364 13715416 8105615 14031421 9486324 15680917 7870348 16345814 10238992 16721351 10709961 17219559 9189202 18419542 11066582 18170096
+1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 2 1 2 0 2 1 2 0 2 1 3 1 3 1 5 1 3 1 3 1 5 1 4 1
+1450 669 1179 1404 628 1541 1001 1390 1272 1428 1263 1650 1258 1662 1619 1515 1636 1769 1778 1828 1888 1935 1969 2102 2120 2124 2238 2291 2347 2428 2539 2582 2641 2716 2801 2902 2953 3063 3109 3221
+9880583 14789261 7294874 11759821 9447485 24570917 15539663 25810295 14408961 23366118 11342330 22332401 13101768 19662174 13234017 22163691 8097855 10984039 7752171 13056727 7572810 13015511 8702364 16237055 9050235 13023017 7960480 13330323 9309508 14889270 7738780 15523736 10050902 15884975 10514204 16364719 9035294 17502597 10869796 17277824
+1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 2 1 2 1 3 1 2 0 2 1 3 1 3 1 5 1 3 1 3 1 5 1 4 1
+1503 807 1164 1442 596 1575 1014 1398 1274 1431 1249 1648 1242 1659 1625 1548 1684 1776 1812 1848 1889 1921 1954 2070 2112 2121 2239 2276 2325 2431 2515 2560 2645 2748 2808 2923 2958 3077 3135 3243
+9672350 13951521 7142524 11142853 9226879 23193486 15177660 24347629 14081831 22052125 11091114 21093760 12806452 18584416 12945190 20929028 7938752 10434136 7604969 12386891 7432054 12352633 8535072 15390046 8878298 12371976 7818993 12670367 9136544 14145304 7609883 14749629 9867610 15100745 10323513 15562457 8885356 16641527 10678587 16440438
+1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 2 1 2 1 3 1 2 0 2 1 4 1 3 1 5 1 3 1 4 1 5 1 4 2
+1384 690 1153 1469 647 1547 1012 1372 1244 1413 1264 1648 1251 1664 1623 1551 1658 1780 1749 1840 1870 1916 1972 2104 2121 2106 2270 2319 2347 2411 2523 2568 2624 2730 2784 2903 2944 3076 3134 3243
+9466273 13156855 6993696 10564560 9013083 21896977 14824643 22971122 13762100 20815862 10846553 19929434 12518738 17571628 12663522 19768626 7782956 9917472 7459832 11756752 7294326 11729219 8372417 14595945 8710883 11759075 7681831 12052649 8968460 13444746 7484408 14022458 9688357 14362461 10136969 14807100 8738803 15832057 10492126 15653293
+1 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 2 1 2 1 3 1 2 1 3 1 4 2 3 1 5 1 3 1 3 2 5 1 4 2
\ No newline at end of file
diff --git a/python/tflite_micro/signal/tflm_signal.bzl b/python/tflite_micro/signal/tflm_signal.bzl
index 9bf773e..1635b48 100644
--- a/python/tflite_micro/signal/tflm_signal.bzl
+++ b/python/tflite_micro/signal/tflm_signal.bzl
@@ -25,7 +25,7 @@
       srcs: Python source files for the Python library.
       deps: Dependencies for the Python library.
       visibility: Visibility for the Python library.
-      cc_op_defs: A list of c++ src files containing REGISTER_OP definitions.
+      cc_op_defs: A list of c++ libraries containing REGISTER_OP definitions.
       cc_op_kernels: A list of c++ targets containing kernels that are used
           by the Python library.
     """
@@ -39,12 +39,12 @@
         library_name = name + "_cc"
         native.cc_library(
             name = library_name,
-            srcs = cc_op_defs,
             copts = select({
                 "//conditions:default": ["-pthread"],
             }),
             alwayslink = 1,
             deps =
+                cc_op_defs +
                 cc_op_kernels +
                 ["@tensorflow_cc_deps//:cc_library"] +
                 select({"//conditions:default": []}),
diff --git a/python/tflite_micro/signal/utils/BUILD b/python/tflite_micro/signal/utils/BUILD
index 32a26fb..58b7dcf 100644
--- a/python/tflite_micro/signal/utils/BUILD
+++ b/python/tflite_micro/signal/utils/BUILD
@@ -1,4 +1,5 @@
 # Signal python utilities.
+load("@rules_python//python:defs.bzl", "py_library", "py_test")
 load("@tflm_pip_deps//:requirements.bzl", "requirement")
 load("@pybind11_bazel//:build_defs.bzl", "pybind_extension")
 
@@ -36,9 +37,39 @@
 py_library(
     name = "util",
     srcs = ["util.py"],
+    visibility = ["//visibility:public"],
     deps = [
+        requirement("tensorflow"),
         "//python/tflite_micro:runtime",
-        "//python/tflite_micro/signal:ops",
-        requirement("tensorflow-cpu"),
     ],
 )
+
+pybind_extension(
+    name = "wide_dynamic_func_lut_wrapper",  # :wide_dynamic_func_lut_wrapper.so
+    srcs = [
+        "wide_dynamic_func_lut_wrapper.cc",
+    ],
+)
+
+py_library(
+    name = "wide_dynamic_func_lut",
+    data = [
+        ":wide_dynamic_func_lut_wrapper.so",
+    ],
+)
+
+py_test(
+    name = "wide_dynamic_func_lut_test",
+    srcs = ["wide_dynamic_func_lut_test.py"],
+    data = [
+        ":wide_dynamic_func_lut_wrapper.so",
+    ],
+    python_version = "PY3",
+    srcs_version = "PY3",
+    tags = [
+        "noasan",
+        "nomsan",
+        "noubsan",
+    ],
+    visibility = ["//visibility:public"],
+)
diff --git a/python/tflite_micro/signal/utils/wide_dynamic_func_lut_test.py b/python/tflite_micro/signal/utils/wide_dynamic_func_lut_test.py
new file mode 100644
index 0000000..5cbcdd5
--- /dev/null
+++ b/python/tflite_micro/signal/utils/wide_dynamic_func_lut_test.py
@@ -0,0 +1,157 @@
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for signal.python.utils.wide_dynamic_func_lut"""
+import unittest
+from tflite_micro.python.tflite_micro.signal.utils import wide_dynamic_func_lut_wrapper
+
+
+class WideDynamicFuncLutTest(unittest.TestCase):
+
+  def testWideDynamicFuncLut(self):
+    self.maxDiff = None
+    expected_lut = [
+        32636,
+        32633,
+        32630,
+        -6,
+        0,
+        0,
+        32624,
+        -12,
+        0,
+        0,
+        32612,
+        -23,
+        -2,
+        0,
+        32587,
+        -48,
+        0,
+        0,
+        32539,
+        -96,
+        0,
+        0,
+        32443,
+        -190,
+        0,
+        0,
+        32253,
+        -378,
+        4,
+        0,
+        31879,
+        -739,
+        18,
+        0,
+        31158,
+        -1409,
+        62,
+        0,
+        29811,
+        -2567,
+        202,
+        0,
+        27446,
+        -4301,
+        562,
+        0,
+        23707,
+        -6265,
+        1230,
+        0,
+        18672,
+        -7458,
+        1952,
+        0,
+        13166,
+        -7030,
+        2212,
+        0,
+        8348,
+        -5342,
+        1868,
+        0,
+        4874,
+        -3459,
+        1282,
+        0,
+        2697,
+        -2025,
+        774,
+        0,
+        1446,
+        -1120,
+        436,
+        0,
+        762,
+        -596,
+        232,
+        0,
+        398,
+        -313,
+        122,
+        0,
+        207,
+        -164,
+        64,
+        0,
+        107,
+        -85,
+        34,
+        0,
+        56,
+        -45,
+        18,
+        0,
+        29,
+        -22,
+        8,
+        0,
+        15,
+        -13,
+        6,
+        0,
+        8,
+        -8,
+        4,
+        0,
+        4,
+        -2,
+        0,
+        0,
+        2,
+        -3,
+        2,
+        0,
+        1,
+        0,
+        0,
+        0,
+        1,
+        -3,
+        2,
+        0,
+        0,
+        0,
+        0,
+    ]
+    lut = wide_dynamic_func_lut_wrapper.wide_dynamic_func_lut(
+        0.95, 80.0, 7, 21)
+    self.assertEqual(lut, expected_lut)
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/python/tflite_micro/signal/utils/wide_dynamic_func_lut_wrapper.cc b/python/tflite_micro/signal/utils/wide_dynamic_func_lut_wrapper.cc
new file mode 100644
index 0000000..4ecf161
--- /dev/null
+++ b/python/tflite_micro/signal/utils/wide_dynamic_func_lut_wrapper.cc
@@ -0,0 +1,96 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <pybind11/pybind11.h>
+#include <pybind11/pytypes.h>
+
+#include <cstddef>
+#include <cstdint>
+#include <limits>
+
+namespace py = pybind11;
+
+constexpr size_t kWideDynamicFunctionBits = 32;
+constexpr size_t kWideDynamicFunctionLUTSize =
+    (4 * kWideDynamicFunctionBits - 3);
+
+int16_t PcanGainLookupFunction(const float strength, const float offset,
+                               const int gain_bits, int32_t input_bits,
+                               uint32_t x) {
+  const float x_as_float =
+      static_cast<float>(x) / (static_cast<uint32_t>(1) << input_bits);
+  const float gain_as_float = (static_cast<uint32_t>(1) << gain_bits) *
+                              powf(x_as_float + offset, -strength);
+
+  if (gain_as_float > std::numeric_limits<int16_t>::max()) {
+    return std::numeric_limits<int16_t>::max();
+  }
+  return static_cast<int16_t>(gain_as_float + 0.5f);
+}
+
+py::list WideDynamicFuncLut(float strength, float offset, int input_bits,
+                            int gain_bits) {
+  // Avoid accessing outside of the buffer below gain_lut[4 * interval + 3].
+  int16_t gain_lut_storage[kWideDynamicFunctionLUTSize + 1];
+  int16_t* gain_lut = gain_lut_storage;
+
+  gain_lut[0] =
+      PcanGainLookupFunction(strength, offset, gain_bits, input_bits, 0);
+  gain_lut[1] =
+      PcanGainLookupFunction(strength, offset, gain_bits, input_bits, 1);
+  // This puts the pointer outside of the buffer making the calculation in the
+  // loop below a lot simpler.
+  gain_lut -= 6;
+
+  for (size_t interval = 2; interval <= kWideDynamicFunctionBits; ++interval) {
+    const uint32_t x0 = static_cast<uint32_t>(1) << (interval - 1);
+    const uint32_t x1 = x0 + (x0 >> 1);
+    const uint32_t x2 =
+        (interval == kWideDynamicFunctionBits) ? x0 + (x0 - 1) : 2 * x0;
+
+    const int16_t y0 =
+        PcanGainLookupFunction(strength, offset, gain_bits, input_bits, x0);
+    const int16_t y1 =
+        PcanGainLookupFunction(strength, offset, gain_bits, input_bits, x1);
+    const int16_t y2 =
+        PcanGainLookupFunction(strength, offset, gain_bits, input_bits, x2);
+
+    const int32_t diff1 = static_cast<int32_t>(y1 - y0);
+    const int32_t diff2 = static_cast<int32_t>(y2 - y0);
+    const int32_t a1 = 4 * diff1 - diff2;
+    const int32_t a2 = diff2 - a1;
+
+    gain_lut[4 * interval] = y0;
+    gain_lut[4 * interval + 1] = static_cast<int16_t>(a1);
+    gain_lut[4 * interval + 2] = static_cast<int16_t>(a2);
+    gain_lut[4 * interval + 3] = 0;
+  }
+  // Brings the pointer back to the start of the buffer post calculation for the
+  // lut
+  gain_lut += 6;
+
+  py::list lut_list = py::list();
+  for (size_t i = 0; i < kWideDynamicFunctionLUTSize; i++) {
+    lut_list.append(gain_lut[i]);
+  }
+
+  return lut_list;
+}
+
+PYBIND11_MODULE(wide_dynamic_func_lut_wrapper, m) {
+  m.doc() = "wide_dynamic_func_lut";
+  m.def("wide_dynamic_func_lut", &WideDynamicFuncLut, py::arg("strength"),
+        py::arg("offset"), py::arg("input_bits"), py::arg("gain_bits"));
+}
diff --git a/python/tflite_micro/whl_test.sh b/python/tflite_micro/whl_test.sh
index 8b6d97f..f3e8293 100755
--- a/python/tflite_micro/whl_test.sh
+++ b/python/tflite_micro/whl_test.sh
@@ -37,7 +37,7 @@
 
 # Run the package's post-installation checks.
 python3 << HEREDOC
-import sys
-from tflite_micro import postinstall_check
-sys.exit(0 if postinstall_check.passed() else 1)
+import sys, tflite_micro
+print(tflite_micro.__version__)
+sys.exit(0 if tflite_micro.postinstall_check.passed() else 1)
 HEREDOC
diff --git a/signal/micro/kernels/BUILD b/signal/micro/kernels/BUILD
index 4d37f06..b7ac658 100644
--- a/signal/micro/kernels/BUILD
+++ b/signal/micro/kernels/BUILD
@@ -10,19 +10,24 @@
     srcs = [
         "delay.cc",
         "energy.cc",
-        "fft_auto_scale.cc",
+        "fft_auto_scale_common.cc",
+        "fft_auto_scale_kernel.cc",
         "filter_bank.cc",
         "filter_bank_log.cc",
         "filter_bank_spectral_subtraction.cc",
         "filter_bank_square_root.cc",
+        "filter_bank_square_root_common.cc",
         "framer.cc",
         "irfft.cc",
         "overlap_add.cc",
+        "pcan.cc",
         "rfft.cc",
         "stacker.cc",
         "window.cc",
     ],
     hdrs = [
+        "fft_auto_scale_kernel.h",
+        "filter_bank_square_root.h",
         "irfft.h",
         "rfft.h",
     ],
@@ -40,9 +45,11 @@
         "//signal/src:filter_bank_square_root",
         "//signal/src:irfft",
         "//signal/src:overlap_add",
+        "//signal/src:pcan_argc_fixed",
         "//signal/src:rfft",
         "//signal/src:window",
         "//tensorflow/lite:type_to_tflitetype",
+        "//tensorflow/lite/c:common",
         "//tensorflow/lite/kernels:kernel_util",
         "//tensorflow/lite/kernels/internal:tensor",
         "//tensorflow/lite/micro:flatbuffer_utils",
@@ -324,3 +331,28 @@
         "//tensorflow/lite/micro/testing:micro_test",
     ],
 )
+
+cc_library(
+    name = "pcan_flexbuffers_generated_data",
+    srcs = [
+        "pcan_flexbuffers_generated_data.cc",
+    ],
+    hdrs = [
+        "pcan_flexbuffers_generated_data.h",
+    ],
+)
+
+cc_test(
+    name = "pcan_test",
+    srcs = [
+        "pcan_test.cc",
+    ],
+    deps = [
+        ":pcan_flexbuffers_generated_data",
+        "//tensorflow/lite/c:common",
+        "//tensorflow/lite/micro:op_resolvers",
+        "//tensorflow/lite/micro:test_helpers",
+        "//tensorflow/lite/micro/kernels:kernel_runner",
+        "//tensorflow/lite/micro/testing:micro_test",
+    ],
+)
diff --git a/signal/micro/kernels/delay.cc b/signal/micro/kernels/delay.cc
index 155e198..33ef35e 100644
--- a/signal/micro/kernels/delay.cc
+++ b/signal/micro/kernels/delay.cc
@@ -44,7 +44,7 @@
   tflm_signal::CircularBuffer** circular_buffers;
 };
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* DelayInit(TfLiteContext* context, const char* buffer, size_t length) {
   auto* params = static_cast<TFLMSignalFrontendDelayParams*>(
       context->AllocatePersistentBuffer(context,
                                         sizeof(TFLMSignalFrontendDelayParams)));
@@ -58,7 +58,7 @@
   return params;
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus DelayPrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
@@ -108,7 +108,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus DelayEval(TfLiteContext* context, TfLiteNode* node) {
   auto* params =
       reinterpret_cast<TFLMSignalFrontendDelayParams*>(node->user_data);
   const TfLiteEvalTensor* input =
@@ -132,7 +132,7 @@
   return kTfLiteOk;
 }
 
-void Reset(TfLiteContext* context, void* buffer) {
+void DelayReset(TfLiteContext* context, void* buffer) {
   auto* params = static_cast<TFLMSignalFrontendDelayParams*>(buffer);
   for (int i = 0; i < params->outer_dims; ++i) {
     tflm_signal::CircularBufferReset(params->circular_buffers[i]);
@@ -145,8 +145,8 @@
 
 namespace tflm_signal {
 TFLMRegistration* Register_DELAY() {
-  static TFLMRegistration r =
-      micro::RegisterOp(Init, Prepare, Eval, nullptr, Reset);
+  static TFLMRegistration r = micro::RegisterOp(DelayInit, DelayPrepare,
+                                                DelayEval, nullptr, DelayReset);
   return &r;
 }
 }  // namespace tflm_signal
diff --git a/signal/micro/kernels/energy.cc b/signal/micro/kernels/energy.cc
index fa79bc8..6a86366 100644
--- a/signal/micro/kernels/energy.cc
+++ b/signal/micro/kernels/energy.cc
@@ -42,7 +42,7 @@
   int32_t start_index;
 };
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* EnergyInit(TfLiteContext* context, const char* buffer, size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
 
   auto* data =
@@ -60,7 +60,7 @@
   return data;
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus EnergyPrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
@@ -83,7 +83,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus EnergyEval(TfLiteContext* context, TfLiteNode* node) {
   auto* params = reinterpret_cast<TFLMSignalEnergyParams*>(node->user_data);
 
   const TfLiteEvalTensor* input =
@@ -104,7 +104,8 @@
 
 namespace tflm_signal {
 TFLMRegistration* Register_ENERGY() {
-  static TFLMRegistration r = tflite::micro::RegisterOp(Init, Prepare, Eval);
+  static TFLMRegistration r =
+      tflite::micro::RegisterOp(EnergyInit, EnergyPrepare, EnergyEval);
   return &r;
 }
 }  // namespace tflm_signal
diff --git a/signal/micro/kernels/fft_auto_scale.cc b/signal/micro/kernels/fft_auto_scale_common.cc
similarity index 61%
rename from signal/micro/kernels/fft_auto_scale.cc
rename to signal/micro/kernels/fft_auto_scale_common.cc
index 8eb0d8f..8703ac6 100644
--- a/signal/micro/kernels/fft_auto_scale.cc
+++ b/signal/micro/kernels/fft_auto_scale_common.cc
@@ -12,26 +12,17 @@
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-
-#include "signal/src/fft_auto_scale.h"
-
-#include <math.h>
-#include <stddef.h>
-#include <stdint.h>
-
-#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
+#include "signal/micro/kernels/fft_auto_scale_kernel.h"
 #include "tensorflow/lite/kernels/kernel_util.h"
 #include "tensorflow/lite/micro/kernels/kernel_util.h"
-#include "tensorflow/lite/micro/micro_context.h"
 
 namespace tflite {
-namespace {
 
 constexpr int kInputTensor = 0;
 constexpr int kOutputTensor = 0;
 constexpr int kScaleBitTensor = 1;
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus FftAutoScalePrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2);
 
@@ -60,32 +51,4 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  const TfLiteEvalTensor* input =
-      tflite::micro::GetEvalInput(context, node, kInputTensor);
-  TfLiteEvalTensor* output =
-      tflite::micro::GetEvalOutput(context, node, kOutputTensor);
-  TfLiteEvalTensor* scale_bit =
-      tflite::micro::GetEvalOutput(context, node, kScaleBitTensor);
-
-  const int16_t* input_data = tflite::micro::GetTensorData<int16_t>(input);
-  int16_t* output_data = tflite::micro::GetTensorData<int16_t>(output);
-  int32_t* scale_bit_data = tflite::micro::GetTensorData<int32_t>(scale_bit);
-
-  *scale_bit_data =
-      tflm_signal::FftAutoScale(input_data, output->dims->data[0], output_data);
-  return kTfLiteOk;
-}
-
-}  // namespace
-
-// TODO(b/286250473): remove namespace once de-duped libraries
-namespace tflm_signal {
-
-TFLMRegistration* Register_FFT_AUTO_SCALE() {
-  static TFLMRegistration r = tflite::micro::RegisterOp(nullptr, Prepare, Eval);
-  return &r;
-}
-
-}  // namespace tflm_signal
 }  // namespace tflite
diff --git a/signal/micro/kernels/fft_auto_scale_kernel.cc b/signal/micro/kernels/fft_auto_scale_kernel.cc
new file mode 100644
index 0000000..4946fb3
--- /dev/null
+++ b/signal/micro/kernels/fft_auto_scale_kernel.cc
@@ -0,0 +1,64 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "signal/micro/kernels/fft_auto_scale_kernel.h"
+
+#include <math.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "signal/src/fft_auto_scale.h"
+#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
+#include "tensorflow/lite/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/micro_context.h"
+
+namespace tflite {
+namespace {
+
+constexpr int kInputTensor = 0;
+constexpr int kOutputTensor = 0;
+constexpr int kScaleBitTensor = 1;
+
+TfLiteStatus FftAutoScaleEval(TfLiteContext* context, TfLiteNode* node) {
+  const TfLiteEvalTensor* input =
+      tflite::micro::GetEvalInput(context, node, kInputTensor);
+  TfLiteEvalTensor* output =
+      tflite::micro::GetEvalOutput(context, node, kOutputTensor);
+  TfLiteEvalTensor* scale_bit =
+      tflite::micro::GetEvalOutput(context, node, kScaleBitTensor);
+
+  const int16_t* input_data = tflite::micro::GetTensorData<int16_t>(input);
+  int16_t* output_data = tflite::micro::GetTensorData<int16_t>(output);
+  int32_t* scale_bit_data = tflite::micro::GetTensorData<int32_t>(scale_bit);
+
+  *scale_bit_data =
+      tflm_signal::FftAutoScale(input_data, output->dims->data[0], output_data);
+  return kTfLiteOk;
+}
+
+}  // namespace
+
+// TODO(b/286250473): remove namespace once de-duped libraries
+namespace tflm_signal {
+
+TFLMRegistration* Register_FFT_AUTO_SCALE() {
+  static TFLMRegistration r =
+      tflite::micro::RegisterOp(nullptr, FftAutoScalePrepare, FftAutoScaleEval);
+  return &r;
+}
+
+}  // namespace tflm_signal
+}  // namespace tflite
diff --git a/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_model_settings.cc b/signal/micro/kernels/fft_auto_scale_kernel.h
similarity index 60%
copy from tensorflow/lite/micro/examples/micro_speech/simple_features/simple_model_settings.cc
copy to signal/micro/kernels/fft_auto_scale_kernel.h
index e2cf661..9461c90 100644
--- a/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_model_settings.cc
+++ b/signal/micro/kernels/fft_auto_scale_kernel.h
@@ -1,4 +1,4 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,12 +12,15 @@
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
+#ifndef SIGNAL_MICRO_KERNELS_FFT_AUTO_SCALE_KERNEL_H_
+#define SIGNAL_MICRO_KERNELS_FFT_AUTO_SCALE_KERNEL_H_
 
-#include "tensorflow/lite/micro/examples/micro_speech/simple_features/simple_model_settings.h"
+#include "tensorflow/lite/c/common.h"
 
-const char* kCategoryLabels[kCategoryCount] = {
-    "silence",
-    "unknown",
-    "yes",
-    "no",
-};
+namespace tflite {
+
+TfLiteStatus FftAutoScalePrepare(TfLiteContext* context, TfLiteNode* node);
+
+}  // namespace tflite
+
+#endif  // SIGNAL_MICRO_KERNELS_FFT_AUTO_SCALE_KERNEL_H_
diff --git a/signal/micro/kernels/fft_test.cc b/signal/micro/kernels/fft_test.cc
index 9d6fd25..bf54d41 100644
--- a/signal/micro/kernels/fft_test.cc
+++ b/signal/micro/kernels/fft_test.cc
@@ -274,11 +274,11 @@
   const TFLMRegistration* registration =
       tflite::tflm_signal::Register_RFFT_INT16();
 // See (b/287518815) for why this is needed.
-#if defined(HIFI4) || defined(HIFI5)
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
   int tolerance = 9;
-#else   // defined(HIFI4) || defined(HIFI5)
+#else   // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
   int tolerance = 3;
-#endif  // defined(HIFI4) || defined(HIFI5)
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
   TF_LITE_MICRO_EXPECT_EQ(
       kTfLiteOk, tflite::testing::TestFFT<int16_t>(
                      input_shape, tflite::kRfftInt16Length512Input,
diff --git a/signal/micro/kernels/filter_bank.cc b/signal/micro/kernels/filter_bank.cc
index 7866ac3..1cf08d2 100644
--- a/signal/micro/kernels/filter_bank.cc
+++ b/signal/micro/kernels/filter_bank.cc
@@ -46,7 +46,8 @@
   uint64_t* work_area;
 };
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* FilterBankInit(TfLiteContext* context, const char* buffer,
+                     size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
 
   auto* params = static_cast<TFLMSignalFilterBankParams*>(
@@ -70,7 +71,7 @@
   return params;
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus FilterBankPrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 6);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
@@ -122,7 +123,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus FilterBankEval(TfLiteContext* context, TfLiteNode* node) {
   auto* params = reinterpret_cast<TFLMSignalFilterBankParams*>(node->user_data);
 
   const TfLiteEvalTensor* input0 =
@@ -166,7 +167,8 @@
 namespace tflm_signal {
 
 TFLMRegistration* Register_FILTER_BANK() {
-  static TFLMRegistration r = tflite::micro::RegisterOp(Init, Prepare, Eval);
+  static TFLMRegistration r = tflite::micro::RegisterOp(
+      FilterBankInit, FilterBankPrepare, FilterBankEval);
   return &r;
 }
 
diff --git a/signal/micro/kernels/filter_bank_log.cc b/signal/micro/kernels/filter_bank_log.cc
index ea0cdb7..3d38e61 100644
--- a/signal/micro/kernels/filter_bank_log.cc
+++ b/signal/micro/kernels/filter_bank_log.cc
@@ -42,7 +42,8 @@
   int output_scale;
 };
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* FilterBankLogInit(TfLiteContext* context, const char* buffer,
+                        size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
 
   auto* params = static_cast<TFLMSignalLogParams*>(
@@ -59,7 +60,7 @@
   return params;
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus FilterBankLogPrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
@@ -82,7 +83,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus FilterBankLogEval(TfLiteContext* context, TfLiteNode* node) {
   auto* params = reinterpret_cast<TFLMSignalLogParams*>(node->user_data);
 
   const TfLiteEvalTensor* input =
@@ -103,7 +104,8 @@
 namespace tflm_signal {
 
 TFLMRegistration* Register_FILTER_BANK_LOG() {
-  static TFLMRegistration r = tflite::micro::RegisterOp(Init, Prepare, Eval);
+  static TFLMRegistration r = tflite::micro::RegisterOp(
+      FilterBankLogInit, FilterBankLogPrepare, FilterBankLogEval);
   return &r;
 }
 
diff --git a/signal/micro/kernels/filter_bank_spectral_subtraction.cc b/signal/micro/kernels/filter_bank_spectral_subtraction.cc
index f5ea4d7..e069323 100644
--- a/signal/micro/kernels/filter_bank_spectral_subtraction.cc
+++ b/signal/micro/kernels/filter_bank_spectral_subtraction.cc
@@ -51,12 +51,14 @@
   size_t noise_estimate_size;
 };
 
-void ResetState(TFLMSignalSpectralSubtractionParams* params) {
+void FilterBankSpectralSubtractionResetState(
+    TFLMSignalSpectralSubtractionParams* params) {
   memset(params->noise_estimate, 0,
          sizeof(uint32_t) * params->config.num_channels);
 }
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* FilterBankSpectralSubtractionInit(TfLiteContext* context,
+                                        const char* buffer, size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
 
   auto* params = static_cast<TFLMSignalSpectralSubtractionParams*>(
@@ -96,7 +98,8 @@
   return params;
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus FilterBankSpectralSubtractionPrepare(TfLiteContext* context,
+                                                  TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2);
 
@@ -125,7 +128,7 @@
   TfLiteTypeSizeOf(output->type, &params->noise_estimate_size);
   params->noise_estimate_size *= ElementCount(*noise_estimate->dims);
 
-  ResetState(params);
+  FilterBankSpectralSubtractionResetState(params);
 
   micro_context->DeallocateTempTfLiteTensor(input);
   micro_context->DeallocateTempTfLiteTensor(output);
@@ -133,7 +136,8 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus FilterBankSpectralSubtractionEval(TfLiteContext* context,
+                                               TfLiteNode* node) {
   auto* params =
       reinterpret_cast<TFLMSignalSpectralSubtractionParams*>(node->user_data);
 
@@ -158,8 +162,9 @@
   return kTfLiteOk;
 }
 
-void Reset(TfLiteContext* context, void* buffer) {
-  ResetState(static_cast<TFLMSignalSpectralSubtractionParams*>(buffer));
+void FilterBankSpectralSubtractionReset(TfLiteContext* context, void* buffer) {
+  FilterBankSpectralSubtractionResetState(
+      static_cast<TFLMSignalSpectralSubtractionParams*>(buffer));
 }
 
 }  // namespace
@@ -167,8 +172,10 @@
 namespace tflm_signal {
 
 TFLMRegistration* Register_FILTER_BANK_SPECTRAL_SUBTRACTION() {
-  static TFLMRegistration r =
-      tflite::micro::RegisterOp(Init, Prepare, Eval, /*Free*/ nullptr, Reset);
+  static TFLMRegistration r = tflite::micro::RegisterOp(
+      FilterBankSpectralSubtractionInit, FilterBankSpectralSubtractionPrepare,
+      FilterBankSpectralSubtractionEval,
+      /*Free*/ nullptr, FilterBankSpectralSubtractionReset);
   return &r;
 }
 
diff --git a/signal/micro/kernels/filter_bank_square_root.cc b/signal/micro/kernels/filter_bank_square_root.cc
index 8df2617..bd7eff9 100644
--- a/signal/micro/kernels/filter_bank_square_root.cc
+++ b/signal/micro/kernels/filter_bank_square_root.cc
@@ -17,6 +17,7 @@
 
 #include <stdint.h>
 
+#include "signal/micro/kernels/filter_bank_square_root.h"
 #include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
 #include "tensorflow/lite/kernels/kernel_util.h"
 #include "tensorflow/lite/micro/kernels/kernel_util.h"
@@ -30,37 +31,8 @@
 constexpr int kScaleBitsTensor = 1;
 constexpr int kOutputTensor = 0;
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
-  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
-
-  MicroContext* micro_context = GetMicroContext(context);
-
-  TfLiteTensor* input =
-      micro_context->AllocateTempInputTensor(node, kInputTensor);
-  TfLiteTensor* scale_bits =
-      micro_context->AllocateTempInputTensor(node, kScaleBitsTensor);
-  TfLiteTensor* output =
-      micro_context->AllocateTempOutputTensor(node, kOutputTensor);
-  TF_LITE_ENSURE(context, input != nullptr);
-  TF_LITE_ENSURE(context, scale_bits != nullptr);
-  TF_LITE_ENSURE(context, output != nullptr);
-
-  TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1);
-  TF_LITE_ENSURE_EQ(context, NumDimensions(scale_bits), 0);
-  TF_LITE_ENSURE_EQ(context, NumDimensions(output), 1);
-
-  TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteUInt64);
-  TF_LITE_ENSURE_TYPES_EQ(context, scale_bits->type, kTfLiteInt32);
-  TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteUInt32);
-
-  micro_context->DeallocateTempTfLiteTensor(input);
-  micro_context->DeallocateTempTfLiteTensor(output);
-  micro_context->DeallocateTempTfLiteTensor(scale_bits);
-  return kTfLiteOk;
-}
-
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus FilterBankSquareRootEval(TfLiteContext* context,
+                                      TfLiteNode* node) {
   const TfLiteEvalTensor* input =
       tflite::micro::GetEvalInput(context, node, kInputTensor);
   const TfLiteEvalTensor* scale_bits =
@@ -83,7 +55,8 @@
 namespace tflm_signal {
 
 TFLMRegistration* Register_FILTER_BANK_SQUARE_ROOT() {
-  static TFLMRegistration r = tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  static TFLMRegistration r = tflite::micro::RegisterOp(
+      nullptr, FilterBankSquareRootPrepare, FilterBankSquareRootEval);
   return &r;
 }
 
diff --git a/signal/micro/kernels/filter_bank_square_root.h b/signal/micro/kernels/filter_bank_square_root.h
new file mode 100644
index 0000000..25b6779
--- /dev/null
+++ b/signal/micro/kernels/filter_bank_square_root.h
@@ -0,0 +1,27 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef SIGNAL_MICRO_KERNELS_FILTER_BANK_SQUARE_ROOT_H_
+#define SIGNAL_MICRO_KERNELS_FILTER_BANK_SQUARE_ROOT_H_
+
+#include "tensorflow/lite/c/common.h"
+
+namespace tflite {
+
+TfLiteStatus FilterBankSquareRootPrepare(TfLiteContext* context,
+                                         TfLiteNode* node);
+
+}  // namespace tflite
+
+#endif  // SIGNAL_MICRO_KERNELS_FILTER_BANK_SQUARE_ROOT_H_
diff --git a/signal/micro/kernels/filter_bank_square_root_common.cc b/signal/micro/kernels/filter_bank_square_root_common.cc
new file mode 100644
index 0000000..b430901
--- /dev/null
+++ b/signal/micro/kernels/filter_bank_square_root_common.cc
@@ -0,0 +1,56 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "signal/micro/kernels/filter_bank_square_root.h"
+#include "tensorflow/lite/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/kernels/kernel_util.h"
+
+namespace tflite {
+
+constexpr int kInputTensor = 0;
+constexpr int kScaleBitsTensor = 1;
+constexpr int kOutputTensor = 0;
+
+TfLiteStatus FilterBankSquareRootPrepare(TfLiteContext* context,
+                                         TfLiteNode* node) {
+  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
+  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
+
+  MicroContext* micro_context = GetMicroContext(context);
+
+  TfLiteTensor* input =
+      micro_context->AllocateTempInputTensor(node, kInputTensor);
+  TfLiteTensor* scale_bits =
+      micro_context->AllocateTempInputTensor(node, kScaleBitsTensor);
+  TfLiteTensor* output =
+      micro_context->AllocateTempOutputTensor(node, kOutputTensor);
+  TF_LITE_ENSURE(context, input != nullptr);
+  TF_LITE_ENSURE(context, scale_bits != nullptr);
+  TF_LITE_ENSURE(context, output != nullptr);
+
+  TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1);
+  TF_LITE_ENSURE_EQ(context, NumDimensions(scale_bits), 0);
+  TF_LITE_ENSURE_EQ(context, NumDimensions(output), 1);
+
+  TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteUInt64);
+  TF_LITE_ENSURE_TYPES_EQ(context, scale_bits->type, kTfLiteInt32);
+  TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteUInt32);
+
+  micro_context->DeallocateTempTfLiteTensor(input);
+  micro_context->DeallocateTempTfLiteTensor(output);
+  micro_context->DeallocateTempTfLiteTensor(scale_bits);
+  return kTfLiteOk;
+}
+
+}  // namespace tflite
diff --git a/signal/micro/kernels/framer.cc b/signal/micro/kernels/framer.cc
index 8437bd0..36f189c 100644
--- a/signal/micro/kernels/framer.cc
+++ b/signal/micro/kernels/framer.cc
@@ -48,7 +48,7 @@
   tflite::tflm_signal::CircularBuffer** circular_buffers;
 };
 
-void ResetState(TFLMSignalFramerParams* params) {
+void FramerResetState(TFLMSignalFramerParams* params) {
   for (int i = 0; i < params->outer_dims; ++i) {
     tflite::tflm_signal::CircularBufferReset(params->circular_buffers[i]);
     if (params->prefill) {
@@ -58,7 +58,7 @@
   }
 }
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* FramerInit(TfLiteContext* context, const char* buffer, size_t length) {
   const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer);
 
   auto* params =
@@ -76,7 +76,7 @@
   return params;
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus FramerPrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2);
 
@@ -132,7 +132,7 @@
         capacity, params->state_buffers[i], state_size);
   }
 
-  ResetState(params);
+  FramerResetState(params);
 
   micro_context->DeallocateTempTfLiteTensor(input);
   micro_context->DeallocateTempTfLiteTensor(output);
@@ -141,7 +141,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus FramerEval(TfLiteContext* context, TfLiteNode* node) {
   auto* params = reinterpret_cast<TFLMSignalFramerParams*>(node->user_data);
 
   const TfLiteEvalTensor* input =
@@ -181,8 +181,8 @@
   return kTfLiteOk;
 }
 
-void Reset(TfLiteContext* context, void* buffer) {
-  ResetState(static_cast<TFLMSignalFramerParams*>(buffer));
+void FramerReset(TfLiteContext* context, void* buffer) {
+  FramerResetState(static_cast<TFLMSignalFramerParams*>(buffer));
 }
 
 }  // namespace
@@ -190,8 +190,8 @@
 namespace tflm_signal {
 // TODO(b/286250473): remove namespace once de-duped libraries above
 TFLMRegistration* Register_FRAMER() {
-  static TFLMRegistration r =
-      tflite::micro::RegisterOp(Init, Prepare, Eval, nullptr, Reset);
+  static TFLMRegistration r = tflite::micro::RegisterOp(
+      FramerInit, FramerPrepare, FramerEval, nullptr, FramerReset);
   return &r;
 }
 }  // namespace tflm_signal
diff --git a/signal/micro/kernels/irfft.cc b/signal/micro/kernels/irfft.cc
index 6cb3d7c..b0d58d5 100644
--- a/signal/micro/kernels/irfft.cc
+++ b/signal/micro/kernels/irfft.cc
@@ -50,7 +50,7 @@
 
 template <typename T, size_t (*get_needed_memory_func)(int32_t),
           void* (*init_func)(int32_t, void*, size_t)>
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* IrfftInit(TfLiteContext* context, const char* buffer, size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
 
   auto* params = static_cast<TfLiteAudioFrontendIrfftParams*>(
@@ -79,7 +79,7 @@
 }
 
 template <TfLiteType TfLiteTypeEnum>
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus IrfftPrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
@@ -113,7 +113,7 @@
 }
 
 template <typename T, void (*apply_func)(void*, const Complex<T>* input, T*)>
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus IrfftEval(TfLiteContext* context, TfLiteNode* node) {
   auto* params =
       reinterpret_cast<TfLiteAudioFrontendIrfftParams*>(node->user_data);
 
@@ -133,61 +133,61 @@
   return kTfLiteOk;
 }
 
-void* InitAll(TfLiteContext* context, const char* buffer, size_t length) {
+void* IrfftInitAll(TfLiteContext* context, const char* buffer, size_t length) {
   const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer);
   const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap();
   auto tensor_type = static_cast<tflite::TensorType>(m["T"].AsInt32());
 
   switch (tensor_type) {
     case TensorType_INT16: {
-      return Init<int16_t, tflm_signal::IrfftInt16GetNeededMemory,
-                  tflm_signal::IrfftInt16Init>(context, buffer, length);
+      return IrfftInit<int16_t, tflm_signal::IrfftInt16GetNeededMemory,
+                       tflm_signal::IrfftInt16Init>(context, buffer, length);
     }
     case TensorType_INT32: {
-      return Init<int32_t, tflm_signal::IrfftInt32GetNeededMemory,
-                  tflm_signal::IrfftInt32Init>(context, buffer, length);
+      return IrfftInit<int32_t, tflm_signal::IrfftInt32GetNeededMemory,
+                       tflm_signal::IrfftInt32Init>(context, buffer, length);
     }
     case TensorType_FLOAT32: {
-      return Init<float, tflm_signal::IrfftFloatGetNeededMemory,
-                  tflm_signal::IrfftFloatInit>(context, buffer, length);
+      return IrfftInit<float, tflm_signal::IrfftFloatGetNeededMemory,
+                       tflm_signal::IrfftFloatInit>(context, buffer, length);
     }
     default:
       return nullptr;
   }
 }
 
-TfLiteStatus PrepareAll(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus IrfftPrepareAll(TfLiteContext* context, TfLiteNode* node) {
   auto* params =
       reinterpret_cast<TfLiteAudioFrontendIrfftParams*>(node->user_data);
 
   switch (params->fft_type) {
     case kTfLiteInt16: {
-      return Prepare<kTfLiteInt16>(context, node);
+      return IrfftPrepare<kTfLiteInt16>(context, node);
     }
     case kTfLiteInt32: {
-      return Prepare<kTfLiteInt32>(context, node);
+      return IrfftPrepare<kTfLiteInt32>(context, node);
     }
     case kTfLiteFloat32: {
-      return Prepare<kTfLiteFloat32>(context, node);
+      return IrfftPrepare<kTfLiteFloat32>(context, node);
     }
     default:
       return kTfLiteError;
   }
 }
 
-TfLiteStatus EvalAll(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus IrfftEvalAll(TfLiteContext* context, TfLiteNode* node) {
   auto* params =
       reinterpret_cast<TfLiteAudioFrontendIrfftParams*>(node->user_data);
 
   switch (params->fft_type) {
     case kTfLiteInt16: {
-      return Eval<int16_t, tflm_signal::IrfftInt16Apply>(context, node);
+      return IrfftEval<int16_t, tflm_signal::IrfftInt16Apply>(context, node);
     }
     case kTfLiteInt32: {
-      return Eval<int32_t, tflm_signal::IrfftInt32Apply>(context, node);
+      return IrfftEval<int32_t, tflm_signal::IrfftInt32Apply>(context, node);
     }
     case kTfLiteFloat32: {
-      return Eval<float, tflm_signal::IrfftFloatApply>(context, node);
+      return IrfftEval<float, tflm_signal::IrfftFloatApply>(context, node);
     }
     default:
       return kTfLiteError;
@@ -201,28 +201,28 @@
 
 TFLMRegistration* Register_IRFFT() {
   static TFLMRegistration r =
-      tflite::micro::RegisterOp(InitAll, PrepareAll, EvalAll);
+      tflite::micro::RegisterOp(IrfftInitAll, IrfftPrepareAll, IrfftEvalAll);
   return &r;
 }
 
 TFLMRegistration* Register_IRFFT_FLOAT() {
   static TFLMRegistration r = tflite::micro::RegisterOp(
-      Init<float, IrfftFloatGetNeededMemory, IrfftFloatInit>,
-      Prepare<kTfLiteFloat32>, Eval<float, IrfftFloatApply>);
+      IrfftInit<float, IrfftFloatGetNeededMemory, IrfftFloatInit>,
+      IrfftPrepare<kTfLiteFloat32>, IrfftEval<float, IrfftFloatApply>);
   return &r;
 }
 
 TFLMRegistration* Register_IRFFT_INT16() {
   static TFLMRegistration r = tflite::micro::RegisterOp(
-      Init<int16_t, IrfftInt16GetNeededMemory, IrfftInt16Init>,
-      Prepare<kTfLiteInt16>, Eval<int16_t, IrfftInt16Apply>);
+      IrfftInit<int16_t, IrfftInt16GetNeededMemory, IrfftInt16Init>,
+      IrfftPrepare<kTfLiteInt16>, IrfftEval<int16_t, IrfftInt16Apply>);
   return &r;
 }
 
 TFLMRegistration* Register_IRFFT_INT32() {
   static TFLMRegistration r = tflite::micro::RegisterOp(
-      Init<int32_t, IrfftInt32GetNeededMemory, IrfftInt32Init>,
-      Prepare<kTfLiteInt32>, Eval<int32_t, IrfftInt32Apply>);
+      IrfftInit<int32_t, IrfftInt32GetNeededMemory, IrfftInt32Init>,
+      IrfftPrepare<kTfLiteInt32>, IrfftEval<int32_t, IrfftInt32Apply>);
   return &r;
 }
 
diff --git a/signal/micro/kernels/overlap_add.cc b/signal/micro/kernels/overlap_add.cc
index 8de7463..c365cd8 100644
--- a/signal/micro/kernels/overlap_add.cc
+++ b/signal/micro/kernels/overlap_add.cc
@@ -48,14 +48,15 @@
 };
 
 template <typename T>
-void ResetState(TFLMSignalOverlapAddParams<T>* params) {
+void OverlapAddResetState(TFLMSignalOverlapAddParams<T>* params) {
   for (int i = 0; i < params->outer_dims; i++) {
     memset(params->state_buffers[i], 0, sizeof(T) * params->frame_size);
   }
 }
 
 template <typename T>
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* OverlapAddInit(TfLiteContext* context, const char* buffer,
+                     size_t length) {
   const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer);
 
   auto* params = static_cast<TFLMSignalOverlapAddParams<T>*>(
@@ -73,7 +74,7 @@
 }
 
 template <typename T, TfLiteType TfLiteTypeEnum>
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus OverlapAddPrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
@@ -112,7 +113,7 @@
         static_cast<T*>(context->AllocatePersistentBuffer(
             context, params->frame_size * sizeof(T)));
   }
-  ResetState(params);
+  OverlapAddResetState(params);
 
   micro_context->DeallocateTempTfLiteTensor(input);
   micro_context->DeallocateTempTfLiteTensor(output);
@@ -120,7 +121,7 @@
 }
 
 template <typename T>
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus OverlapAddEval(TfLiteContext* context, TfLiteNode* node) {
   auto* params =
       reinterpret_cast<TFLMSignalOverlapAddParams<T>*>(node->user_data);
   const TfLiteEvalTensor* input =
@@ -144,69 +145,70 @@
 }
 
 template <typename T>
-void Reset(TfLiteContext* context, void* buffer) {
-  ResetState(static_cast<TFLMSignalOverlapAddParams<T>*>(buffer));
+void OverlapAddReset(TfLiteContext* context, void* buffer) {
+  OverlapAddResetState(static_cast<TFLMSignalOverlapAddParams<T>*>(buffer));
 }
 
-void* InitAll(TfLiteContext* context, const char* buffer, size_t length) {
+void* OverlapAddInitAll(TfLiteContext* context, const char* buffer,
+                        size_t length) {
   const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer);
   const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap();
   auto tensor_type = static_cast<tflite::TensorType>(m["T"].AsInt32());
 
   switch (tensor_type) {
     case TensorType_INT16: {
-      return Init<int16_t>(context, buffer, length);
+      return OverlapAddInit<int16_t>(context, buffer, length);
     }
     case TensorType_FLOAT32: {
-      return Init<float>(context, buffer, length);
+      return OverlapAddInit<float>(context, buffer, length);
     }
     default:
       return nullptr;
   }
 }
 
-TfLiteStatus PrepareAll(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus OverlapAddPrepareAll(TfLiteContext* context, TfLiteNode* node) {
   auto* params =
       reinterpret_cast<TFLMSignalOverlapAddParams<void>*>(node->user_data);
 
   switch (params->type) {
     case kTfLiteInt16: {
-      return Prepare<int16_t, kTfLiteInt16>(context, node);
+      return OverlapAddPrepare<int16_t, kTfLiteInt16>(context, node);
     }
     case kTfLiteFloat32: {
-      return Prepare<float, kTfLiteFloat32>(context, node);
+      return OverlapAddPrepare<float, kTfLiteFloat32>(context, node);
     }
     default:
       return kTfLiteError;
   }
 }
 
-TfLiteStatus EvalAll(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus OverlapAddEvalAll(TfLiteContext* context, TfLiteNode* node) {
   auto* params =
       reinterpret_cast<TFLMSignalOverlapAddParams<void>*>(node->user_data);
 
   switch (params->type) {
     case kTfLiteInt16: {
-      return Eval<int16_t>(context, node);
+      return OverlapAddEval<int16_t>(context, node);
     }
     case kTfLiteFloat32: {
-      return Eval<float>(context, node);
+      return OverlapAddEval<float>(context, node);
     }
     default:
       return kTfLiteError;
   }
 }
 
-void ResetAll(TfLiteContext* context, void* buffer) {
+void OverlapAddResetAll(TfLiteContext* context, void* buffer) {
   auto* params = reinterpret_cast<TFLMSignalOverlapAddParams<void>*>(buffer);
 
   switch (params->type) {
     case kTfLiteInt16: {
-      Reset<int16_t>(context, buffer);
+      OverlapAddReset<int16_t>(context, buffer);
       break;
     }
     case kTfLiteFloat32: {
-      Reset<float>(context, buffer);
+      OverlapAddReset<float>(context, buffer);
       break;
     }
     default:
@@ -218,22 +220,23 @@
 
 namespace tflm_signal {
 TFLMRegistration* Register_OVERLAP_ADD() {
-  static TFLMRegistration r = tflite::micro::RegisterOp(
-      InitAll, PrepareAll, EvalAll, nullptr, ResetAll);
+  static TFLMRegistration r =
+      tflite::micro::RegisterOp(OverlapAddInitAll, OverlapAddPrepareAll,
+                                OverlapAddEvalAll, nullptr, OverlapAddResetAll);
   return &r;
 }
 
 TFLMRegistration* Register_OVERLAP_ADD_FLOAT() {
-  static TFLMRegistration r =
-      tflite::micro::RegisterOp(Init<float>, Prepare<float, kTfLiteFloat32>,
-                                Eval<float>, nullptr, Reset<float>);
+  static TFLMRegistration r = tflite::micro::RegisterOp(
+      OverlapAddInit<float>, OverlapAddPrepare<float, kTfLiteFloat32>,
+      OverlapAddEval<float>, nullptr, OverlapAddReset<float>);
   return &r;
 }
 
 TFLMRegistration* Register_OVERLAP_ADD_INT16() {
-  static TFLMRegistration r =
-      tflite::micro::RegisterOp(Init<int16_t>, Prepare<int16_t, kTfLiteInt16>,
-                                Eval<int16_t>, nullptr, Reset<int16_t>);
+  static TFLMRegistration r = tflite::micro::RegisterOp(
+      OverlapAddInit<int16_t>, OverlapAddPrepare<int16_t, kTfLiteInt16>,
+      OverlapAddEval<int16_t>, nullptr, OverlapAddReset<int16_t>);
   return &r;
 }
 }  // namespace tflm_signal
diff --git a/signal/micro/kernels/pcan.cc b/signal/micro/kernels/pcan.cc
new file mode 100644
index 0000000..9473e1b
--- /dev/null
+++ b/signal/micro/kernels/pcan.cc
@@ -0,0 +1,135 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "signal/src/pcan_argc_fixed.h"
+#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
+#include "tensorflow/lite/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/flatbuffer_utils.h"
+#include "tensorflow/lite/micro/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/memory_helpers.h"
+#include "tensorflow/lite/micro/micro_context.h"
+
+namespace tflite {
+namespace tflm_signal {
+// TODO(b/286250473): remove namespace once de-duped libraries above
+
+constexpr int kInputTensor = 0;
+constexpr int kNoiseEstimateTensor = 1;
+constexpr int kGainLutTensor = 2;
+constexpr int kOutputTensor = 0;
+
+// Indices into the init flexbuffer's vector.
+// The parameter's name is in the comment that follows.
+// Elements in the vectors are ordered alphabetically by parameter name.
+constexpr int kSnrShiftIndex = 0;  // 'snr_shift'
+
+struct TfLitePcanParams {
+  int snr_shift;
+};
+
+void* PcanInit(TfLiteContext* context, const char* buffer, size_t length) {
+  auto* params = static_cast<TfLitePcanParams*>(
+      context->AllocatePersistentBuffer(context, sizeof(TfLitePcanParams)));
+
+  tflite::FlexbufferWrapper fbw(reinterpret_cast<const uint8_t*>(buffer),
+                                length);
+  params->snr_shift = fbw.ElementAsInt32(kSnrShiftIndex);
+  return params;
+}
+
+TfLiteStatus PcanPrepare(TfLiteContext* context, TfLiteNode* node) {
+  TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
+  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
+
+  MicroContext* micro_context = GetMicroContext(context);
+
+  TfLiteTensor* input =
+      micro_context->AllocateTempInputTensor(node, kInputTensor);
+  TF_LITE_ENSURE(context, input != nullptr);
+  TfLiteTensor* noise_estimate =
+      micro_context->AllocateTempInputTensor(node, kNoiseEstimateTensor);
+  TF_LITE_ENSURE(context, noise_estimate != nullptr);
+  TfLiteTensor* gain_lut =
+      micro_context->AllocateTempInputTensor(node, kGainLutTensor);
+  TF_LITE_ENSURE(context, gain_lut != nullptr);
+  TfLiteTensor* output =
+      micro_context->AllocateTempOutputTensor(node, kOutputTensor);
+  TF_LITE_ENSURE(context, output != nullptr);
+
+  TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1);
+  TF_LITE_ENSURE_EQ(context, NumDimensions(noise_estimate), 1);
+  TF_LITE_ENSURE_EQ(context, NumDimensions(gain_lut), 1);
+  TF_LITE_ENSURE_EQ(context, NumDimensions(output), 1);
+
+  TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteUInt32);
+  TF_LITE_ENSURE_TYPES_EQ(context, noise_estimate->type, kTfLiteUInt32);
+  TF_LITE_ENSURE_TYPES_EQ(context, gain_lut->type, kTfLiteInt16);
+  TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteUInt32);
+
+  micro_context->DeallocateTempTfLiteTensor(input);
+  micro_context->DeallocateTempTfLiteTensor(output);
+  micro_context->DeallocateTempTfLiteTensor(noise_estimate);
+  micro_context->DeallocateTempTfLiteTensor(gain_lut);
+  return kTfLiteOk;
+}
+
+TfLiteStatus PcanEval(TfLiteContext* context, TfLiteNode* node) {
+  auto* params = reinterpret_cast<TfLitePcanParams*>(node->user_data);
+
+  const TfLiteEvalTensor* input =
+      tflite::micro::GetEvalInput(context, node, kInputTensor);
+  TF_LITE_ENSURE(context, input != nullptr);
+  const TfLiteEvalTensor* noise_estimate =
+      tflite::micro::GetEvalInput(context, node, kNoiseEstimateTensor);
+  TF_LITE_ENSURE(context, noise_estimate != nullptr);
+  const TfLiteEvalTensor* gain_lut =
+      tflite::micro::GetEvalInput(context, node, kGainLutTensor);
+  TF_LITE_ENSURE(context, gain_lut != nullptr);
+  TfLiteEvalTensor* output =
+      tflite::micro::GetEvalOutput(context, node, kOutputTensor);
+  TF_LITE_ENSURE(context, output != nullptr);
+
+  const uint32_t* input_data = tflite::micro::GetTensorData<uint32_t>(input);
+  const uint32_t* noise_estimate_data =
+      tflite::micro::GetTensorData<uint32_t>(noise_estimate);
+  const int16_t* gain_lut_data =
+      tflite::micro::GetTensorData<int16_t>(gain_lut);
+  uint32_t* output_data = tflite::micro::GetTensorData<uint32_t>(output);
+
+  int num_channels = input->dims->data[0];
+
+  size_t output_byte_size;
+  TF_LITE_ENSURE_OK(
+      context, tflite::TfLiteEvalTensorByteLength(output, &output_byte_size));
+
+  memcpy(output_data, input_data, output_byte_size);
+
+  tflite::tflm_signal::ApplyPcanAutoGainControlFixed(
+      gain_lut_data, params->snr_shift, noise_estimate_data, output_data,
+      num_channels);
+  return kTfLiteOk;
+}
+
+TFLMRegistration* Register_PCAN() {
+  static TFLMRegistration r =
+      tflite::micro::RegisterOp(PcanInit, PcanPrepare, PcanEval);
+  return &r;
+}
+
+}  // namespace tflm_signal
+}  // namespace tflite
diff --git a/signal/micro/kernels/pcan_flexbuffers_generated_data.cc b/signal/micro/kernels/pcan_flexbuffers_generated_data.cc
new file mode 100644
index 0000000..5fc9a9f
--- /dev/null
+++ b/signal/micro/kernels/pcan_flexbuffers_generated_data.cc
@@ -0,0 +1,7 @@
+#include "signal/micro/kernels/pcan_flexbuffers_generated_data.h"
+
+const int g_gen_data_size_snr_shift_6_test = 20;
+const unsigned char g_gen_data_snr_shift_6_test[] = {
+    0x73, 0x6e, 0x72, 0x5f, 0x73, 0x68, 0x69, 0x66, 0x74, 0x00,
+    0x01, 0x0b, 0x01, 0x01, 0x01, 0x06, 0x04, 0x02, 0x24, 0x01,
+};
diff --git a/signal/micro/kernels/pcan_flexbuffers_generated_data.h b/signal/micro/kernels/pcan_flexbuffers_generated_data.h
new file mode 100644
index 0000000..32b4cd7
--- /dev/null
+++ b/signal/micro/kernels/pcan_flexbuffers_generated_data.h
@@ -0,0 +1,7 @@
+#ifndef SIGNAL_MICRO_KERNELS_PCAN_FLEXBUFFERS_GENERATED_DATA_H_
+#define SIGNAL_MICRO_KERNELS_PCAN_FLEXBUFFERS_GENERATED_DATA_H_
+
+extern const int g_gen_data_size_snr_shift_6_test;
+extern const unsigned char g_gen_data_snr_shift_6_test[];
+
+#endif  // SIGNAL_MICRO_KERNELS_PCAN_FLEXBUFFERS_GENERATED_DATA_H_
diff --git a/signal/micro/kernels/pcan_test.cc b/signal/micro/kernels/pcan_test.cc
new file mode 100644
index 0000000..aecb5d1
--- /dev/null
+++ b/signal/micro/kernels/pcan_test.cc
@@ -0,0 +1,132 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "signal/micro/kernels/pcan_flexbuffers_generated_data.h"
+#include "tensorflow/lite/micro/kernels/kernel_runner.h"
+#include "tensorflow/lite/micro/test_helpers.h"
+#include "tensorflow/lite/micro/testing/micro_test.h"
+
+namespace tflite {
+namespace tflm_signal {
+namespace {
+
+TfLiteStatus TestPCAN(const unsigned char* init_data, int init_data_size,
+                      int* input_dims_data, const uint32_t* input_data,
+                      int* noise_estimate_dims_data,
+                      const uint32_t* noise_estimate_data,
+                      int* gain_lut_dims_data, const int16_t* gain_lut_data,
+                      int* output_dims_data, const uint32_t* golden,
+                      uint32_t* output_data) {
+  TfLiteIntArray* input_dims =
+      ::tflite::testing::IntArrayFromInts(input_dims_data);
+  TfLiteIntArray* noise_estimate_dims =
+      ::tflite::testing::IntArrayFromInts(noise_estimate_dims_data);
+  TfLiteIntArray* gain_lut_dims =
+      ::tflite::testing::IntArrayFromInts(gain_lut_dims_data);
+  TfLiteIntArray* output_dims =
+      ::tflite::testing::IntArrayFromInts(output_dims_data);
+  const int output_len = ElementCount(*output_dims);
+  constexpr int kInputsSize = 3;
+  constexpr int kOutputsSize = 1;
+  constexpr int kTensorsSize = kInputsSize + kOutputsSize;
+  TfLiteTensor tensors[kTensorsSize] = {
+      tflite::testing::CreateTensor(input_data, input_dims),
+      tflite::testing::CreateTensor(noise_estimate_data, noise_estimate_dims),
+      tflite::testing::CreateTensor(gain_lut_data, gain_lut_dims),
+      tflite::testing::CreateTensor(output_data, output_dims),
+  };
+  int inputs_array_data[] = {3, 0, 1, 2};
+  TfLiteIntArray* inputs_array =
+      ::tflite::testing::IntArrayFromInts(inputs_array_data);
+  int outputs_array_data[] = {1, 3};
+  TfLiteIntArray* outputs_array =
+      ::tflite::testing::IntArrayFromInts(outputs_array_data);
+
+  const TFLMRegistration* registration = tflite::tflm_signal::Register_PCAN();
+  micro::KernelRunner runner(*registration, tensors, kTensorsSize, inputs_array,
+                             outputs_array,
+                             /*builtin_data=*/nullptr);
+
+  // TfLite uses a char* for the raw bytes whereas flexbuffers use an unsigned
+  // char*. This small discrepancy results in compiler warnings unless we
+  // reinterpret_cast right before passing in the flexbuffer bytes to the
+  // KernelRunner.
+  TfLiteStatus status = runner.InitAndPrepare(
+      reinterpret_cast<const char*>(init_data), init_data_size);
+  if (status != kTfLiteOk) {
+    return status;
+  }
+  status = runner.Invoke();
+  if (status != kTfLiteOk) {
+    return status;
+  }
+  for (int i = 0; i < output_len; ++i) {
+    TF_LITE_MICRO_EXPECT_EQ(golden[i], output_data[i]);
+  }
+  return kTfLiteOk;
+}
+
+}  // namespace
+}  // namespace tflm_signal
+}  // namespace tflite
+
+TF_LITE_MICRO_TESTS_BEGIN
+
+TF_LITE_MICRO_TEST(Mix1Ref1Case) {
+  int input_shape[] = {1, 40};
+  int noise_estimate_shape[] = {1, 40};
+  int gain_lut_shape[] = {1, 125};
+  int output_shape[] = {1, 40};
+  const uint32_t input[] = {286, 298, 305, 291, 290, 279, 273, 257, 250, 240,
+                            240, 233, 234, 230, 221, 205, 183, 159, 156, 188,
+                            239, 298, 345, 374, 380, 369, 359, 364, 372, 354,
+                            302, 243, 194, 135, 64,  72,  171, 245, 277, 304};
+  const uint32_t noise_estimate[] = {
+      7310, 18308, 7796, 17878, 7413, 17141, 6978, 15789, 6390, 14745,
+      6135, 14314, 5981, 14130, 5649, 12594, 4677, 9768,  3987, 11550,
+      6109, 18308, 8819, 22977, 9713, 22670, 9176, 22363, 9509, 21748,
+      7719, 14929, 4959, 8294,  1636, 4423,  4371, 15052, 7080, 18677};
+
+  const int16_t gain_lut[] = {
+      32636, 32633,  32630, -6,     0,     -21589, 32624, -12,    0,     -21589,
+      32612, -23,    -2,    -21589, 32587, -48,    0,     -21589, 32539, -96,
+      0,     -21589, 32443, -190,   0,     -21589, 32253, -378,   4,     -21589,
+      31879, -739,   18,    -21589, 31158, -1409,  62,    -21589, 29811, -2567,
+      202,   -21589, 27446, -4301,  562,   -21589, 23707, -6265,  1230,  -21589,
+      18672, -7458,  1952,  -21589, 13166, -7030,  2212,  -21589, 8348,  -5342,
+      1868,  -21589, 4874,  -3459,  1282,  -21589, 2697,  -2025,  774,   -21589,
+      1446,  -1120,  436,   -21589, 762,   -596,   232,   -21589, 398,   -313,
+      122,   -21589, 207,   -164,   64,    -21589, 107,   -85,    34,    -21589,
+      56,    -45,    18,    -21589, 29,    -22,    8,     -21589, 15,    -13,
+      6,     -21589, 8,     -8,     4,     -21589, 4,     -2,     0,     -21589,
+      2,     -3,     2,     -21589, 1,     0,      0,     -21589, 1,     -3,
+      2,     -21589, 0,     0,      0};
+
+  uint32_t output[40];
+  const uint32_t golden[] = {1301, 836, 1354, 827, 1312, 811, 1263, 779,
+                             1192, 753, 1160, 743, 1140, 738, 1096, 698,
+                             956,  607, 845,  667, 1157, 836, 1461, 912,
+                             1546, 908, 1496, 904, 1527, 895, 1346, 758,
+                             999,  548, 378,  344, 908,  761, 1274, 843};
+  memset(output, 0, sizeof(output));
+  TF_LITE_MICRO_EXPECT_EQ(
+      kTfLiteOk,
+      tflite::tflm_signal::TestPCAN(
+          g_gen_data_snr_shift_6_test, g_gen_data_size_snr_shift_6_test,
+          input_shape, input, noise_estimate_shape, noise_estimate,
+          gain_lut_shape, gain_lut, output_shape, golden, output));
+}
+
+TF_LITE_MICRO_TESTS_END
diff --git a/signal/micro/kernels/rfft.cc b/signal/micro/kernels/rfft.cc
index fccc6eb..c9472b0 100644
--- a/signal/micro/kernels/rfft.cc
+++ b/signal/micro/kernels/rfft.cc
@@ -48,12 +48,13 @@
   int32_t output_length;
   TfLiteType fft_type;
   T* work_area;
+  int scratch_buffer_index;
   int8_t* state;
 };
 
 template <typename T, size_t (*get_needed_memory_func)(int32_t),
           void* (*init_func)(int32_t, void*, size_t)>
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* RfftInit(TfLiteContext* context, const char* buffer, size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
 
   const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer);
@@ -65,9 +66,6 @@
   params->fft_length = fbw.ElementAsInt32(kFftLengthIndex);
   params->fft_type = typeToTfLiteType<T>();
 
-  params->work_area = static_cast<T*>(context->AllocatePersistentBuffer(
-      context, params->fft_length * sizeof(T)));
-
   size_t state_size = (*get_needed_memory_func)(params->fft_length);
   params->state = static_cast<int8_t*>(
       context->AllocatePersistentBuffer(context, state_size * sizeof(int8_t)));
@@ -76,7 +74,7 @@
 }
 
 template <typename T, TfLiteType TfLiteTypeEnum>
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus RfftPrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
@@ -103,13 +101,15 @@
   params->output_length =
       output_shape.Dims(output_shape.DimensionsCount() - 1) / 2;
 
+  context->RequestScratchBufferInArena(context, params->fft_length * sizeof(T),
+                                       &params->scratch_buffer_index);
   micro_context->DeallocateTempTfLiteTensor(input);
   micro_context->DeallocateTempTfLiteTensor(output);
   return kTfLiteOk;
 }
 
 template <typename T, void (*apply_func)(void*, const T* input, Complex<T>*)>
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus RfftEval(TfLiteContext* context, TfLiteNode* node) {
   auto* params =
       reinterpret_cast<TfLiteAudioFrontendRfftParams<T>*>(node->user_data);
 
@@ -122,74 +122,76 @@
       tflite::micro::GetEvalOutput(context, node, kOutputTensor);
   Complex<T>* output_data = tflite::micro::GetTensorData<Complex<T>>(output);
 
+  T* work_area = static_cast<T*>(
+      context->GetScratchBuffer(context, params->scratch_buffer_index));
+
   for (int input_idx = 0, output_idx = 0; input_idx < params->input_size;
        input_idx += params->input_length, output_idx += params->output_length) {
-    memcpy(params->work_area, &input_data[input_idx],
-           sizeof(T) * params->input_length);
+    memcpy(work_area, &input_data[input_idx], sizeof(T) * params->input_length);
     // Zero pad input to FFT length
-    memset(&params->work_area[params->input_length], 0,
+    memset(&work_area[params->input_length], 0,
            sizeof(T) * (params->fft_length - params->input_length));
 
-    (*apply_func)(params->state, params->work_area, &output_data[output_idx]);
+    (*apply_func)(params->state, work_area, &output_data[output_idx]);
   }
   return kTfLiteOk;
 }
 
-void* InitAll(TfLiteContext* context, const char* buffer, size_t length) {
+void* RfftInitAll(TfLiteContext* context, const char* buffer, size_t length) {
   const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer);
   const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap();
   auto tensor_type = static_cast<tflite::TensorType>(m["T"].AsInt32());
 
   switch (tensor_type) {
     case TensorType_INT16: {
-      return Init<int16_t, ::tflm_signal::RfftInt16GetNeededMemory,
-                  ::tflm_signal::RfftInt16Init>(context, buffer, length);
+      return RfftInit<int16_t, ::tflm_signal::RfftInt16GetNeededMemory,
+                      ::tflm_signal::RfftInt16Init>(context, buffer, length);
     }
     case TensorType_INT32: {
-      return Init<int32_t, ::tflm_signal::RfftInt32GetNeededMemory,
-                  ::tflm_signal::RfftInt32Init>(context, buffer, length);
+      return RfftInit<int32_t, ::tflm_signal::RfftInt32GetNeededMemory,
+                      ::tflm_signal::RfftInt32Init>(context, buffer, length);
     }
     case TensorType_FLOAT32: {
-      return Init<float, ::tflm_signal::RfftFloatGetNeededMemory,
-                  ::tflm_signal::RfftFloatInit>(context, buffer, length);
+      return RfftInit<float, ::tflm_signal::RfftFloatGetNeededMemory,
+                      ::tflm_signal::RfftFloatInit>(context, buffer, length);
     }
     default:
       return nullptr;
   }
 }
 
-TfLiteStatus PrepareAll(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus RfftPrepareAll(TfLiteContext* context, TfLiteNode* node) {
   auto* params =
       reinterpret_cast<TfLiteAudioFrontendRfftParams<void>*>(node->user_data);
 
   switch (params->fft_type) {
     case kTfLiteInt16: {
-      return Prepare<int16_t, kTfLiteInt16>(context, node);
+      return RfftPrepare<int16_t, kTfLiteInt16>(context, node);
     }
     case kTfLiteInt32: {
-      return Prepare<int32_t, kTfLiteInt32>(context, node);
+      return RfftPrepare<int32_t, kTfLiteInt32>(context, node);
     }
     case kTfLiteFloat32: {
-      return Prepare<float, kTfLiteFloat32>(context, node);
+      return RfftPrepare<float, kTfLiteFloat32>(context, node);
     }
     default:
       return kTfLiteError;
   }
 }
 
-TfLiteStatus EvalAll(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus RfftEvalAll(TfLiteContext* context, TfLiteNode* node) {
   auto* params =
       reinterpret_cast<TfLiteAudioFrontendRfftParams<void>*>(node->user_data);
 
   switch (params->fft_type) {
     case kTfLiteInt16: {
-      return Eval<int16_t, ::tflm_signal::RfftInt16Apply>(context, node);
+      return RfftEval<int16_t, ::tflm_signal::RfftInt16Apply>(context, node);
     }
     case kTfLiteInt32: {
-      return Eval<int32_t, ::tflm_signal::RfftInt32Apply>(context, node);
+      return RfftEval<int32_t, ::tflm_signal::RfftInt32Apply>(context, node);
     }
     case kTfLiteFloat32: {
-      return Eval<float, ::tflm_signal::RfftFloatApply>(context, node);
+      return RfftEval<float, ::tflm_signal::RfftFloatApply>(context, node);
     }
     default:
       return kTfLiteError;
@@ -202,34 +204,34 @@
 
 TFLMRegistration* Register_RFFT() {
   static TFLMRegistration r =
-      tflite::micro::RegisterOp(InitAll, PrepareAll, EvalAll);
+      tflite::micro::RegisterOp(RfftInitAll, RfftPrepareAll, RfftEvalAll);
   return &r;
 }
 
 TFLMRegistration* Register_RFFT_FLOAT() {
   static TFLMRegistration r = tflite::micro::RegisterOp(
-      Init<float, ::tflm_signal::RfftFloatGetNeededMemory,
-           ::tflm_signal::RfftFloatInit>,
-      Prepare<float, kTfLiteFloat32>,
-      Eval<float, ::tflm_signal::RfftFloatApply>);
+      RfftInit<float, ::tflm_signal::RfftFloatGetNeededMemory,
+               ::tflm_signal::RfftFloatInit>,
+      RfftPrepare<float, kTfLiteFloat32>,
+      RfftEval<float, ::tflm_signal::RfftFloatApply>);
   return &r;
 }
 
 TFLMRegistration* Register_RFFT_INT16() {
   static TFLMRegistration r = tflite::micro::RegisterOp(
-      Init<int16_t, ::tflm_signal::RfftInt16GetNeededMemory,
-           ::tflm_signal::RfftInt16Init>,
-      Prepare<int16_t, kTfLiteInt16>,
-      Eval<int16_t, ::tflm_signal::RfftInt16Apply>);
+      RfftInit<int16_t, ::tflm_signal::RfftInt16GetNeededMemory,
+               ::tflm_signal::RfftInt16Init>,
+      RfftPrepare<int16_t, kTfLiteInt16>,
+      RfftEval<int16_t, ::tflm_signal::RfftInt16Apply>);
   return &r;
 }
 
 TFLMRegistration* Register_RFFT_INT32() {
   static TFLMRegistration r = tflite::micro::RegisterOp(
-      Init<int32_t, ::tflm_signal::RfftInt32GetNeededMemory,
-           ::tflm_signal::RfftInt32Init>,
-      Prepare<int32_t, kTfLiteInt32>,
-      Eval<int32_t, ::tflm_signal::RfftInt32Apply>);
+      RfftInit<int32_t, ::tflm_signal::RfftInt32GetNeededMemory,
+               ::tflm_signal::RfftInt32Init>,
+      RfftPrepare<int32_t, kTfLiteInt32>,
+      RfftEval<int32_t, ::tflm_signal::RfftInt32Apply>);
   return &r;
 }
 
diff --git a/signal/micro/kernels/stacker.cc b/signal/micro/kernels/stacker.cc
index 42a2ee6..fc1a4a3 100644
--- a/signal/micro/kernels/stacker.cc
+++ b/signal/micro/kernels/stacker.cc
@@ -52,7 +52,7 @@
   tflm_signal::CircularBuffer* circular_buffer;
 };
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* StackerInit(TfLiteContext* context, const char* buffer, size_t length) {
   const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer);
 
   auto* params =
@@ -88,7 +88,7 @@
   return params;
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus StackerPrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2);
 
@@ -118,7 +118,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus StackerEval(TfLiteContext* context, TfLiteNode* node) {
   auto* params = reinterpret_cast<TFLMSignalStackerParams*>(node->user_data);
   TF_LITE_ENSURE(context, params != nullptr);
 
@@ -157,7 +157,7 @@
   return kTfLiteOk;
 }
 
-void Reset(TfLiteContext* context, void* buffer) {
+void StackerReset(TfLiteContext* context, void* buffer) {
   auto* params = static_cast<TFLMSignalStackerParams*>(buffer);
   tflm_signal::CircularBufferReset(params->circular_buffer);
   params->stacker_has_first_frame = false;
@@ -167,8 +167,8 @@
 
 namespace tflm_signal {
 TFLMRegistration* Register_STACKER() {
-  static TFLMRegistration r =
-      tflite::micro::RegisterOp(Init, Prepare, Eval, /*Free*/ nullptr, Reset);
+  static TFLMRegistration r = tflite::micro::RegisterOp(
+      StackerInit, StackerPrepare, StackerEval, /*Free*/ nullptr, StackerReset);
   return &r;
 }
 }  // namespace tflm_signal
diff --git a/signal/micro/kernels/window.cc b/signal/micro/kernels/window.cc
index e850898..cd9c462 100644
--- a/signal/micro/kernels/window.cc
+++ b/signal/micro/kernels/window.cc
@@ -41,7 +41,7 @@
   int32_t input_size;
 };
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* WindowInit(TfLiteContext* context, const char* buffer, size_t length) {
   const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer);
 
   auto* params =
@@ -53,7 +53,7 @@
   return params;
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus WindowPrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
@@ -87,7 +87,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus WindowEval(TfLiteContext* context, TfLiteNode* node) {
   auto* params = reinterpret_cast<TFLMSignalWindowParams*>(node->user_data);
 
   const TfLiteEvalTensor* input =
@@ -114,7 +114,8 @@
 namespace tflm_signal {
 
 TFLMRegistration* Register_WINDOW() {
-  static TFLMRegistration r = tflite::micro::RegisterOp(Init, Prepare, Eval);
+  static TFLMRegistration r =
+      tflite::micro::RegisterOp(WindowInit, WindowPrepare, WindowEval);
   return &r;
 }
 
diff --git a/signal/micro/kernels/xtensa/fft_auto_scale_kernel.cc b/signal/micro/kernels/xtensa/fft_auto_scale_kernel.cc
new file mode 100644
index 0000000..fbd739b
--- /dev/null
+++ b/signal/micro/kernels/xtensa/fft_auto_scale_kernel.cc
@@ -0,0 +1,103 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "signal/micro/kernels/fft_auto_scale_kernel.h"
+
+#include <math.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "signal/src/fft_auto_scale.h"
+#include "signal/src/max_abs.h"
+#include "signal/src/msb.h"
+#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
+#include "tensorflow/lite/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/micro_context.h"
+
+#if XCHAL_HAVE_HIFI3
+#include <xtensa/tie/xt_hifi3.h>
+namespace {
+// Implementation for DSPs that support the Hifi3 ISA. Bit exact with the
+// portable version below.
+int XtensaFftAutoScale(const int16_t* input, int size, int16_t* output) {
+  const int16_t max = tflite::tflm_signal::MaxAbs16(input, size);
+  int scale_bits = (sizeof(int16_t) * 8) -
+                   tflite::tflm_signal::MostSignificantBit32(max) - 1;
+  int i;
+  if (scale_bits > 0) {
+    const ae_int16x4* input_16x4_ptr =
+        reinterpret_cast<const ae_int16x4*>(input);
+    ae_int16x4* output_16x4_ptr = reinterpret_cast<ae_int16x4*>(output);
+    const int num_iterations = ((size + 3) >> 2);
+    for (i = 0; i < num_iterations; ++i) {
+      ae_int16x4 input_16x4;
+      AE_L16X4_IP(input_16x4, input_16x4_ptr, 8);
+      ae_f16x4 input_f16x4 = *reinterpret_cast<ae_f16x4*>(&input_16x4);
+      input_f16x4 = AE_SLAA16S(input_f16x4, scale_bits);
+      input_16x4 = *reinterpret_cast<ae_int16x4*>(&input_f16x4);
+      AE_S16X4_IP(input_16x4, output_16x4_ptr, 8);
+    }
+  } else {
+    memcpy(output, input, size * sizeof(output[0]));
+    scale_bits = 0;
+  }
+  return scale_bits;
+}
+}  // namespace
+#endif
+
+namespace tflite {
+namespace {
+
+constexpr int kInputTensor = 0;
+constexpr int kOutputTensor = 0;
+constexpr int kScaleBitTensor = 1;
+
+TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+  const TfLiteEvalTensor* input =
+      tflite::micro::GetEvalInput(context, node, kInputTensor);
+  TfLiteEvalTensor* output =
+      tflite::micro::GetEvalOutput(context, node, kOutputTensor);
+  TfLiteEvalTensor* scale_bit =
+      tflite::micro::GetEvalOutput(context, node, kScaleBitTensor);
+
+  const int16_t* input_data = tflite::micro::GetTensorData<int16_t>(input);
+  int16_t* output_data = tflite::micro::GetTensorData<int16_t>(output);
+  int32_t* scale_bit_data = tflite::micro::GetTensorData<int32_t>(scale_bit);
+
+#if XCHAL_HAVE_HIFI3
+  *scale_bit_data =
+      XtensaFftAutoScale(input_data, output->dims->data[0], output_data);
+#else
+  *scale_bit_data =
+      tflm_signal::FftAutoScale(input_data, output->dims->data[0], output_data);
+#endif
+  return kTfLiteOk;
+}
+
+}  // namespace
+
+// TODO(b/286250473): remove namespace once de-duped libraries
+namespace tflm_signal {
+
+TFLMRegistration* Register_FFT_AUTO_SCALE() {
+  static TFLMRegistration r =
+      tflite::micro::RegisterOp(nullptr, FftAutoScalePrepare, Eval);
+  return &r;
+}
+
+}  // namespace tflm_signal
+}  // namespace tflite
diff --git a/signal/micro/kernels/xtensa/filter_bank_square_root.cc b/signal/micro/kernels/xtensa/filter_bank_square_root.cc
new file mode 100644
index 0000000..60e4119
--- /dev/null
+++ b/signal/micro/kernels/xtensa/filter_bank_square_root.cc
@@ -0,0 +1,72 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "signal/micro/kernels/filter_bank_square_root.h"
+
+#include <stdint.h>
+
+#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
+#include "tensorflow/lite/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/memory_helpers.h"
+#include "tensorflow/lite/micro/micro_utils.h"
+
+// Defined in square_root.S
+extern "C" uint32_t xtensa_sqrt_64(const uint64_t num);
+
+namespace tflite {
+namespace {
+
+constexpr int kInputTensor = 0;
+constexpr int kScaleBitsTensor = 1;
+constexpr int kOutputTensor = 0;
+
+void ApplyFilterbankSqrt(const uint64_t* input, int num_channels,
+                         int scale_down_bits, uint32_t* output) {
+  for (int i = 0; i < num_channels; ++i) {
+    output[i] = xtensa_sqrt_64(input[i]) >> scale_down_bits;
+  }
+}
+
+TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+  const TfLiteEvalTensor* input =
+      tflite::micro::GetEvalInput(context, node, kInputTensor);
+  const TfLiteEvalTensor* scale_bits =
+      tflite::micro::GetEvalInput(context, node, kScaleBitsTensor);
+  TfLiteEvalTensor* output =
+      tflite::micro::GetEvalOutput(context, node, kOutputTensor);
+
+  const uint64_t* input_data = tflite::micro::GetTensorData<uint64_t>(input);
+  const int32_t* scale_bits_data =
+      tflite::micro::GetTensorData<int32_t>(scale_bits);
+  uint32_t* output_data = tflite::micro::GetTensorData<uint32_t>(output);
+  int32_t num_channels = input->dims->data[0];
+  ApplyFilterbankSqrt(input_data, num_channels, *scale_bits_data, output_data);
+  return kTfLiteOk;
+}
+
+}  // namespace
+
+namespace tflm_signal {
+
+TFLMRegistration* Register_FILTER_BANK_SQUARE_ROOT() {
+  static TFLMRegistration r =
+      tflite::micro::RegisterOp(nullptr, FilterBankSquareRootPrepare, Eval);
+  return &r;
+}
+
+}  // namespace tflm_signal
+
+}  // namespace tflite
diff --git a/signal/micro/kernels/xtensa/xtensa_square_root.S b/signal/micro/kernels/xtensa/xtensa_square_root.S
new file mode 100644
index 0000000..054d72f
--- /dev/null
+++ b/signal/micro/kernels/xtensa/xtensa_square_root.S
@@ -0,0 +1,400 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+.section .note.GNU-stack,"",@progbits
+
+#include "xtensa/config/core-isa.h"
+
+#ifdef __XTENSA_CALL0_ABI__
+#define NO_REGISTER_WINDOW (1)
+#endif
+
+#if XCHAL_HAVE_WINDOWED == 0
+#define NO_REGISTER_WINDOW
+#endif
+
+// Since the 64 bit sqrt jumps into the middle of the 32 bit sqrt under certain
+// conditions, both functions should reserve the same amount of stack space.
+#define XTENSA_SQRT_STACK_SIZE 32
+
+.text
+.type xtensa_sqrt_64, @function
+.align 4
+.global xtensa_sqrt_64
+
+// Make macros for our 64 bit functions, since we don't have a carry/borrow bit
+// in the base ISA, these take up way more cycles than they should. These are
+// the "preferred instruction idioms" from 8.9.2 of the base ISA manual. Since
+// these macros define a jump (and I couldn't find a way to be clever and use
+// something like __LINE__/__FILE__ to define these automatically, you may also
+// have to provide an 'opname' that contains a unique string to define a label
+// for the macro.
+
+// dest must not be the same as num2, or this function will not work!
+#define ADD_64(dest, num1, num2, opname) \
+  add.n dest##_low, num1##_low, num2##_low; \
+  add.n dest##_high, num1##_high, num2##_high; \
+  bgeu dest##_low, num2##_low, .add_64_jump_##opname; \
+  addi.n dest##_high, dest##_high, 1; \
+  .add_64_jump_##opname:
+
+// All three registers must be unique, or this function will not work!
+#define SUB_64(dest, num1, num2, opname) \
+  sub dest##_low, num1##_low, num2##_low; \
+  sub dest##_high, num1##_high, num2##_high; \
+  bgeu num1##_low, num2##_low, .sub_64_jump_##opname; \
+  addi.n dest##_high, dest##_high, -1; \
+  .sub_64_jump_##opname:
+
+#define SRLI_64(dest, val, imm) \
+  slli scratch4, val##_high, (32 - imm); \
+  srli dest##_high, val##_high, imm; \
+  srli dest##_low, val##_low, imm; \
+  or dest##_low, dest##_low, scratch4;
+
+#define COND_MOV_64(op, dest, val, test) \
+  mov##op dest##_low, val##_low, test; \
+  mov##op dest##_high, val##_high, test
+
+#define num_low a2
+#define num_high a3
+#define bit_low a4
+#define bit_high a5
+#define res_low a6
+#define res_high a7
+#define temp1_low a8
+#define temp1_high a9
+#define temp2_low a10
+#define temp2_high a11
+#define scratch1 a12
+#define scratch2 a13
+#define scratch3 a14
+#define scratch4 a15
+#define temp3_low scratch1
+#define temp3_high scratch2
+
+.align 4
+xtensa_sqrt_64:
+#ifdef NO_REGISTER_WINDOW
+addi.n a1, a1, -XTENSA_SQRT_STACK_SIZE
+s32i.n a0, a1, 4
+s32i.n a11, a1, 8
+s32i.n a12, a1, 12
+s32i.n a13, a1, 16
+s32i.n a14, a1, 20
+s32i.n a15, a1, 24
+#else
+entry a1, XTENSA_SQRT_STACK_SIZE
+#endif
+// In the event that the upper word of the number is all zero, we can just
+// pretend that we're doing a 32 bit sqrt (but the rounding condition at the
+// end is slightly different, so we've got a bit of an anomaly there. Such is
+// life)
+beqz.n num_high, .xtensa_sqrt_32_start
+// ** uint64 res= 0;
+movi.n res_low, 0
+movi.n res_high, 0
+
+movi.n scratch2, 1
+
+// Setup 'bit' - first we need to know what bit to set it to.
+// ** int max_bit_number = 64 - MostSignificantBit_64(num);
+movi.n bit_low, 0
+nsau scratch1, num_high
+
+// ** max_bit_number |= 1;
+or scratch1, scratch2, scratch1
+
+// The amount we shift by is 31 - what's in scratch1 for the max bit number.
+// This is because we've got the two words, so we can't do a 64 bit shift.
+movi.n scratch3, 31
+sub scratch1, scratch3, scratch1
+
+// Do the shift
+// ** uint32 bit = 1 << (63 - max_bit_number);
+ssl scratch1
+sll bit_high, scratch2
+
+// Figure out how many iterations we're going to need. However, we already have
+// 31 - max_bit_number in scratch1, so just add 32 to that.
+// ** int iterations = (63 - max_bit_number) / 2 + 1;
+addi.n scratch1, scratch1, 32
+srli scratch1, scratch1, 1
+add scratch1, scratch1, scratch2
+
+// If the number of iterations is equal to 32, this means that we're likely in
+// an overflow spot if we try and do a subtraction (since the upper most bit is
+// going to be set since the bit had to be shifted up so high). We have to do
+// one iteration of the loop where we use the pipeline destroying branch call
+// that can compare two unsigned numbers. If we need less than 32 iterations,
+// we can skip this slow path and jump to the tight inner loop.
+blti scratch1, 32, .xtensa_sqrt_64_inner_loop_start
+
+// Cache bit + res.
+ADD_64(temp1, bit, res, temp1_bit_res)
+// Since we've stored a copy of bit + res, we can right shift res (since both
+// branches of the conditional are going to need it, one branch just needs to
+// perform an extra addition).
+// ** res <<= 1;
+SRLI_64(res, res, 1);
+
+// ** if (num >= res_plus_bit) {
+bltu num_high, temp1_high, .xtensa_sqrt_64_branch_skip
+bne num_high, temp1_high, .xtensa_sqrt_64_comparison_failed
+bltu num_low, temp1_low, .xtensa_sqrt_64_branch_skip
+.xtensa_sqrt_64_comparison_failed:
+
+// **   num -= res + bit;
+SUB_64(temp2, num, temp1, temp2_num_temp1_early_branch)
+// Since the sub can't use the same registers, we have to move it back to where
+// it belongs.
+mov.n num_low, temp2_low
+mov.n num_high, temp2_high
+// **   res += bit;
+ADD_64(res, res, bit, res_res_bit_early_branch)
+// ** }
+.xtensa_sqrt_64_branch_skip:
+
+// ** bit >>= 2;
+SRLI_64(bit, bit, 2)
+// Make sure we knock off this iteration when we fall into the inner loop.
+sub scratch1, scratch1, scratch2
+
+.xtensa_sqrt_64_inner_loop_start:
+loop scratch1, .xtensa_sqrt_64_round
+
+// We don't have enough registers to be as verbose as the 32 bit version, so
+// this version is not as easy to read. Instead of having the two operations in
+// the same style of conditional move, we sort of decide to do both branches at
+// the same time of the if, then fix up what was incorrect at the end.
+SRLI_64(temp1, res, 1)
+ADD_64(res, res, bit, res_res_bit)
+
+SUB_64(temp2, num, res, num_res_temp2)
+ADD_64(res, temp1, bit, res_temp1_bit)
+
+COND_MOV_64(gez, num, temp2, temp2_high)
+COND_MOV_64(ltz, res, temp1, temp2_high)
+
+// ** bit >>= 2;
+SRLI_64(bit, bit, 2)
+
+.xtensa_sqrt_64_round:
+
+// Need to do if (num > res) { ++res; }, but we'll do it with conditional moves
+// again. Except we're going to do it slightly backwards, since we need to move
+// the result into the num register to be returned. We'll do this by setting
+// the return value to res + 1, but in the event that it was a mistake, we'll
+// conditionally move the raw result back into place.
+SUB_64(temp1, res, num, res_num_temp1)
+addi.n num_low, res_low, 1
+movgez num_low, res_low, temp1_high
+
+// But we may have overflowed num_low - set it back to res_low if it's been
+// zeroed out.
+moveqz num_low, res_low, num_low
+
+#ifdef NO_REGISTER_WINDOW
+l32i.n a0, a1, 4
+l32i.n a11, a1, 8
+l32i.n a12, a1, 12
+l32i.n a13, a1, 16
+l32i.n a14, a1, 20
+l32i.n a15, a1, 24
+addi a1, a1, XTENSA_SQRT_STACK_SIZE
+ret.n
+#else
+retw.n
+#endif
+.xtensa_sqrt_64_end:
+  .size xtensa_sqrt_64, . - xtensa_sqrt_64
+
+
+#undef ADD_64
+#undef SUB_64
+#undef SRLI_64
+#undef COND_MOV_64
+
+#undef num_low
+#undef num_high
+#undef bit_low
+#undef bit_high
+#undef res_low
+#undef res_high
+#undef temp1_low
+#undef temp1_high
+#undef temp2_low
+#undef temp2_high
+#undef scratch1
+#undef scratch2
+#undef scratch3
+#undef scratch4
+#undef temp3_low
+#undef temp3_high
+.text
+.type xtensa_sqrt_32, @function
+.align 4
+.global xtensa_sqrt_32
+
+// Make the program more readable...
+#define num a2
+#define bit a4
+#define res a5
+#define one a6
+#define max_bit_number a7
+#define iterations max_bit_number
+#define bit_plus_res a8
+#define num_minus_bit_plus_res a9
+#define res_shift_left_plus_bit a10
+#define res_minus_num res_shift_left_plus_bit
+
+xtensa_sqrt_32:
+#ifdef NO_REGISTER_WINDOW
+addi.n a1, a1, -XTENSA_SQRT_STACK_SIZE
+s32i.n a0, a1, 4
+s32i.n a11, a1, 8
+s32i.n a12, a1, 12
+s32i.n a13, a1, 16
+s32i.n a14, a1, 20
+s32i.n a15, a1, 24
+#else
+entry a1, XTENSA_SQRT_STACK_SIZE
+#endif
+
+.xtensa_sqrt_32_start:
+// If the number is zero, just quickly exit without doing anything.
+beqz.n num, .xtensa_sqrt_32_return
+
+// ** uint32 res = 0;
+movi.n res, 0
+// Also, setup the handy constant we need a few times.
+movi.n one, 1
+
+// This will give us (32 - index of the first bit that is set).
+// ** int max_bit_number = 32 - MostSignificantBit_32(num);
+nsau max_bit_number, num
+
+// ** max_bit_number |= one;
+or max_bit_number, max_bit_number, one
+
+// The amount we shift by is 31 - what we stored in max_bit_number.
+movi.n a15, 31
+sub max_bit_number, a15, max_bit_number
+
+// Do the shift.
+// ** uint32 bit = 1 << (31 - max_bit_number);
+ssl max_bit_number
+sll bit, one
+
+// Compute the number of iterations we're going to need.
+// ** int iterations = (31 - max_bit_number) / 2 + 1;
+srli iterations, max_bit_number, 1
+add iterations, iterations, one
+
+// If the number of iterations is equal to 16, this means that we're likely in
+// an overflow spot if we try and do a subtraction (since the upper most bit is
+// going to be set since the bit had to be shifted up so high). We have to do
+// one iteration of the loop where we use the pipeline destroying branch call
+// that can compare two unsigned numbers. If we need less than 16 iterations,
+// we can skip this slow path and jump to the tight inner loop.
+blti iterations, 16, .xtensa_sqrt_32_inner_loop_start
+
+// Cache bit + res into another register.
+add.n bit_plus_res, bit, res
+// Since we've stored a copy of bit + res, we can right shift res (since both
+// branches of the conditional are going to need it, one branch just needs to
+// perform an extra addition).
+// ** res >>= 1;
+srli res, res, 1
+// ** if (num >= res_plus_bit) {
+bltu num, bit_plus_res, .xtensa_sqrt_32_branch_skip
+// **   num -= res + bit;
+sub num, num, bit_plus_res
+// **   res += bit;
+add res, res, bit
+// ** }
+.xtensa_sqrt_32_branch_skip:
+
+// ** bit >>= 2;
+srli bit, bit, 2
+// Make sure we knock off this iteration when we fall into the inner loop.
+sub iterations, iterations, one
+
+.xtensa_sqrt_32_inner_loop_start:
+// Start a zero overhead loop for the number of remaining iterations.
+loop iterations, .xtensa_sqrt_32_round
+
+// Cache bit + res into another register.
+add.n bit_plus_res, bit, res
+// ** res >>= 1;
+srli res, res, 1
+
+// We can dodge a hefty branch penalty by doing conditional moves - so we need
+// to compute the values for num and res for what would happen if we took the
+// if part of the condition. If the condition is true, then we'll copy stuff
+// across.
+
+// compute num - bit_plus_res. We can use this for the conditional check
+// against zero.
+sub num_minus_bit_plus_res, num, bit_plus_res
+// compute the shifted res + bit.
+add res_shift_left_plus_bit, res, bit
+
+// Copy stuff if the condition is true.
+movgez num, num_minus_bit_plus_res, num_minus_bit_plus_res
+movgez res, res_shift_left_plus_bit, num_minus_bit_plus_res
+
+// ** bit >>= 2;
+srli bit, bit, 2
+
+.xtensa_sqrt_32_round:
+
+// Need to do if (num > res) { ++res; }, but we'll do it with conditional moves
+// again. Except we're going to do it slightly backwards, since we need to move
+// the result into the num register to be returned. We'll do this by setting
+// the return value to res + 1, but in the event that it was a mistake, we'll
+// conditionally move the raw result back into place.
+sub res_minus_num, res, num
+add.n num, res, one
+movgez num, res, res_minus_num
+
+// But we might have also pooched the rounding by adding an extra bit, make sure
+// we don't explode when we overflow.
+clamps num, num, 16
+
+.xtensa_sqrt_32_return:
+#ifdef NO_REGISTER_WINDOW
+l32i.n a0, a1, 4
+l32i.n a11, a1, 8
+l32i.n a12, a1, 12
+l32i.n a13, a1, 16
+l32i.n a14, a1, 20
+l32i.n a15, a1, 24
+addi a1, a1, XTENSA_SQRT_STACK_SIZE
+ret.n
+#else
+retw.n
+#endif
+
+#undef num
+#undef bit
+#undef res
+#undef one
+#undef max_bit_number
+#undef iterations
+#undef bit_plus_res
+#undef num_minus_bit_plus_res
+#undef res_shift_left_plus_bit
+#undef res_minus_num
diff --git a/signal/src/BUILD b/signal/src/BUILD
index c09f608..e5c1060 100644
--- a/signal/src/BUILD
+++ b/signal/src/BUILD
@@ -142,3 +142,13 @@
         ":square_root_64",
     ],
 )
+
+cc_library(
+    name = "pcan_argc_fixed",
+    srcs = ["pcan_argc_fixed.cc"],
+    hdrs = ["pcan_argc_fixed.h"],
+    deps = [
+        ":msb_32",
+        "//tensorflow/lite/kernels/internal:compatibility",
+    ],
+)
diff --git a/signal/src/max_abs.cc b/signal/src/max_abs.cc
index 365938d..0ad117a 100644
--- a/signal/src/max_abs.cc
+++ b/signal/src/max_abs.cc
@@ -15,11 +15,58 @@
 
 #include "signal/src/max_abs.h"
 
+#if defined(XTENSA)
+#include <stdint.h>
+#include <xtensa/tie/xt_misc.h>
+#endif
+#if XCHAL_HAVE_HIFI3
+#include <xtensa/tie/xt_hifi3.h>
+static inline ae_p24x2s MaxAbs16Single(ae_p24x2s max, ae_p24x2s current) {
+  return AE_MAXABSSP24S(max, current);
+}
+#elif XCHAL_HAVE_HIFI_MINI || XCHAL_HAVE_HIFI2 || XCHAL_HAVE_HIFI_EP
+#include <xtensa/tie/xt_hifi2.h>
+static inline ae_p24x2s MaxAbs16Single(ae_p24x2s max, ae_p24x2s current) {
+  current = AE_ABSSP24S(current);
+  return AE_MAXP24S(max, current);
+}
+#endif
+
 // TODO(b/286250473): remove namespace once de-duped libraries
 namespace tflite {
 namespace tflm_signal {
 
+#if XCHAL_HAVE_HIFI_MINI || XCHAL_HAVE_HIFI2 || XCHAL_HAVE_HIFI_EP || \
+    XCHAL_HAVE_HIFI3
+int16_t XtensaMaxAbs16(const int16_t* input, int size) {
+  int i;
+  ae_p24x2s current_24x2;
+  // AE_LP16X2F_IU() effectively pre-increments the address in input_16x2 by 4
+  //  bytes before loading, so we need to initialize it accordingly.
+  const ae_p16x2s* input_16x2 = (const ae_p16x2s*)(input - 2);
+  ae_p24x2s max = AE_ZEROP48();
+  const int num_iterations = size / 2;
+  for (i = 0; i < num_iterations; i++) {
+    // Advancing the pointer by 2 X 16-bits.
+    AE_LP16X2F_IU(current_24x2, input_16x2, 4);
+    max = MaxAbs16Single(max, current_24x2);
+  }
+  if (size & 1) {  // size is odd
+    // Advancing the pointer by 2 X 16-bits.
+    current_24x2 = AE_LP16F_I((ae_p16s*)input_16x2, 4);
+    max = MaxAbs16Single(max, current_24x2);
+  }
+  const int max_L = AE_TRUNCA16P24S_L(max);
+  const int max_H = AE_TRUNCA16P24S_H(max);
+  return (max_L >= max_H) ? max_L : max_H;
+}
+#endif
+
 int16_t MaxAbs16(const int16_t* input, int size) {
+#if XCHAL_HAVE_HIFI_MINI || XCHAL_HAVE_HIFI2 || XCHAL_HAVE_HIFI_EP || \
+    XCHAL_HAVE_HIFI3
+  return XtensaMaxAbs16(input, size);
+#else
   int16_t max = 0;
   for (int i = 0; i < size; i++) {
     const int16_t value = input[i];
@@ -30,6 +77,8 @@
     }
   }
   return max;
+#endif
 }
+
 }  // namespace tflm_signal
 }  // namespace tflite
diff --git a/signal/src/msb_32.cc b/signal/src/msb_32.cc
index 6f12dc4..67f2664 100644
--- a/signal/src/msb_32.cc
+++ b/signal/src/msb_32.cc
@@ -15,13 +15,21 @@
 
 #include "signal/src/msb.h"
 
+#if defined(XTENSA)
+#include <xtensa/tie/xt_misc.h>
+#endif
+
 namespace tflite {
 namespace tflm_signal {
 // TODO(b/286250473): remove namespace once de-duped libraries above
 
 // TODO(b/291167350):  can allow __builtin_clz to be used in more cases here
 uint32_t MostSignificantBit32(uint32_t x) {
-#if defined(__GNUC__)
+#if defined(XTENSA)
+  // XT_NSAU returns the number of left shifts needed to put the MSB in the
+  // leftmost position. Returns 32 if the argument is 0.
+  return 32 - XT_NSAU(x);
+#elif defined(__GNUC__)
   if (x) {
     return 32 - __builtin_clz(x);
   }
diff --git a/signal/src/msb_64.cc b/signal/src/msb_64.cc
index 233985c..7416438 100644
--- a/signal/src/msb_64.cc
+++ b/signal/src/msb_64.cc
@@ -15,12 +15,25 @@
 
 #include "signal/src/msb.h"
 
+#if defined(XTENSA)
+#include <xtensa/tie/xt_misc.h>
+#endif
+
 namespace tflite {
 namespace tflm_signal {
 // TODO(b/286250473): remove namespace once de-duped libraries above
 
 uint32_t MostSignificantBit64(uint64_t x) {
-#if defined(__GNUC__)
+#if defined(XTENSA)
+  // XT_NSAU returns the number of left shifts needed to put the MSB in the
+  // leftmost position. Returns 32 if the argument is 0.
+  uint32_t upper = 64 - XT_NSAU((uint32_t)(x >> 32));
+  if (upper != 32) {
+    return upper;
+  }
+  // Only if the upper bits are all clear do we want to look at the lower bits.
+  return 32 - XT_NSAU((uint32_t)x);
+#elif defined(__GNUC__)
   if (x) {
     return 64 - __builtin_clzll(x);
   }
diff --git a/signal/src/pcan_argc_fixed.cc b/signal/src/pcan_argc_fixed.cc
new file mode 100644
index 0000000..2700b28
--- /dev/null
+++ b/signal/src/pcan_argc_fixed.cc
@@ -0,0 +1,75 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "pcan_argc_fixed.h"
+
+namespace tflite {
+namespace tflm_signal {
+
+int16_t WideDynamicFunction(const uint32_t x, const int16_t* lut) {
+  if (x <= 2) {
+    return lut[x];
+  }
+
+  const int16_t interval = MostSignificantBit32(x);
+  lut += 4 * interval - 6;
+
+  const int16_t frac =
+      ((interval < 11) ? (x << (11 - interval)) : (x >> (interval - 11))) &
+      0x3FF;
+
+  int32_t result = ((int32_t)lut[2] * frac) >> 5;
+  result += (int32_t)((uint32_t)lut[1] << 5);
+  result *= frac;
+  result = (result + (1 << 14)) >> 15;
+  result += lut[0];
+  return (int16_t)result;
+}
+
+// Evaluate the piecewise polynomial "shrink" function defined by
+//   shrink(x) = x^2 / 4  for x < 2,
+//   shrink(x) = x - 1    for x >= 2.
+// The input x has kPcanSnrBits fractional bits, and the output has
+// kPcanOutputBits fractional bits.
+uint32_t PcanShrink(const uint32_t x) {
+  TFLITE_DCHECK(kPcanSnrBits >= kPcanOutputBits);
+  if (x < (2 << kPcanSnrBits)) {
+    // Compute x^2 / 4.
+    return (x * x) >> (2 + 2 * kPcanSnrBits - kPcanOutputBits);
+  } else {
+    // Compute x - 1.
+    return (x >> (kPcanSnrBits - kPcanOutputBits)) - (1 << kPcanOutputBits);
+  }
+}
+
+void ApplyPcanAutoGainControlFixed(const int16_t* gain_lut, int32_t snr_shift,
+                                   const uint32_t* noise_estimate,
+                                   uint32_t* filterbank_output,
+                                   int num_channels) {
+  int i;
+  for (i = 0; i < num_channels; ++i) {
+    // The gain has gain_bits fractional bits, and filterbank_output[i] has
+    // -input_correction_bits fractional bits. The product is shifted so that
+    // the resulting snr has kPcanSnrBits fractional bits.
+    const uint32_t gain = WideDynamicFunction(noise_estimate[i], gain_lut);
+    const uint32_t snr = ((uint64_t)filterbank_output[i] * gain) >> snr_shift;
+    // Result has kPcanOutputBits fractional bits.
+    // NOTE: This assumes filterbank_output_scale = 1 << kPcanOutputBits.
+    filterbank_output[i] = PcanShrink(snr);
+  }
+}
+
+}  // namespace tflm_signal
+}  // namespace tflite
diff --git a/signal/src/pcan_argc_fixed.h b/signal/src/pcan_argc_fixed.h
new file mode 100644
index 0000000..36eaf3d
--- /dev/null
+++ b/signal/src/pcan_argc_fixed.h
@@ -0,0 +1,41 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef SIGNAL_MICRO_KERNELS__SRC_PCAN_AGC_FIXED_H
+#define SIGNAL_MICRO_KERNELS__SRC_PCAN_AGC_FIXED_H
+#include <cstdint>
+
+#include "msb.h"
+#include "tensorflow/lite/kernels/internal/compatibility.h"
+
+namespace tflite {
+namespace tflm_signal {
+
+#define kPcanSnrBits 12
+#define kPcanOutputBits 6
+
+int16_t WideDynamicFunction(const uint32_t x, const int16_t* lut);
+
+uint32_t PcanShrink(const uint32_t x);
+
+void ApplyPcanAutoGainControlFixed(const int16_t* gain_lut, int32_t snr_shift,
+                                   const uint32_t* noise_estimate,
+                                   uint32_t* filterbank_output,
+                                   int num_channels);
+
+}  // namespace tflm_signal
+}  // namespace tflite
+
+#endif  // SIGNAL_MICRO_KERNELS__SRC_PCAN_AGC_FIXED_H
diff --git a/signal/tensorflow_core/kernels/BUILD b/signal/tensorflow_core/kernels/BUILD
index 3b7d1be..33f5d08 100644
--- a/signal/tensorflow_core/kernels/BUILD
+++ b/signal/tensorflow_core/kernels/BUILD
@@ -66,6 +66,15 @@
 )
 
 tflm_signal_kernel_library(
+    name = "pcan_kernel",
+    srcs = ["pcan_kernel.cc"],
+    deps = [
+        "//signal/src:pcan_argc_fixed",
+        "@tensorflow_cc_deps//:cc_library",
+    ],
+)
+
+tflm_signal_kernel_library(
     name = "stacker_kernel",
     srcs = ["stacker_kernel.cc"],
     deps = [
diff --git a/signal/tensorflow_core/kernels/pcan_kernel.cc b/signal/tensorflow_core/kernels/pcan_kernel.cc
new file mode 100644
index 0000000..a1adde0
--- /dev/null
+++ b/signal/tensorflow_core/kernels/pcan_kernel.cc
@@ -0,0 +1,53 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "signal/src/pcan_argc_fixed.h"
+#include "tensorflow/core/framework/op_kernel.h"
+#include "tensorflow/core/framework/types.h"
+
+namespace tensorflow {
+namespace signal {
+
+class PcanOp : public tensorflow::OpKernel {
+ public:
+  explicit PcanOp(tensorflow::OpKernelConstruction* context)
+      : tensorflow::OpKernel(context) {
+    OP_REQUIRES_OK(context, context->GetAttr("snr_shift", &snr_shift_));
+  }
+
+  void Compute(tensorflow::OpKernelContext* context) override {
+    tensorflow::Tensor* output_tensor = nullptr;
+    const uint32_t* input = context->input(0).flat<uint32_t>().data();
+    const uint32_t* noise_estimate = context->input(1).flat<uint32_t>().data();
+    const int16_t* gain_lut = context->input(2).flat<int16_t>().data();
+    int32_t num_channels = context->input(0).NumElements();
+    OP_REQUIRES_OK(context,
+                   context->allocate_output(0, {num_channels}, &output_tensor));
+    uint32_t* output = output_tensor->flat<uint32_t>().data();
+
+    memcpy(output, input, sizeof(uint32_t) * num_channels);
+    tflite::tflm_signal::ApplyPcanAutoGainControlFixed(
+        gain_lut, snr_shift_, noise_estimate, output, num_channels);
+  }
+
+ private:
+  int snr_shift_;
+};
+
+REGISTER_KERNEL_BUILDER(Name("SignalPCAN").Device(tensorflow::DEVICE_CPU),
+                        PcanOp);
+
+}  // namespace signal
+}  // namespace tensorflow
diff --git a/signal/tensorflow_core/ops/BUILD b/signal/tensorflow_core/ops/BUILD
index 185c593..8ef47d8 100644
--- a/signal/tensorflow_core/ops/BUILD
+++ b/signal/tensorflow_core/ops/BUILD
@@ -54,6 +54,14 @@
 )
 
 tflm_signal_kernel_library(
+    name = "pcan_op",
+    srcs = ["pcan_op.cc"],
+    deps = [
+        "@tensorflow_cc_deps//:cc_library",
+    ],
+)
+
+tflm_signal_kernel_library(
     name = "stacker_op",
     srcs = ["stacker_op.cc"],
     deps = [
diff --git a/signal/tensorflow_core/ops/delay_op.cc b/signal/tensorflow_core/ops/delay_op.cc
index dfee3d2..fb816a6 100644
--- a/signal/tensorflow_core/ops/delay_op.cc
+++ b/signal/tensorflow_core/ops/delay_op.cc
@@ -22,11 +22,11 @@
 namespace tensorflow {
 namespace signal {
 
-Status DelayShape(InferenceContext* c) {
+absl::Status DelayShape(InferenceContext* c) {
   ShapeHandle out;
   TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &out));
   c->set_output(0, out);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 // TODO(b/286250473): change back name after name clash resolved
diff --git a/signal/tensorflow_core/ops/energy_op.cc b/signal/tensorflow_core/ops/energy_op.cc
index 9ca74ed..d4f7cb7 100644
--- a/signal/tensorflow_core/ops/energy_op.cc
+++ b/signal/tensorflow_core/ops/energy_op.cc
@@ -22,7 +22,7 @@
 namespace tensorflow {
 namespace signal {
 
-Status EnergyShape(InferenceContext* c) {
+absl::Status EnergyShape(InferenceContext* c) {
   ShapeHandle out;
 
   TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &out));
@@ -30,7 +30,7 @@
 
   TF_RETURN_IF_ERROR(c->ReplaceDim(out, 0, c->MakeDim(length), &out));
   c->set_output(0, out);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 // TODO(b/286250473): change back name after name clash resolved
diff --git a/signal/tensorflow_core/ops/fft_ops.cc b/signal/tensorflow_core/ops/fft_ops.cc
index 4f52a2b..63d8e78 100644
--- a/signal/tensorflow_core/ops/fft_ops.cc
+++ b/signal/tensorflow_core/ops/fft_ops.cc
@@ -22,7 +22,7 @@
 namespace tensorflow {
 namespace signal {
 
-Status RfftShape(InferenceContext* c) {
+absl::Status RfftShape(InferenceContext* c) {
   ShapeHandle out;
   int fft_length;
   TF_RETURN_IF_ERROR(c->GetAttr<int>("fft_length", &fft_length));
@@ -30,17 +30,17 @@
   auto dim = ((fft_length / 2) + 1) * 2;  // * 2 for complex
   TF_RETURN_IF_ERROR(c->ReplaceDim(out, -1, c->MakeDim(dim), &out));
   c->set_output(0, out);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status IrfftShape(InferenceContext* c) {
+absl::Status IrfftShape(InferenceContext* c) {
   ShapeHandle out;
   int fft_length;
   TF_RETURN_IF_ERROR(c->GetAttr<int>("fft_length", &fft_length));
   TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &out));
   TF_RETURN_IF_ERROR(c->ReplaceDim(out, -1, c->MakeDim(fft_length), &out));
   c->set_output(0, out);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 // TODO(b/286250473): change back name after name clash resolved
@@ -107,7 +107,7 @@
       TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &out));
       c->set_output(0, out);
       c->set_output(1, c->Scalar());
-      return OkStatus();
+      return absl::OkStatus();
     })
     .Doc(R"doc(
 Shifts the input left until the amplitude is maximized without clipping. Returns
diff --git a/signal/tensorflow_core/ops/filter_bank_ops.cc b/signal/tensorflow_core/ops/filter_bank_ops.cc
index de16715..7e021ff 100644
--- a/signal/tensorflow_core/ops/filter_bank_ops.cc
+++ b/signal/tensorflow_core/ops/filter_bank_ops.cc
@@ -22,7 +22,7 @@
 namespace tensorflow {
 namespace signal {
 
-Status FilterBankShape(InferenceContext* c) {
+absl::Status FilterBankShape(InferenceContext* c) {
   ShapeHandle out;
   shape_inference::DimensionHandle unused;
   int num_channels;
@@ -49,7 +49,7 @@
       c->WithValue(c->Dim(c->input(5), 0), num_channels + 1, &unused));
   TF_RETURN_IF_ERROR(c->ReplaceDim(out, 0, c->MakeDim(num_channels), &out));
   c->set_output(0, out);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 // TODO(b/286250473): change back name after name clash resolved
@@ -92,7 +92,7 @@
       ShapeHandle out;
       TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &out));
       c->set_output(0, out);
-      return OkStatus();
+      return absl::OkStatus();
     })
     .Doc(R"doc(
 Applies a square root to each element in the input then shift right by
@@ -122,7 +122,7 @@
       TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &out));
       c->set_output(0, out);
       c->set_output(1, out);
-      return OkStatus();
+      return absl::OkStatus();
     })
     .Doc(R"doc(
 Applies spectral subtraction to a filter bank output of size num_channels
@@ -152,7 +152,7 @@
       ShapeHandle out;
       TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &out));
       c->set_output(0, out);
-      return OkStatus();
+      return absl::OkStatus();
     })
     .Doc(R"doc(
 Applies natural log to each element in input with pre-shift and post scaling.
diff --git a/signal/tensorflow_core/ops/framer_op.cc b/signal/tensorflow_core/ops/framer_op.cc
index ebac6e7..bf77699 100644
--- a/signal/tensorflow_core/ops/framer_op.cc
+++ b/signal/tensorflow_core/ops/framer_op.cc
@@ -22,7 +22,7 @@
 namespace tensorflow {
 namespace signal {
 
-Status FramerShape(InferenceContext* c) {
+absl::Status FramerShape(InferenceContext* c) {
   ShapeHandle unused;
   ShapeHandle in;
   int frame_step, frame_size;
@@ -41,7 +41,7 @@
   TF_RETURN_IF_ERROR(c->ReplaceDim(out, -1, c->MakeDim(frame_size), &out));
   c->set_output(0, out);
   c->set_output(1, c->Scalar());
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 // TODO(b/286250473): change back name after name clash resolved
diff --git a/signal/tensorflow_core/ops/overlap_add_op.cc b/signal/tensorflow_core/ops/overlap_add_op.cc
index 759faa9..74ccb1f 100644
--- a/signal/tensorflow_core/ops/overlap_add_op.cc
+++ b/signal/tensorflow_core/ops/overlap_add_op.cc
@@ -22,7 +22,7 @@
 namespace tensorflow {
 namespace signal {
 
-Status OverlapAddShape(InferenceContext* c) {
+absl::Status OverlapAddShape(InferenceContext* c) {
   shape_inference::DimensionHandle unused;
   ShapeHandle in;
   ShapeHandle out;
@@ -39,7 +39,7 @@
         c->ReplaceDim(out, -1, c->MakeDim(n_frames * frame_step), &out));
   }
   c->set_output(0, out);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 // TODO(b/286250473): change back name after name clash resolved
diff --git a/signal/tensorflow_core/ops/pcan_op.cc b/signal/tensorflow_core/ops/pcan_op.cc
new file mode 100644
index 0000000..2a8490f
--- /dev/null
+++ b/signal/tensorflow_core/ops/pcan_op.cc
@@ -0,0 +1,60 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/core/framework/op.h"
+#include "tensorflow/core/framework/shape_inference.h"
+
+using ::tensorflow::shape_inference::InferenceContext;
+using ::tensorflow::shape_inference::ShapeHandle;
+
+namespace tensorflow {
+namespace signal {
+
+namespace {
+
+absl::Status PcanShape(InferenceContext* c) {
+  ShapeHandle out, lut;
+  TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &out));
+  TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &out));
+  TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &lut));
+
+  c->set_output(0, out);
+  return absl::OkStatus();
+}
+
+}  // namespace
+
+REGISTER_OP("SignalPCAN")
+    .Attr("snr_shift: int")
+    .Input("input: uint32")
+    .Input("noise_estimate: uint32")
+    .Input("gain_lut: int16")
+    .Output("output: uint32")
+    .SetShapeFn(PcanShape)
+    .Doc(R"doc(
+Determines whether per-channel amplitude-normalized (PCAN) auto gain control is
+applied, using either floating-point or fixed-point computation. If enabled,
+the dynamic range of the filterbank output is compressed by dividing by a power
+of the noise estimate.
+
+input: A 1-D array of mel-spectrum subband filter bank outputs.
+noise_estimate: A 1-D array of mel-spectrum subband noise estimates.
+gain_lut: A 1-D lookup table for gain calculation.
+output: A 1-D array of processed subband filter bank.
+snr_shift: Amount of right shift when calculating the SNR.
+)doc");
+
+}  // namespace signal
+}  // namespace tensorflow
diff --git a/signal/tensorflow_core/ops/stacker_op.cc b/signal/tensorflow_core/ops/stacker_op.cc
index 2aa69c0..5fd2aca 100644
--- a/signal/tensorflow_core/ops/stacker_op.cc
+++ b/signal/tensorflow_core/ops/stacker_op.cc
@@ -22,7 +22,7 @@
 namespace tensorflow {
 namespace signal {
 
-Status StackerShape(InferenceContext* c) {
+absl::Status StackerShape(InferenceContext* c) {
   int num_channels, stacker_left_context, stacker_right_context;
   TF_RETURN_IF_ERROR(c->GetAttr<int>("num_channels", &num_channels));
   TF_RETURN_IF_ERROR(
@@ -41,7 +41,7 @@
       c->ReplaceDim(out, 0, c->MakeDim(num_channels * output_frames), &out));
   c->set_output(0, out);
   c->set_output(1, c->Scalar());
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 // TODO(b/286250473): change back name after name clash resolved
diff --git a/signal/tensorflow_core/ops/window_op.cc b/signal/tensorflow_core/ops/window_op.cc
index 24e51b2..5a08da8 100644
--- a/signal/tensorflow_core/ops/window_op.cc
+++ b/signal/tensorflow_core/ops/window_op.cc
@@ -22,7 +22,7 @@
 namespace tensorflow {
 namespace signal {
 
-Status WindowShape(InferenceContext* c) {
+absl::Status WindowShape(InferenceContext* c) {
   ShapeHandle out;
   TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &out));
   TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &out));
@@ -33,7 +33,7 @@
   TF_RETURN_IF_ERROR(c->WithValue(c->Dim(c->input(1), 0),
                                   InferenceContext::Value(dim_in), &dim_in));
   c->set_output(0, out);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 // TODO(b/286250473): change back name to "Window" after name clash resolved
@@ -55,4 +55,4 @@
 )doc");
 
 }  // namespace signal
-}  // namespace tensorflow
\ No newline at end of file
+}  // namespace tensorflow
diff --git a/tensorflow/compiler/mlir/lite/kernels/internal/BUILD b/tensorflow/compiler/mlir/lite/kernels/internal/BUILD
new file mode 100644
index 0000000..4ebbc21
--- /dev/null
+++ b/tensorflow/compiler/mlir/lite/kernels/internal/BUILD
@@ -0,0 +1,10 @@
+package(
+    default_visibility = ["//visibility:public"],
+    licenses = ["notice"],
+)
+
+cc_library(
+    name = "compatibility_macros",
+    hdrs = ["compatibility_macros.h"],
+    deps = ["//tensorflow/lite/kernels/internal:compatibility"],
+)
diff --git a/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_model_settings.cc b/tensorflow/compiler/mlir/lite/kernels/internal/compatibility_macros.h
similarity index 60%
copy from tensorflow/lite/micro/examples/micro_speech/simple_features/simple_model_settings.cc
copy to tensorflow/compiler/mlir/lite/kernels/internal/compatibility_macros.h
index e2cf661..5d07380 100644
--- a/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_model_settings.cc
+++ b/tensorflow/compiler/mlir/lite/kernels/internal/compatibility_macros.h
@@ -1,4 +1,4 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -13,11 +13,9 @@
 limitations under the License.
 ==============================================================================*/
 
-#include "tensorflow/lite/micro/examples/micro_speech/simple_features/simple_model_settings.h"
+#ifndef TENSORFLOW_COMPILER_MLIR_LITE_KERNELS_INTERNAL_COMPATABILITY_MACROS_H_
+#define TENSORFLOW_COMPILER_MLIR_LITE_KERNELS_INTERNAL_COMPATABILITY_MACROS_H_
 
-const char* kCategoryLabels[kCategoryCount] = {
-    "silence",
-    "unknown",
-    "yes",
-    "no",
-};
+#include "tensorflow/lite/kernels/internal/compatibility.h"  // IWYU pragma: keep
+
+#endif  // TENSORFLOW_COMPILER_MLIR_LITE_KERNELS_INTERNAL_COMPATABILITY_MACROS_H_
diff --git a/tensorflow/compiler/mlir/lite/schema/BUILD b/tensorflow/compiler/mlir/lite/schema/BUILD
new file mode 100644
index 0000000..f54726b
--- /dev/null
+++ b/tensorflow/compiler/mlir/lite/schema/BUILD
@@ -0,0 +1,41 @@
+load("@flatbuffers//:build_defs.bzl", "flatbuffer_cc_library")
+
+package(
+    default_visibility = [
+        "//visibility:public",
+    ],
+    licenses = ["notice"],
+)
+
+exports_files(
+    srcs = ["schema.fbs"],
+)
+
+flatbuffer_cc_library(
+    name = "schema_fbs",
+    srcs = ["schema.fbs"],
+)
+
+# Generic schema for inference on device (but with reflections makes bigger).
+flatbuffer_cc_library(
+    name = "schema_fbs_with_reflection",
+    srcs = ["schema.fbs"],
+    flatc_args = [
+        "--reflect-types",
+        "--reflect-names",
+        "--no-union-value-namespacing",
+        "--gen-object-api",
+    ],
+    out_prefix = "reflection/",
+)
+
+cc_library(
+    name = "schema_utils",
+    srcs = ["schema_utils.cc"],
+    hdrs = ["schema_utils.h"],
+    deps = [
+        ":schema_fbs",
+        "//tensorflow/compiler/mlir/lite/kernels/internal:compatibility_macros",
+        "@flatbuffers//:runtime_cc",
+    ],
+)
diff --git a/tensorflow/lite/schema/schema.fbs b/tensorflow/compiler/mlir/lite/schema/schema.fbs
similarity index 79%
rename from tensorflow/lite/schema/schema.fbs
rename to tensorflow/compiler/mlir/lite/schema/schema.fbs
index 4c84646..7ab78be 100644
--- a/tensorflow/lite/schema/schema.fbs
+++ b/tensorflow/compiler/mlir/lite/schema/schema.fbs
@@ -58,6 +58,7 @@
   UINT32 = 15,
   UINT16 = 16,
   INT4 = 17,
+  BFLOAT16 = 18,
 }
 
 // Custom quantization parameters for experimenting with new quantization
@@ -235,8 +236,12 @@
   // represented with -1.
   shape_signature:[int]; // Optional.
 
-  // If false, the rank or the number of tensor dimensions is unknown.
-  // If false, "shape" must be [].
+  // This field is added to distinguish between scalars and tensors of unknown
+  // ranks (both of which shape is []).
+  // For scalars (rank = 0), shape = [] and has_rank = true.
+  // For tensors with known rank (rank > 0) and shape, shape = [...] and
+  // has_rank = true.
+  // For tensors with unknown rank and shape, shape = [] and has_rank = false.
   has_rank: bool = false;
 
   // The nested Tensor types for VARIANT type. This is always empty for
@@ -420,6 +425,53 @@
   BITCAST = 159,
   BITWISE_XOR = 160,
   RIGHT_SHIFT = 161,
+  // All Operators start with STABLEHLO_ prefixes are subject to change
+  // Many of the ops below can not be executed by TFlite runtime
+  STABLEHLO_LOGISTIC = 162, // WARNING: Do not have runtime support
+  STABLEHLO_ADD = 163,
+  STABLEHLO_DIVIDE = 164, // WARNING: No runtime support yet
+  STABLEHLO_MULTIPLY = 165,
+  STABLEHLO_MAXIMUM = 166,
+  STABLEHLO_RESHAPE = 167, // WARNING: No runtime support yet
+  STABLEHLO_CLAMP = 168, // WARNING: No runtime support
+  STABLEHLO_CONCATENATE = 169, // WARNING: No runtime support
+  STABLEHLO_BROADCAST_IN_DIM = 170, // WARNING: No runtime support
+  STABLEHLO_CONVOLUTION = 171, // WARNING: No runtime support
+  STABLEHLO_SLICE = 172, // WARNING: No runtime support
+  STABLEHLO_CUSTOM_CALL = 173, // WARNING: No runtime support
+  STABLEHLO_REDUCE = 174, // WARNING: No runtime support
+  STABLEHLO_ABS = 175, // WARNING: No runtime support
+  STABLEHLO_AND = 176, // WARNING: No runtime support
+  STABLEHLO_COSINE = 177, // WARNING: No runtime support
+  STABLEHLO_EXPONENTIAL = 178, // WARNING: No runtime support
+  STABLEHLO_FLOOR = 179, // WARNING: No runtime support
+  STABLEHLO_LOG = 180, // WARNING: No runtime support
+  STABLEHLO_MINIMUM = 181,
+  STABLEHLO_NEGATE = 182, // WARNING: No runtime support
+  STABLEHLO_OR = 183, // WARNING: No runtime support
+  STABLEHLO_POWER = 184, // WARNING: No runtime support
+  STABLEHLO_REMAINDER = 185, // WARNING: No runtime support
+  STABLEHLO_RSQRT = 186, // WARNING: No runtime support
+  STABLEHLO_SELECT = 187, // WARNING: No runtime support
+  STABLEHLO_SUBTRACT = 188, // WARNING: No runtime support
+  STABLEHLO_TANH = 189, // WARNING: No runtime support
+  STABLEHLO_SCATTER = 190,
+  STABLEHLO_COMPARE = 191, // WARNING: No runtime support
+  STABLEHLO_CONVERT = 192, // WARNING: No runtime support
+  STABLEHLO_DYNAMIC_SLICE = 193, // WARNING: No runtime support
+  STABLEHLO_DYNAMIC_UPDATE_SLICE = 194, // WARNING: No runtime support
+  STABLEHLO_PAD = 195,
+  STABLEHLO_IOTA = 196, // WARNING: No runtime support
+  STABLEHLO_DOT_GENERAL = 197, // WARNING: No runtime support
+  STABLEHLO_REDUCE_WINDOW = 198,
+  STABLEHLO_SORT = 199, // WARNING: No runtime support
+  STABLEHLO_WHILE = 200, // WARNING: No runtime support
+  STABLEHLO_GATHER = 201,
+  STABLEHLO_TRANSPOSE = 202, // WARNING: No runtime support
+  DILATE = 203,
+  STABLEHLO_RNG_BIT_GENERATOR = 204,
+  REDUCE_WINDOW = 205 (deprecated),
+  STABLEHLO_COMPOSITE = 206, // WARNING: No runtime support
 }
 // LINT.ThenChange(nnapi_linter/linter.proto)
 
@@ -551,6 +603,192 @@
   BitcastOptions,
   BitwiseXorOptions,
   RightShiftOptions,
+  // DO NOT add new options this union, will cause failure in Java api
+  // generation otherwise
+  // Add new builtin options into builtin options 2 instead
+}
+
+union BuiltinOptions2{
+  StablehloConcatenateOptions,
+  StablehloBroadcastInDimOptions,
+  StablehloSliceOptions,
+  StablehloConvolutionOptions,
+  StablehloCustomCallOptions,
+  StablehloReduceOptions,
+  StablehloScatterOptions,
+  StablehloCompareOptions,
+  StablehloDynamicSliceOptions,
+  StablehloPadOptions,
+  StablehloIotaOptions,
+  StablehloDotGeneralOptions,
+  StablehloReduceWindowOptions,
+  StablehloSortOptions,
+  StablehloWhileOptions,
+  StablehloGatherOptions,
+  StablehloTransposeOptions,
+  DilateOptions,
+  StablehloRngBitGeneratorOptions,
+  ReduceWindowOptions (deprecated),
+  StableHLOCompositeOptions,
+}
+
+table StablehloGatherOptions{
+  offset_dims : [long];
+  collapsed_slice_dims : [long];
+  start_index_map : [long];
+  index_vector_dim : long;
+  slice_sizes : [long];
+  indices_are_sorted : bool;
+}
+
+table StablehloTransposeOptions{
+  permutation : [long];
+}
+
+enum StablehloPrecisionConfig : uint {
+  DEFAULT,
+  HIGH,
+  HIGHEST,
+}
+
+table StablehloDotGeneralOptions{
+  lhs_batching_dimensions : [long];
+  rhs_batching_dimensions : [long];
+  lhs_contracting_dimensions : [long];
+  rhs_contracting_dimensions : [long];
+  precision_config : [StablehloPrecisionConfig];
+}
+
+table StablehloReduceWindowOptions{
+  window_dimensions : [long];
+  window_strides : [long];
+  base_dilations : [long];
+  window_dilations : [long];
+  padding : [long];
+  body_subgraph_index : int;
+}
+
+table StablehloWhileOptions{
+  cond_subgraph_index : int;
+  body_subgraph_index : int;
+}
+
+table StablehloSortOptions{
+  dimension : long;
+  is_stable : bool;
+  comparator_subgraph_index : int;
+}
+
+table StablehloConcatenateOptions {
+  dimension : long;
+}
+
+table StablehloBroadcastInDimOptions{
+  broadcast_dimensions : [long];
+}
+
+enum StablehloComparisonDirection : uint {
+  STABLEHLO_COMPARISON_DIRECTION_EQ,
+  STABLEHLO_COMPARISON_DIRECTION_NE,
+  STABLEHLO_COMPARISON_DIRECTION_GE,
+  STABLEHLO_COMPARISON_DIRECTION_GT,
+  STABLEHLO_COMPARISON_DIRECTION_LE,
+  STABLEHLO_COMPARISON_DIRECTION_LT,
+
+}
+
+enum StablehloComparisonType : uint {
+  STABLEHLO_COMPARISON_TYPE_NOTYPE,
+  STABLEHLO_COMPARISON_TYPE_FLOAT,
+  STABLEHLO_COMPARISON_TYPE_FLOAT_TOTAL_ORDER,
+  STABLEHLO_COMPARISON_TYPE_SIGNED,
+  STABLEHLO_COMPARISON_TYPE_UNSIGNED,
+}
+
+table StablehloCompareOptions{
+  comparison_direction : StablehloComparisonDirection;
+  compare_type : StablehloComparisonType;
+}
+
+table StablehloDynamicSliceOptions{
+  slice_sizes : [long];
+}
+
+table StablehloPadOptions{
+  edge_padding_low : [long];
+  edge_padding_high : [long];
+  interior_padding : [long];
+}
+
+table StablehloIotaOptions{
+  iota_dimension : long;
+}
+
+table StablehloCustomCallOptions {
+  call_target_name : string;
+  has_side_effect : bool;
+  backend_config: string;
+  api_version : int; // will be deprecated
+  called_computations: [int]; // should point to subgraphs of the computations
+  custom_attributes : [ubyte];
+}
+
+table StablehloReduceOptions {
+  dimensions : [long];
+  body_subgraph_index : int;
+}
+
+table StablehloSliceOptions{
+  start_indices : [long];
+  limit_indices : [long];
+  strides : [long];
+}
+
+table StablehloConvolutionOptions{
+  window_strides : [long];
+  padding : [long];
+  lhs_dilation : [long];
+  rhs_dilation : [long];
+  window_reversal : [bool];
+  input_batch_dimension : long;
+  input_feature_dimension : long;
+  input_spatial_dimensions : [long];
+  kernel_input_feature_dimension : long;
+  kernel_output_feature_dimension : long;
+  kernel_spatial_dimensions : [long];
+  output_batch_dimension : long;
+  output_feature_dimension : long;
+  output_spatial_dimensions	: [long];
+  feature_group_count : long;
+  batch_group_count : long;
+  precision_config : [StablehloPrecisionConfig];
+}
+
+table StablehloScatterOptions {
+  indices_are_sorted: bool;
+  update_window_dims: [long];
+  inserted_window_dims: [long];
+  scatter_dims_to_operand_dims: [long];
+  index_vector_dim: long;
+  unique_indices: bool;
+  update_computation_subgraph_index: int;
+}
+
+enum RngAlgorithm : byte {
+  // An algorithm auto-selected by the system according to device type.
+  DEFAULT = 0,
+  // The Philox algorithm, as described in paper
+  // ['Parallel Random Numbers: As Easy as 1, 2, 3']
+  // (https://www.thesalmons.org/john/random123/papers/random123sc11.pdf)
+  PHILOX = 1,
+  // The ThreeFry algorithm, as described in paper
+  // ['Parallel Random Numbers: As Easy as 1, 2, 3']
+  // (https://www.thesalmons.org/john/random123/papers/random123sc11.pdf)
+  THREEFRY = 2,
+}
+
+table StablehloRngBitGeneratorOptions {
+  algorithm:RngAlgorithm;
 }
 
 // LINT.IfChange
@@ -575,6 +813,9 @@
   fused_activation_function:ActivationFunctionType;
   dilation_w_factor:int = 1;
   dilation_h_factor:int = 1;
+  // Parameters for Conv2D version 8 or above.
+  // When set, quantized_bias_type defines the dtype for both bias and accumulator.
+  quantized_bias_type: TensorType;
 }
 
 // Options for both Conv3D and Conv3DTranspose.
@@ -682,6 +923,10 @@
   // If set to true, then weights-only op will use asymmetric quantization for
   // inputs.
   asymmetric_quantize_inputs: bool;
+
+  // Parameters for FullyConnected version 11 or above.
+  // When set, quantized_bias_type defines the dtype for both bias and accumulator.
+  quantized_bias_type: TensorType;
 }
 
 table SoftmaxOptions {
@@ -941,6 +1186,11 @@
 
   // Parameters supported by version 4:
   fused_activation_function:ActivationFunctionType = NONE;
+
+  // Parameters for TransposeConv version 5 or above.
+  // If set, use this for bias and accumulator.
+  // When set, quantized_bias_type defines the dtype for both bias and accumulator.
+  quantized_bias_type: TensorType;
 }
 
 table ExpandDimsOptions {
@@ -1198,6 +1448,23 @@
 table RightShiftOptions {
 }
 
+table DilateOptions {
+}
+
+enum ReduceWindowFunction : int {
+  UNSUPPORTED,
+  ADD,
+  MUL,
+  MINIMUM,
+  MAXIMUM,
+  ALL,
+  ANY,
+}
+
+table ReduceWindowOptions (deprecated) {
+  reduce_function: ReduceWindowFunction;
+}
+
 // An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
 // builtin, or a string if the operator is custom.
 table OperatorCode {
@@ -1222,6 +1489,14 @@
   FLEXBUFFERS = 0,
 }
 
+table StableHLOCompositeOptions {
+  name:string;
+  decomposition_subgraph_index:int32;
+  composite_attributes:[ubyte];
+  composite_attributes_format:CustomOptionsFormat;
+  version:int32;
+}
+
 // An operator takes tensors as inputs and outputs. The type of operation being
 // performed is determined by an index into the list of valid OperatorCodes,
 // while the specifics of each operations is configured using builtin_options
@@ -1264,6 +1539,11 @@
   // beginning of the file and is only valid if > 1
   large_custom_options_offset: ulong;
   large_custom_options_size: ulong;
+
+  // Flatbuffers union struct has a 128 elements limit in JAVA, so a second
+  // union is added, in the case of where BuiltinOptions2 runs out, a third
+  // one can be added
+  builtin_options_2 : BuiltinOptions2;
 }
 
 // The root type, defining a subgraph, which typically represents an entire
diff --git a/tensorflow/compiler/mlir/lite/schema/schema_generated.h b/tensorflow/compiler/mlir/lite/schema/schema_generated.h
new file mode 100644
index 0000000..7eeedf8
--- /dev/null
+++ b/tensorflow/compiler/mlir/lite/schema/schema_generated.h
@@ -0,0 +1,22 @@
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_COMPILER_MLIR_LITE_SCHEMA_SCHEMA_GENERATED_H_
+#define TENSORFLOW_COMPILER_MLIR_LITE_SCHEMA_SCHEMA_GENERATED_H_
+
+// This file should only be used by the make build to redirect schema_utils.cc
+// usage of the generated schema to the proper location.
+#include "tensorflow/lite/schema/schema_generated.h"  // IWYU pragma: keep
+
+#endif  // TENSORFLOW_COMPILER_MLIR_LITE_SCHEMA_SCHEMA_GENERATED_H_
diff --git a/tensorflow/lite/schema/schema_utils.cc b/tensorflow/compiler/mlir/lite/schema/schema_utils.cc
similarity index 89%
rename from tensorflow/lite/schema/schema_utils.cc
rename to tensorflow/compiler/mlir/lite/schema/schema_utils.cc
index fc19290..a173380 100644
--- a/tensorflow/lite/schema/schema_utils.cc
+++ b/tensorflow/compiler/mlir/lite/schema/schema_utils.cc
@@ -12,16 +12,16 @@
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#include "tensorflow/lite/schema/schema_utils.h"
+#include "tensorflow/compiler/mlir/lite/schema/schema_utils.h"
 
 #include <algorithm>
 
-#include "tensorflow/lite/kernels/internal/compatibility.h"
+#include "tensorflow/compiler/mlir/lite/kernels/internal/compatibility_macros.h"
 
 namespace tflite {
 
 // The following GetBuiltinCode methods are the utility methods for reading
-// builtin operatore code, ensuring compatibility issues between v3 and v3a
+// builtin operator code, ensuring compatibility issues between v3 and v3a
 // schema. Always the maximum value of the two fields always will be the correct
 // value as follows:
 //
@@ -29,7 +29,7 @@
 //
 // The `builtin_code` field is not available in the v3 models. Flatbuffer
 // library will feed zero value, which is the default value in the v3a schema.
-// The actual builtin operatore code value will exist in the
+// The actual builtin operator code value will exist in the
 // `deprecated_builtin_code` field. At the same time, it implies that
 // `deprecated_builtin_code` >= `builtin_code` and the maximum value of the two
 // fields will be same with `deprecated_builtin_code'.
diff --git a/tensorflow/compiler/mlir/lite/schema/schema_utils.h b/tensorflow/compiler/mlir/lite/schema/schema_utils.h
new file mode 100644
index 0000000..7498aa0
--- /dev/null
+++ b/tensorflow/compiler/mlir/lite/schema/schema_utils.h
@@ -0,0 +1,33 @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_COMPILER_MLIR_LITE_SCHEMA_SCHEMA_UTILS_H_
+#define TENSORFLOW_COMPILER_MLIR_LITE_SCHEMA_SCHEMA_UTILS_H_
+
+#include "flatbuffers/flatbuffers.h"
+#include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"
+
+namespace tflite {
+
+// The following methods are introduced to resolve op builtin code shortage
+// problem. The new builtin operator will be assigned to the extended builtin
+// code field in the flatbuffer schema. Those methods helps to hide builtin code
+// details.
+BuiltinOperator GetBuiltinCode(const OperatorCode *op_code);
+
+BuiltinOperator GetBuiltinCode(const OperatorCodeT *op_code);
+
+}  // namespace tflite
+
+#endif  // TENSORFLOW_COMPILER_MLIR_LITE_SCHEMA_SCHEMA_UTILS_H_
diff --git a/tensorflow/extra_rules.bzl b/tensorflow/extra_rules.bzl
index 7a3523e..29e0bda 100644
--- a/tensorflow/extra_rules.bzl
+++ b/tensorflow/extra_rules.bzl
@@ -19,6 +19,10 @@
     """Config setting for all Fusion F1 based cores."""
     return "//tensorflow/lite/micro/kernels:xtensa_fusion_f1_default"
 
+def xtensa_hifi_3_config():
+    """Config setting for all HiFi 3 based cores."""
+    return "//tensorflow/lite/micro/kernels:xtensa_hifi_3_default"
+
 def xtensa_hifi_3z_config():
     """Config setting for all HiFi 3z based cores."""
     return "//tensorflow/lite/micro/kernels:xtensa_hifi_3z_default"
diff --git a/tensorflow/lite/build_def.bzl b/tensorflow/lite/build_def.bzl
index e8fc49c..e65ec23 100644
--- a/tensorflow/lite/build_def.bzl
+++ b/tensorflow/lite/build_def.bzl
@@ -3,6 +3,7 @@
     copts = [
         "-DFARMHASH_NO_CXX_STRING",
         "-Wno-sign-compare",
+        "-Wno-unused-parameter",
         "-fno-exceptions",  # Exceptions are unused in TFLite.
     ]
     return copts
diff --git a/tensorflow/lite/builtin_ops.h b/tensorflow/lite/builtin_ops.h
index f9871ad..5dba0f6 100644
--- a/tensorflow/lite/builtin_ops.h
+++ b/tensorflow/lite/builtin_ops.h
@@ -189,6 +189,51 @@
   kTfLiteBuiltinBitcast = 159,
   kTfLiteBuiltinBitwiseXor = 160,
   kTfLiteBuiltinRightShift = 161,
+  kTfLiteBuiltinStablehloLogistic = 162,
+  kTfLiteBuiltinStablehloAdd = 163,
+  kTfLiteBuiltinStablehloDivide = 164,
+  kTfLiteBuiltinStablehloMultiply = 165,
+  kTfLiteBuiltinStablehloMaximum = 166,
+  kTfLiteBuiltinStablehloReshape = 167,
+  kTfLiteBuiltinStablehloClamp = 168,
+  kTfLiteBuiltinStablehloConcatenate = 169,
+  kTfLiteBuiltinStablehloBroadcastInDim = 170,
+  kTfLiteBuiltinStablehloConvolution = 171,
+  kTfLiteBuiltinStablehloSlice = 172,
+  kTfLiteBuiltinStablehloCustomCall = 173,
+  kTfLiteBuiltinStablehloReduce = 174,
+  kTfLiteBuiltinStablehloAbs = 175,
+  kTfLiteBuiltinStablehloAnd = 176,
+  kTfLiteBuiltinStablehloCosine = 177,
+  kTfLiteBuiltinStablehloExponential = 178,
+  kTfLiteBuiltinStablehloFloor = 179,
+  kTfLiteBuiltinStablehloLog = 180,
+  kTfLiteBuiltinStablehloMinimum = 181,
+  kTfLiteBuiltinStablehloNegate = 182,
+  kTfLiteBuiltinStablehloOr = 183,
+  kTfLiteBuiltinStablehloPower = 184,
+  kTfLiteBuiltinStablehloRemainder = 185,
+  kTfLiteBuiltinStablehloRsqrt = 186,
+  kTfLiteBuiltinStablehloSelect = 187,
+  kTfLiteBuiltinStablehloSubtract = 188,
+  kTfLiteBuiltinStablehloTanh = 189,
+  kTfLiteBuiltinStablehloScatter = 190,
+  kTfLiteBuiltinStablehloCompare = 191,
+  kTfLiteBuiltinStablehloConvert = 192,
+  kTfLiteBuiltinStablehloDynamicSlice = 193,
+  kTfLiteBuiltinStablehloDynamicUpdateSlice = 194,
+  kTfLiteBuiltinStablehloPad = 195,
+  kTfLiteBuiltinStablehloIota = 196,
+  kTfLiteBuiltinStablehloDotGeneral = 197,
+  kTfLiteBuiltinStablehloReduceWindow = 198,
+  kTfLiteBuiltinStablehloSort = 199,
+  kTfLiteBuiltinStablehloWhile = 200,
+  kTfLiteBuiltinStablehloGather = 201,
+  kTfLiteBuiltinStablehloTranspose = 202,
+  kTfLiteBuiltinDilate = 203,
+  kTfLiteBuiltinStablehloRngBitGenerator = 204,
+  kTfLiteBuiltinReduceWindow = 205,
+  kTfLiteBuiltinStablehloComposite = 206,
 } TfLiteBuiltinOperator;
 
 #ifdef __cplusplus
diff --git a/tensorflow/lite/c/builtin_op_data.h b/tensorflow/lite/c/builtin_op_data.h
index 7628e5a..0606819 100644
--- a/tensorflow/lite/c/builtin_op_data.h
+++ b/tensorflow/lite/c/builtin_op_data.h
@@ -15,6 +15,9 @@
 #ifndef TENSORFLOW_LITE_C_BUILTIN_OP_DATA_H_
 #define TENSORFLOW_LITE_C_BUILTIN_OP_DATA_H_
 
+/// For documentation, see
+/// third_party/tensorflow/lite/core/c/builtin_op_data.h
+
 #include "tensorflow/lite/core/c/builtin_op_data.h"
 
 #endif  // TENSORFLOW_LITE_C_BUILTIN_OP_DATA_H_
diff --git a/tensorflow/lite/c/c_api_types.h b/tensorflow/lite/c/c_api_types.h
index cdbf1fd..05cda07 100644
--- a/tensorflow/lite/c/c_api_types.h
+++ b/tensorflow/lite/c/c_api_types.h
@@ -15,6 +15,12 @@
 #ifndef TENSORFLOW_LITE_C_C_API_TYPES_H_
 #define TENSORFLOW_LITE_C_C_API_TYPES_H_
 
+/// \file
+///
+/// C API types for TensorFlow Lite.
+///
+/// For documentation, see tensorflow/lite/core/c/c_api_types.h
+
 #include "tensorflow/lite/core/c/c_api_types.h"
 
 #endif  // TENSORFLOW_LITE_C_C_API_TYPES_H_
diff --git a/tensorflow/lite/c/common.h b/tensorflow/lite/c/common.h
index e3e8001..8a8b513 100644
--- a/tensorflow/lite/c/common.h
+++ b/tensorflow/lite/c/common.h
@@ -13,25 +13,17 @@
 limitations under the License.
 ==============================================================================*/
 
-// This file defines common C types and APIs for implementing operations,
-// delegates and other constructs in TensorFlow Lite. The actual operations and
-// delegates can be defined using C++, but the interface between the interpreter
-// and the operations are C.
-//
-// Summary of abstractions
-// TF_LITE_ENSURE - Self-sufficient error checking
-// TfLiteStatus - Status reporting
-// TfLiteIntArray - stores tensor shapes (dims),
-// TfLiteContext - allows an op to access the tensors
-// TfLiteTensor - tensor (a multidimensional array)
-// TfLiteNode - a single node or operation
-// TfLiteRegistration - the implementation of a conceptual operation.
-// TfLiteDelegate - allows delegation of nodes to alternative backends.
-//
-// Some abstractions in this file are created and managed by Interpreter.
-//
-// NOTE: The order of values in these structs are "semi-ABI stable". New values
-// should be added only to the end of structs and never reordered.
+/// \file
+///
+/// This file defines common C types and APIs for implementing operations,
+/// delegates and other constructs in TensorFlow Lite. The actual operations and
+/// delegates can be defined using C++, but the interface between the
+/// interpreter and the operations are C.
+///
+/// For documentation, see tensorflow/lite/core/c/common.h.
+///
+/// See also c_api_opaque.h which has more ABI-stable variants of some of these
+/// APIs.
 
 #ifndef TENSORFLOW_LITE_C_COMMON_H_
 #define TENSORFLOW_LITE_C_COMMON_H_
diff --git a/tensorflow/lite/core/api/error_reporter.h b/tensorflow/lite/core/api/error_reporter.h
index 99ab8cf..1e0ef7d 100644
--- a/tensorflow/lite/core/api/error_reporter.h
+++ b/tensorflow/lite/core/api/error_reporter.h
@@ -61,9 +61,9 @@
 // reduce binary size, define TF_LITE_STRIP_ERROR_STRINGS when compiling and
 // every call will be stubbed out, taking no memory.
 #ifndef TF_LITE_STRIP_ERROR_STRINGS
-#define TF_LITE_REPORT_ERROR(reporter, ...)                             \
-  do {                                                                  \
-    static_cast<tflite::ErrorReporter*>(reporter)->Report(__VA_ARGS__); \
+#define TF_LITE_REPORT_ERROR(reporter, ...)                               \
+  do {                                                                    \
+    static_cast<::tflite::ErrorReporter*>(reporter)->Report(__VA_ARGS__); \
   } while (false)
 #else  // TF_LITE_STRIP_ERROR_STRINGS
 #define TF_LITE_REPORT_ERROR(reporter, ...)
diff --git a/tensorflow/lite/core/api/flatbuffer_conversions.cc b/tensorflow/lite/core/api/flatbuffer_conversions.cc
index 9f955df..3526810 100644
--- a/tensorflow/lite/core/api/flatbuffer_conversions.cc
+++ b/tensorflow/lite/core/api/flatbuffer_conversions.cc
@@ -15,11 +15,13 @@
 
 #include "tensorflow/lite/core/api/flatbuffer_conversions.h"
 
+#include <algorithm>
 #include <cstddef>
 #include <cstdint>
 #include <memory>
 
 #include "flatbuffers/flatbuffers.h"  // from @flatbuffers
+#include "flatbuffers/vector.h"  // from @flatbuffers
 #include "tensorflow/lite/core/api/error_reporter.h"
 #include "tensorflow/lite/core/c/builtin_op_data.h"
 #include "tensorflow/lite/core/c/common.h"
@@ -76,9 +78,10 @@
 // Copies the contents from the flatbuffer int vector `flatbuffer` into the
 // int array `buffer`. `flat_vector` and `buffer` represent the same
 // configuration operation for a given operation.
-TfLiteStatus FlatBufferIntVectorToArray(
-    int max_size_of_buffer, const flatbuffers::Vector<int32_t>* flat_vector,
-    int* buffer, ErrorReporter* error_reporter, const char* op_name) {
+template <typename DataType = int32_t>
+static TfLiteStatus FlatBufferIntVectorToArray(
+    int max_size_of_buffer, const flatbuffers::Vector<DataType>* flat_vector,
+    DataType* buffer, ErrorReporter* error_reporter, const char* op_name) {
   if (!flat_vector) {
     TF_LITE_REPORT_ERROR(error_reporter,
                          "Input array not provided for operation '%s'.\n",
@@ -86,7 +89,7 @@
     return kTfLiteError;
   } else {
     size_t num_dimensions = flat_vector->size();
-    if (num_dimensions > max_size_of_buffer / sizeof(int)) {
+    if (num_dimensions > max_size_of_buffer / sizeof(DataType)) {
       TF_LITE_REPORT_ERROR(
           error_reporter,
           "Found too many dimensions in the input array of operation '%s'.\n",
@@ -142,6 +145,18 @@
   return kTfLiteMirrorPaddingUnknown;
 }
 
+TfLiteRngAlgorithm ConvertRngAlgorithm(RngAlgorithm algorithm) {
+  switch (algorithm) {
+    case RngAlgorithm_THREEFRY:
+      return kTfLiteRngAlgorithmThreefry;
+    case RngAlgorithm_PHILOX:
+      return kTfLiteRngAlgorithmPhilox;
+    case RngAlgorithm_DEFAULT:
+      return kTfLiteRngAlgorithmDefault;
+  }
+  return kTfLiteRngAlgorithmUnknown;
+}
+
 #ifndef TF_LITE_STATIC_MEMORY
 TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
                                ErrorReporter* error_reporter,
@@ -857,6 +872,97 @@
       *builtin_data = params.release();
       return kTfLiteOk;
     }
+    case BuiltinOperator_STABLEHLO_SCATTER: {
+      return ParseStablehloScatter(op, error_reporter, allocator, builtin_data);
+    }
+    case BuiltinOperator_STABLEHLO_RNG_BIT_GENERATOR: {
+      return ParseStablehloRngBitGenerator(op, error_reporter, allocator,
+                                           builtin_data);
+    }
+    case BuiltinOperator_STABLEHLO_GATHER: {
+      return ParseStablehloGather(op, error_reporter, allocator, builtin_data);
+    }
+    case BuiltinOperator_STABLEHLO_REDUCE_WINDOW: {
+      return ParseStablehloReduceWindow(op, error_reporter, allocator,
+                                        builtin_data);
+    }
+    case BuiltinOperator_REDUCE_WINDOW: {
+      auto params = safe_allocator.Allocate<TfLiteReduceWindowParams>();
+      TF_LITE_ENSURE(error_reporter, params != nullptr);
+      if (const auto* reduce_params =
+              op->builtin_options_2_as_ReduceWindowOptions()) {
+        switch (reduce_params->reduce_function()) {
+          case ReduceWindowFunction_ADD:
+            params->reduce_function = TfLiteReduceWindowFunctionAdd;
+            break;
+          case ReduceWindowFunction_MUL:
+            params->reduce_function = TfLiteReduceWindowFunctionMul;
+            break;
+          case ReduceWindowFunction_MINIMUM:
+            params->reduce_function = TfLiteReduceWindowFunctionMin;
+            break;
+          case ReduceWindowFunction_MAXIMUM:
+            params->reduce_function = TfLiteReduceWindowFunctionMax;
+            break;
+          case ReduceWindowFunction_ALL:
+            params->reduce_function = TfLiteReduceWindowFunctionAll;
+            break;
+          case ReduceWindowFunction_ANY:
+            params->reduce_function = TfLiteReduceWindowFunctionAny;
+            break;
+          case ReduceWindowFunction_UNSUPPORTED:
+          default:
+            return kTfLiteError;
+        }
+      }
+      *builtin_data = params.release();
+      return kTfLiteOk;
+    }
+    case BuiltinOperator_STABLEHLO_PAD: {
+      return ParseStablehloPad(op, error_reporter, allocator, builtin_data);
+    }
+    case BuiltinOperator_STABLEHLO_COMPOSITE: {
+      return ParseStablehloComposite(op, error_reporter, allocator,
+                                     builtin_data);
+    }
+    // TODO: skip param parsing for now since ops below don't have kernels
+    case BuiltinOperator_STABLEHLO_SLICE:
+    case BuiltinOperator_STABLEHLO_BROADCAST_IN_DIM:
+    case BuiltinOperator_STABLEHLO_CONVOLUTION:
+    case BuiltinOperator_STABLEHLO_LOGISTIC:
+    case BuiltinOperator_STABLEHLO_ADD:
+    case BuiltinOperator_STABLEHLO_DIVIDE:
+    case BuiltinOperator_STABLEHLO_MULTIPLY:
+    case BuiltinOperator_STABLEHLO_MAXIMUM:
+    case BuiltinOperator_STABLEHLO_RESHAPE:
+    case BuiltinOperator_STABLEHLO_CLAMP:
+    case BuiltinOperator_STABLEHLO_CONCATENATE:
+    case BuiltinOperator_STABLEHLO_CUSTOM_CALL:
+    case BuiltinOperator_STABLEHLO_REDUCE:
+    case BuiltinOperator_STABLEHLO_ABS:
+    case BuiltinOperator_STABLEHLO_AND:
+    case BuiltinOperator_STABLEHLO_COSINE:
+    case BuiltinOperator_STABLEHLO_EXPONENTIAL:
+    case BuiltinOperator_STABLEHLO_FLOOR:
+    case BuiltinOperator_STABLEHLO_LOG:
+    case BuiltinOperator_STABLEHLO_MINIMUM:
+    case BuiltinOperator_STABLEHLO_NEGATE:
+    case BuiltinOperator_STABLEHLO_OR:
+    case BuiltinOperator_STABLEHLO_POWER:
+    case BuiltinOperator_STABLEHLO_REMAINDER:
+    case BuiltinOperator_STABLEHLO_RSQRT:
+    case BuiltinOperator_STABLEHLO_SELECT:
+    case BuiltinOperator_STABLEHLO_SUBTRACT:
+    case BuiltinOperator_STABLEHLO_TANH:
+    case BuiltinOperator_STABLEHLO_DYNAMIC_SLICE:
+    case BuiltinOperator_STABLEHLO_DYNAMIC_UPDATE_SLICE:
+    case BuiltinOperator_STABLEHLO_IOTA:
+    case BuiltinOperator_STABLEHLO_COMPARE:
+    case BuiltinOperator_STABLEHLO_CONVERT:
+    case BuiltinOperator_STABLEHLO_DOT_GENERAL:
+    case BuiltinOperator_STABLEHLO_SORT:
+    case BuiltinOperator_STABLEHLO_WHILE:
+    case BuiltinOperator_STABLEHLO_TRANSPOSE:
 
     // Below are the ops with no builtin_data structure.
     // TODO(aselle): Implement call in BuiltinOptions, but nullptrs are
@@ -899,6 +1005,7 @@
     case BuiltinOperator_SIGN:
     case BuiltinOperator_BITCAST:
     case BuiltinOperator_WHERE:
+    case BuiltinOperator_DILATE:
       return kTfLiteOk;
     case BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES:
       return kTfLiteError;
@@ -914,6 +1021,9 @@
     case TensorType_FLOAT16:
       *type = kTfLiteFloat16;
       return kTfLiteOk;
+    case TensorType_BFLOAT16:
+      *type = kTfLiteBFloat16;
+      return kTfLiteOk;
     case TensorType_FLOAT32:
       *type = kTfLiteFloat32;
       return kTfLiteOk;
@@ -999,7 +1109,7 @@
   } else {
     // TODO(b/157480169): We should either return kTfLiteError or fill in some
     // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
+    // better understand the ramifications of changing the legacy behavior.
   }
 
   *builtin_data = params.release();
@@ -1029,7 +1139,7 @@
   } else {
     // TODO(b/157480169): We should either return kTfLiteError or fill in some
     // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
+    // better understand the ramifications of changing the legacy behavior.
   }
 
   *builtin_data = params.release();
@@ -1054,7 +1164,7 @@
   } else {
     // TODO(b/157480169): We should either return kTfLiteError or fill in some
     // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
+    // better understand the ramifications of changing the legacy behavior.
   }
 
   *builtin_data = params.release();
@@ -1134,7 +1244,7 @@
   } else {
     // TODO(b/157480169): We should either return kTfLiteError or fill in some
     // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
+    // better understand the ramifications of changing the legacy behavior.
   }
 
   *builtin_data = params.release();
@@ -1192,7 +1302,7 @@
   } else {
     // TODO(b/157480169): We should either return kTfLiteError or fill in some
     // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
+    // better understand the ramifications of changing the legacy behavior.
   }
 
   *builtin_data = params.release();
@@ -1220,10 +1330,13 @@
 
     params->dilation_width_factor = schema_params->dilation_w_factor();
     params->dilation_height_factor = schema_params->dilation_h_factor();
+    TF_LITE_ENSURE_STATUS(
+        ConvertTensorType(schema_params->quantized_bias_type(),
+                          &params->quantized_bias_type, error_reporter));
   } else {
     // TODO(b/157480169): We should either return kTfLiteError or fill in some
     // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
+    // better understand the ramifications of changing the legacy behavior.
   }
 
   *builtin_data = params.release();
@@ -1274,7 +1387,7 @@
   } else {
     // TODO(b/157480169): We should either return kTfLiteError or fill in some
     // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
+    // better understand the ramifications of changing the legacy behavior.
   }
 
   *builtin_data = params.release();
@@ -1310,7 +1423,7 @@
   } else {
     // TODO(b/157480169): We should either return kTfLiteError or fill in some
     // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
+    // better understand the ramifications of changing the legacy behavior.
   }
 
   *builtin_data = params.release();
@@ -1434,7 +1547,9 @@
     params->keep_num_dims = schema_params->keep_num_dims();
     params->asymmetric_quantize_inputs =
         schema_params->asymmetric_quantize_inputs();
-
+    TF_LITE_ENSURE_STATUS(
+        ConvertTensorType(schema_params->quantized_bias_type(),
+                          &params->quantized_bias_type, error_reporter));
     switch (schema_params->weights_format()) {
       case FullyConnectedOptionsWeightsFormat_DEFAULT:
         params->weights_format = kTfLiteFullyConnectedWeightsFormatDefault;
@@ -1451,7 +1566,7 @@
   } else {
     // TODO(b/157480169): We should either return kTfLiteError or fill in some
     // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
+    // better understand the ramifications of changing the legacy behavior.
   }
 
   *builtin_data = params.release();
@@ -1528,7 +1643,7 @@
   } else {
     // TODO(b/157480169): We should either return kTfLiteError or fill in some
     // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
+    // better understand the ramifications of changing the legacy behavior.
   }
 
   *builtin_data = params.release();
@@ -1555,7 +1670,7 @@
   } else {
     // TODO(b/157480169): We should either return kTfLiteError or fill in some
     // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
+    // better understand the ramifications of changing the legacy behavior.
   }
 
   *builtin_data = params.release();
@@ -1711,7 +1826,7 @@
   } else {
     // TODO(b/157480169): We should either return kTfLiteError or fill in some
     // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
+    // better understand the ramifications of changing the legacy behavior.
   }
 
   *builtin_data = params.release();
@@ -1735,7 +1850,7 @@
   } else {
     // TODO(b/157480169): We should either return kTfLiteError or fill in some
     // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
+    // better understand the ramifications of changing the legacy behavior.
   }
 
   *builtin_data = params.release();
@@ -1776,7 +1891,7 @@
   } else {
     // TODO(b/157480169): We should either return kTfLiteError or fill in some
     // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
+    // better understand the ramifications of changing the legacy behavior.
   }
 
   *builtin_data = params.release();
@@ -1822,7 +1937,7 @@
   } else {
     // TODO(b/157480169): We should either return kTfLiteError or fill in some
     // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
+    // better understand the ramifications of changing the legacy behavior.
   }
 
   *builtin_data = params.release();
@@ -1880,7 +1995,7 @@
   } else {
     // TODO(b/157480169): We should either return kTfLiteError or fill in some
     // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
+    // better understand the ramifications of changing the legacy behavior.
   }
 
   *builtin_data = params.release();
@@ -1933,7 +2048,7 @@
   } else {
     // TODO(b/157480169): We should either return kTfLiteError or fill in some
     // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
+    // better understand the ramifications of changing the legacy behavior.
   }
 
   *builtin_data = params.release();
@@ -1994,6 +2109,307 @@
   return kTfLiteOk;
 }
 
+TfLiteStatus ParseStablehloReduceWindow(const Operator* op,
+                                        ErrorReporter* error_reporter,
+                                        BuiltinDataAllocator* allocator,
+                                        void** builtin_data) {  // Parses 'stablehlo.reduce_window' options into TfLiteStablehloReduceWindowParams.
+  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+  SafeBuiltinDataAllocator safe_allocator(allocator);
+  auto params = safe_allocator.Allocate<TfLiteStablehloReduceWindowParams>();
+
+  const StablehloReduceWindowOptions* schema_params =
+      op->builtin_options_2_as_StablehloReduceWindowOptions();
+  if (schema_params) {  // Unlike the legacy parsers, a missing options table is an error here.
+    if (!schema_params->window_dimensions() ||
+        schema_params->window_dimensions()->size() == 0) {  // Required attribute; it also defines the rank.
+      TF_LITE_REPORT_ERROR(error_reporter,
+                           "'window_dimensions' attribute is not optional for "
+                           "'stablehlo.reduce_window' and cannot be empty.");
+      return kTfLiteError;
+    }
+
+    const size_t rank = schema_params->window_dimensions()->size();
+
+    auto LoadAttr = [&error_reporter](
+                        int64_t* params_array, size_t params_array_size_bytes,
+                        const flatbuffers::Vector<int64_t>* flatbuffer_vector,
+                        const char* attr_name, const size_t expected_size,
+                        const int64_t fill_value) -> TfLiteStatus {  // Copies one attribute into a fixed-size array, or fills 'fill_value' when absent.
+      if (flatbuffer_vector && flatbuffer_vector->size()) {
+        if (expected_size != 0 && flatbuffer_vector->size() != expected_size) {
+          TF_LITE_REPORT_ERROR(
+              error_reporter,
+              "'%s' attribute of 'stablehlo.reduce_window' does not have the "
+              "expected size (%llu != %llu).",
+              attr_name, flatbuffer_vector->size(), expected_size);  // NOTE(review): size() is 32-bit and size_t may be 32-bit; '%llu' expects unsigned long long — confirm/cast the format args.
+          return kTfLiteError;
+        }
+        TfLiteStatus status = FlatBufferIntVectorToArray(
+            params_array_size_bytes, flatbuffer_vector, params_array,
+            error_reporter, "stablehlo.reduce_window");
+        if (status != kTfLiteOk) {
+          TF_LITE_REPORT_ERROR(error_reporter, "Check the '%s' attribute.",
+                               attr_name);
+          return status;
+        }
+      } else {  // Attribute absent: default every entry.
+        std::fill_n(params_array, params_array_size_bytes / sizeof(int64_t),
+                    fill_value);
+      }
+      return kTfLiteOk;
+    };
+
+    TF_LITE_ENSURE_STATUS(
+        LoadAttr(params->window_dimensions, sizeof(params->window_dimensions),
+                 schema_params->window_dimensions(), "window_dimensions",
+                 /*expected_size=*/rank, /*fill_value=*/1));
+    TF_LITE_ENSURE_STATUS(
+        LoadAttr(params->window_strides, sizeof(params->window_strides),
+                 schema_params->window_strides(), "window_strides",
+                 /*expected_size=*/rank, /*fill_value=*/1));
+    TF_LITE_ENSURE_STATUS(
+        LoadAttr(params->base_dilations, sizeof(params->base_dilations),
+                 schema_params->base_dilations(), "base_dilations",
+                 /*expected_size=*/rank, /*fill_value=*/1));
+    TF_LITE_ENSURE_STATUS(
+        LoadAttr(params->window_dilations, sizeof(params->window_dilations),
+                 schema_params->window_dilations(), "window_dilations",
+                 /*expected_size=*/rank, /*fill_value=*/1));
+    TF_LITE_ENSURE_STATUS(LoadAttr(params->padding, sizeof(params->padding),
+                                   schema_params->padding(), "padding",
+                                   /*expected_size=*/2 * rank,
+                                   /*fill_value=*/0));  // Two padding entries per dimension.
+
+    params->body_subgraph_index = schema_params->body_subgraph_index();
+    *builtin_data = params.release();  // Ownership passes to the caller.
+    return kTfLiteOk;
+  }
+  TF_LITE_REPORT_ERROR(
+      error_reporter,
+      "Could not get 'stablehlo.reduce_window' operation parameters.");
+  return kTfLiteError;
+}
+// Parses 'stablehlo.scatter' options into TfLiteStablehloScatterParams.
+TfLiteStatus ParseStablehloScatter(const Operator* op,
+                                   ErrorReporter* error_reporter,
+                                   BuiltinDataAllocator* allocator,
+                                   void** builtin_data) {
+  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+  SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLiteStablehloScatterParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteStablehloScatterParams>();
+  TF_LITE_ENSURE(error_reporter, params != nullptr);  // Bail out if allocation failed.
+
+  const StablehloScatterOptions* schema_params =
+      op->builtin_options_2_as_StablehloScatterOptions();
+  if (schema_params) {
+    params->indices_are_sorted = schema_params->indices_are_sorted();
+
+    if (schema_params->update_window_dims()) {  // Optional; the matching count is only written when present.
+      TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray<int64_t>(
+          schema_params->update_window_dims()->size() * sizeof(int64_t),
+          schema_params->update_window_dims(), params->update_window_dims,
+          error_reporter, "stablehlo_scatter"));
+      params->num_update_window_dims =
+          schema_params->update_window_dims()->size();
+    }
+
+    if (schema_params->inserted_window_dims()) {  // Optional; same pattern as above.
+      TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray<int64_t>(
+          schema_params->inserted_window_dims()->size() * sizeof(int64_t),
+          schema_params->inserted_window_dims(), params->inserted_window_dims,
+          error_reporter, "stablehlo_scatter"));
+      params->num_inserted_window_dims =
+          schema_params->inserted_window_dims()->size();
+    }
+
+    if (schema_params->scatter_dims_to_operand_dims()) {  // Optional; same pattern as above.
+      TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray<int64_t>(
+          schema_params->scatter_dims_to_operand_dims()->size() *
+              sizeof(int64_t),
+          schema_params->scatter_dims_to_operand_dims(),
+          params->scatter_dims_to_operand_dims, error_reporter,
+          "stablehlo_scatter"));
+      params->num_scatter_dims_to_operand_dims =
+          schema_params->scatter_dims_to_operand_dims()->size();
+    }
+
+    params->index_vector_dim = schema_params->index_vector_dim();
+    params->unique_indices = schema_params->unique_indices();
+    params->update_computation_subgraph_index =
+        schema_params->update_computation_subgraph_index();
+  } else {
+    // TODO(b/157480169): We should either return kTfLiteError or fill in some
+    // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+  }
+  *builtin_data = params.release();  // Ownership passes to the caller.
+  return kTfLiteOk;
+}
+// Parses 'stablehlo.rng_bit_generator' options into TfLiteStablehloRngBitGeneratorParams.
+TfLiteStatus ParseStablehloRngBitGenerator(const Operator* op,
+                                           ErrorReporter* error_reporter,
+                                           BuiltinDataAllocator* allocator,
+                                           void** builtin_data) {
+  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+  SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLiteStablehloRngBitGeneratorParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteStablehloRngBitGeneratorParams>();
+  TF_LITE_ENSURE(error_reporter, params != nullptr);  // Bail out if allocation failed.
+
+  const StablehloRngBitGeneratorOptions* schema_params =
+      op->builtin_options_2_as_StablehloRngBitGeneratorOptions();
+  if (schema_params != nullptr) {
+    params->algorithm = ConvertRngAlgorithm(schema_params->algorithm());  // Maps the schema enum onto TfLiteRngAlgorithm.
+  } else {
+    // TODO(b/157480169): We should either return kTfLiteError or fill in some
+    // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+  }
+
+  *builtin_data = params.release();  // Ownership passes to the caller.
+  return kTfLiteOk;
+}
+// Parses 'stablehlo.gather' options into TfLiteStablehloGatherParams.
+TfLiteStatus ParseStablehloGather(const Operator* op,
+                                  ErrorReporter* error_reporter,
+                                  BuiltinDataAllocator* allocator,
+                                  void** builtin_data) {
+  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+  SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLiteStablehloGatherParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteStablehloGatherParams>();
+  TF_LITE_ENSURE(error_reporter, params != nullptr);  // Bail out if allocation failed.
+
+  const StablehloGatherOptions* schema_params =
+      op->builtin_options_2_as_StablehloGatherOptions();
+
+  if (schema_params != nullptr) {  // NOTE(review): the vectors below are dereferenced without null checks (cf. ParseStablehloScatter) — confirm the schema guarantees they are present.
+    TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray<int64_t>(
+        /*max_size_of_buffer=*/schema_params->offset_dims()->size() *
+            sizeof(int64_t),
+        /*flat_vector=*/schema_params->offset_dims(),
+        /*buffer=*/params->offset_dims, /*error_reporter=*/error_reporter,
+        /*op_name=*/"stablehlo_gather"));
+    params->num_offset_dims = schema_params->offset_dims()->size();
+
+    TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray<int64_t>(
+        schema_params->collapsed_slice_dims()->size() * sizeof(int64_t),
+        schema_params->collapsed_slice_dims(), params->collapsed_slice_dims,
+        error_reporter, "stablehlo_gather"));
+    params->num_collapsed_slice_dims =
+        schema_params->collapsed_slice_dims()->size();
+
+    TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray<int64_t>(
+        schema_params->start_index_map()->size() * sizeof(int64_t),
+        schema_params->start_index_map(), params->start_index_map,
+        error_reporter, "stablehlo_gather"));
+    params->num_start_index_map = schema_params->start_index_map()->size();
+
+    params->index_vector_dim = schema_params->index_vector_dim();
+
+    TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray<int64_t>(
+        schema_params->slice_sizes()->size() * sizeof(int64_t),
+        schema_params->slice_sizes(), params->slice_sizes, error_reporter,
+        "stablehlo_gather"));
+    params->num_slice_sizes = schema_params->slice_sizes()->size();
+
+    params->indices_are_sorted = schema_params->indices_are_sorted();
+  } else {
+    // TODO(b/157480169): We should either return kTfLiteError or fill in some
+    // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+  }
+
+  *builtin_data = params.release();  // Ownership passes to the caller.
+  return kTfLiteOk;
+}
+// Parses 'stablehlo.pad' options into TfLiteStablehloPadParams.
+TfLiteStatus ParseStablehloPad(const Operator* op,
+                               ErrorReporter* error_reporter,
+                               BuiltinDataAllocator* allocator,
+                               void** builtin_data) {
+  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+  SafeBuiltinDataAllocator safe_allocator(allocator);
+  auto params = safe_allocator.Allocate<TfLiteStablehloPadParams>();
+  const StablehloPadOptions* schema_params =
+      op->builtin_options_2_as_StablehloPadOptions();
+
+  if (schema_params) {  // Unlike the legacy parsers, a missing options table is an error here.
+    auto LoadAttr =
+        [&error_reporter](
+            int64_t* params_array, const size_t params_array_size_bytes,
+            const flatbuffers::Vector<int64_t>* const flatbuffer_vector,
+            const char* const attr_name) -> TfLiteStatus {  // Copies one attribute array, reporting which attribute failed.
+      TfLiteStatus status = FlatBufferIntVectorToArray(
+          params_array_size_bytes, flatbuffer_vector, params_array,
+          error_reporter, "stablehlo.pad");
+      if (status != kTfLiteOk) {
+        TF_LITE_REPORT_ERROR(error_reporter, "Check the '%s' attribute.",
+                             attr_name);
+      }
+      return status;
+    };
+
+    TF_LITE_ENSURE_STATUS(
+        LoadAttr(params->edge_padding_low, sizeof(params->edge_padding_low),
+                 schema_params->edge_padding_low(), "edge_padding_low"));
+    TF_LITE_ENSURE_STATUS(
+        LoadAttr(params->edge_padding_high, sizeof(params->edge_padding_high),
+                 schema_params->edge_padding_high(), "edge_padding_high"));
+    TF_LITE_ENSURE_STATUS(
+        LoadAttr(params->interior_padding, sizeof(params->interior_padding),
+                 schema_params->interior_padding(), "interior_padding"));
+    if (schema_params->edge_padding_low()->size() !=  // NOTE(review): this check runs after LoadAttr already used the vectors; null vectors reach FlatBufferIntVectorToArray first — confirm it tolerates null.
+            schema_params->edge_padding_high()->size() ||
+        schema_params->edge_padding_low()->size() !=
+            schema_params->interior_padding()->size()) {  // The three padding arrays must be the same length.
+      TF_LITE_REPORT_ERROR(error_reporter,
+                           "'stablehlo.pad' operation parameter array sizes "
+                           "are not consistent.");
+      return kTfLiteError;
+    }
+    *builtin_data = params.release();  // Ownership passes to the caller.
+    return kTfLiteOk;
+  }
+  TF_LITE_REPORT_ERROR(error_reporter,
+                       "Could not get 'stablehlo.pad' operation parameters.");
+  return kTfLiteError;
+}
+// Parses 'stablehlo.composite' options into TfLiteStablehloCompositeParams.
+TfLiteStatus ParseStablehloComposite(const Operator* op,
+                                     ErrorReporter* error_reporter,
+                                     BuiltinDataAllocator* allocator,
+                                     void** builtin_data) {
+  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+  SafeBuiltinDataAllocator safe_allocator(allocator);
+  auto params = safe_allocator.Allocate<TfLiteStablehloCompositeParams>();
+  const StableHLOCompositeOptions* schema_params =
+      op->builtin_options_2_as_StableHLOCompositeOptions();
+  if (schema_params) {
+    params->name = schema_params->name()->c_str();  // Borrowed from the flatbuffer; valid only while the model buffer lives.
+    params->version = schema_params->version();
+    params->subgraph_index = schema_params->decomposition_subgraph_index();
+    params->attributes = schema_params->composite_attributes()->data();  // NOTE(review): name()/composite_attributes() are dereferenced without null checks — confirm they are required fields.
+    params->attributes_size = schema_params->composite_attributes()->size();
+    *builtin_data = params.release();  // Ownership passes to the caller.
+    return kTfLiteOk;
+  }
+  TF_LITE_REPORT_ERROR(
+      error_reporter,
+      "Could not get 'stablehlo.composite' operation parameters.");
+  return kTfLiteError;
+}
+
 // We have this parse function instead of directly returning kTfLiteOk from the
 // switch-case in ParseOpData because this function is used as part of the
 // selective registration for the OpResolver implementation in micro.
@@ -2034,7 +2450,7 @@
   } else {
     // TODO(b/157480169): We should either return kTfLiteError or fill in some
     // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
+    // better understand the ramifications of changing the legacy behavior.
   }
 
   *builtin_data = params.release();
@@ -2075,7 +2491,7 @@
   } else {
     // TODO(b/157480169): We should either return kTfLiteError or fill in some
     // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
+    // better understand the ramifications of changing the legacy behavior.
   }
 
   *builtin_data = params.release();
@@ -2108,7 +2524,7 @@
   } else {
     // TODO(b/157480169): We should either return kTfLiteError or fill in some
     // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
+    // better understand the ramifications of changing the legacy behavior.
   }
 
   *builtin_data = params.release();
@@ -2132,7 +2548,7 @@
   } else {
     // TODO(b/157480169): We should either return kTfLiteError or fill in some
     // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
+    // better understand the ramifications of changing the legacy behavior.
   }
 
   *builtin_data = params.release();
@@ -2156,7 +2572,7 @@
   } else {
     // TODO(b/157480169): We should either return kTfLiteError or fill in some
     // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
+    // better understand the ramifications of changing the legacy behavior.
   }
 
   *builtin_data = params.release();
@@ -2214,7 +2630,7 @@
   } else {
     // TODO(b/157480169): We should either return kTfLiteError or fill in some
     // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
+    // better understand the ramifications of changing the legacy behavior.
   }
 
   *builtin_data = params.release();
@@ -2270,7 +2686,7 @@
   } else {
     // TODO(b/157480169): We should either return kTfLiteError or fill in some
     // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
+    // better understand the ramifications of changing the legacy behavior.
   }
 
   *builtin_data = params.release();
@@ -2295,7 +2711,7 @@
   } else {
     // TODO(b/157480169): We should either return kTfLiteError or fill in some
     // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
+    // better understand the ramifications of changing the legacy behavior.
   }
 
   *builtin_data = params.release();
@@ -2322,7 +2738,7 @@
   } else {
     // TODO(b/157480169): We should either return kTfLiteError or fill in some
     // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
+    // better understand the ramifications of changing the legacy behavior.
   }
 
   *builtin_data = params.release();
@@ -2365,10 +2781,13 @@
 
     params->activation =
         ConvertActivation(transpose_conv_params->fused_activation_function());
+    TF_LITE_ENSURE_STATUS(
+        ConvertTensorType(transpose_conv_params->quantized_bias_type(),
+                          &params->quantized_bias_type, error_reporter));
   } else {
     // TODO(b/157480169): We should either return kTfLiteError or fill in some
     // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
+    // better understand the ramifications of changing the legacy behavior.
   }
   *builtin_data = params.release();
   return kTfLiteOk;
@@ -2392,7 +2811,7 @@
   } else {
     // TODO(b/157480169): We should either return kTfLiteError or fill in some
     // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
+    // better understand the ramifications of changing the legacy behavior.
   }
 
   *builtin_data = params.release();
@@ -2423,7 +2842,7 @@
   } else {
     // TODO(b/157480169): We should either return kTfLiteError or fill in some
     // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
+    // better understand the ramifications of changing the legacy behavior.
   }
 
   *builtin_data = params.release();
@@ -2448,7 +2867,7 @@
   } else {
     // TODO(b/157480169): We should either return kTfLiteError or fill in some
     // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
+    // better understand the ramifications of changing the legacy behavior.
   }
 
   *builtin_data = params.release();
diff --git a/tensorflow/lite/core/api/flatbuffer_conversions.h b/tensorflow/lite/core/api/flatbuffer_conversions.h
index 9ffe397..c01e887 100644
--- a/tensorflow/lite/core/api/flatbuffer_conversions.h
+++ b/tensorflow/lite/core/api/flatbuffer_conversions.h
@@ -420,6 +420,36 @@
                              BuiltinDataAllocator* allocator,
                              void** builtin_data);
 
+TfLiteStatus ParseStablehloScatter(const Operator* op,
+                                   ErrorReporter* error_reporter,
+                                   BuiltinDataAllocator* allocator,
+                                   void** builtin_data);
+
+TfLiteStatus ParseStablehloRngBitGenerator(const Operator* op,
+                                           ErrorReporter* error_reporter,
+                                           BuiltinDataAllocator* allocator,
+                                           void** builtin_data);
+
+TfLiteStatus ParseStablehloGather(const Operator* op,
+                                  ErrorReporter* error_reporter,
+                                  BuiltinDataAllocator* allocator,
+                                  void** builtin_data);
+
+TfLiteStatus ParseStablehloReduceWindow(const Operator* op,
+                                        ErrorReporter* error_reporter,
+                                        BuiltinDataAllocator* allocator,
+                                        void** builtin_data);
+
+TfLiteStatus ParseStablehloPad(const Operator* op,
+                               ErrorReporter* error_reporter,
+                               BuiltinDataAllocator* allocator,
+                               void** builtin_data);
+
+TfLiteStatus ParseStablehloComposite(const Operator* op,
+                                     ErrorReporter* error_reporter,
+                                     BuiltinDataAllocator* allocator,
+                                     void** builtin_data);
+
 }  // namespace tflite
 
 #endif  // TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_
diff --git a/tensorflow/lite/core/c/builtin_op_data.h b/tensorflow/lite/core/c/builtin_op_data.h
index e9c6eb3..e1428e7 100644
--- a/tensorflow/lite/core/c/builtin_op_data.h
+++ b/tensorflow/lite/core/c/builtin_op_data.h
@@ -21,6 +21,7 @@
 #define TENSORFLOW_LITE_CORE_C_BUILTIN_OP_DATA_H_
 
 #include <stdbool.h>
+#include <stddef.h>
 #include <stdint.h>
 
 #include "tensorflow/lite/core/c/common.h"
@@ -32,6 +33,10 @@
 // TfLiteReshapeParams can't have dynamic data so we fix the maximum possible
 // number of dimensions.
 #define TFLITE_RESHAPE_PARAMS_MAX_DIMENSION_COUNT 8
+#define TFLITE_STABLEHLO_SCATTER_PARAMS_MAX_DIMENSION_COUNT 8
+#define TFLITE_STABLEHLO_GATHER_PARAMS_MAX_DIMENSION_COUNT 8
+#define TFLITE_STABLEHLO_REDUCE_WINDOW_PARAMS_MAX_DIMENSION_COUNT 8
+#define TFLITE_STABLEHLO_PAD_PARAMS_MAX_DIMENSION_COUNT 8
 
 // TODO(aselle): Consider using "if this then that" for testing.
 
@@ -90,6 +95,10 @@
   // Note: Version 2 supports dilation values not equal to 1.
   int dilation_width_factor;
   int dilation_height_factor;
+
+  // Parameters for CONV_2D version 7 or above.
+  // Used to determine the default value for the quantized bias.
+  TfLiteType quantized_bias_type;
 } TfLiteConvParams;
 
 typedef struct {
@@ -193,6 +202,10 @@
   // If set to true and the weights are quantized, then non constant inputs
   // are quantized at evaluation time with asymmetric quantization.
   bool asymmetric_quantize_inputs;
+
+  // Parameters for FullyConnected version 10 or above.
+  // Used to determine the default value for the quantized bias.
+  TfLiteType quantized_bias_type;
 } TfLiteFullyConnectedParams;
 
 typedef enum {
@@ -341,7 +354,7 @@
   // These fields are only used in old models for backward compatibility.
   // In the current implementation, we use the 2nd input of the op as the shape,
   // and these fields are unused.
-  int shape[TFLITE_RESHAPE_PARAMS_MAX_DIMENSION_COUNT];
+  int32_t shape[TFLITE_RESHAPE_PARAMS_MAX_DIMENSION_COUNT];
   int num_dimensions;
 } TfLiteReshapeParams;
 
@@ -398,7 +411,7 @@
 typedef struct {
   // TODO(ahentz): We can't have dynamic data in this struct, at least not yet.
   // For now we will fix the maximum possible number of dimensions.
-  int squeeze_dims[8];
+  int32_t squeeze_dims[8];
   int num_squeeze_dims;
 } TfLiteSqueezeParams;
 
@@ -430,6 +443,10 @@
 
   // Parameters supported by version 4:
   TfLiteFusedActivation activation;
+
+  // Parameters for TransposeConv version 5 or above.
+  // Used to determine the default value for the quantized bias.
+  TfLiteType quantized_bias_type;
 } TfLiteTransposeConvParams;
 
 typedef struct {
@@ -535,6 +552,108 @@
   bool approximate;
 } TfLiteGeluParams;
 
+typedef struct {
+  int64_t dimension;
+} TfLiteStablehloConcatenateParams;
+
+typedef struct {
+  // See the stablehlo spec for the explanation of the attributes:
+  // https://github.com/openxla/stablehlo/blob/main/docs/spec.md#scatter
+  bool indices_are_sorted;
+  int64_t
+      update_window_dims[TFLITE_STABLEHLO_SCATTER_PARAMS_MAX_DIMENSION_COUNT];
+  int num_update_window_dims;
+  int64_t
+      inserted_window_dims[TFLITE_STABLEHLO_SCATTER_PARAMS_MAX_DIMENSION_COUNT];
+  int num_inserted_window_dims;
+  int64_t scatter_dims_to_operand_dims
+      [TFLITE_STABLEHLO_SCATTER_PARAMS_MAX_DIMENSION_COUNT];
+  int num_scatter_dims_to_operand_dims;
+  int64_t index_vector_dim;
+  bool unique_indices;
+  int update_computation_subgraph_index;
+} TfLiteStablehloScatterParams;
+
+typedef enum {
+  kTfLiteRngAlgorithmUnknown = 0,
+  // An algorithm auto-selected by the system according to device type.
+  kTfLiteRngAlgorithmDefault,
+  // The Philox algorithm, as described in paper
+  // ['Parallel Random Numbers: As Easy as 1, 2, 3']
+  // (https://www.thesalmons.org/john/random123/papers/random123sc11.pdf)
+  kTfLiteRngAlgorithmPhilox,
+  // The ThreeFry algorithm, as described in paper
+  // ['Parallel Random Numbers: As Easy as 1, 2, 3']
+  // (https://www.thesalmons.org/john/random123/papers/random123sc11.pdf)
+  kTfLiteRngAlgorithmThreefry,
+} TfLiteRngAlgorithm;
+
+typedef struct {
+  TfLiteRngAlgorithm algorithm;
+} TfLiteStablehloRngBitGeneratorParams;
+
+typedef struct {
+  // See the stablehlo spec for the explanation of the attributes:
+  // https://github.com/openxla/stablehlo/blob/main/docs/spec.md#gather
+  int64_t offset_dims[TFLITE_STABLEHLO_GATHER_PARAMS_MAX_DIMENSION_COUNT];
+  int num_offset_dims;
+  int64_t
+      collapsed_slice_dims[TFLITE_STABLEHLO_GATHER_PARAMS_MAX_DIMENSION_COUNT];
+  int num_collapsed_slice_dims;
+  int64_t start_index_map[TFLITE_STABLEHLO_GATHER_PARAMS_MAX_DIMENSION_COUNT];
+  int num_start_index_map;
+  int64_t index_vector_dim;
+  int64_t slice_sizes[TFLITE_STABLEHLO_GATHER_PARAMS_MAX_DIMENSION_COUNT];
+  int num_slice_sizes;
+  bool indices_are_sorted;
+} TfLiteStablehloGatherParams;
+
+typedef struct {
+  // See the stablehlo spec for the explanation of the attributes:
+  // https://github.com/openxla/stablehlo/blob/main/docs/spec.md#reduce_window
+  int64_t window_dimensions
+      [TFLITE_STABLEHLO_REDUCE_WINDOW_PARAMS_MAX_DIMENSION_COUNT];
+  int64_t
+      window_strides[TFLITE_STABLEHLO_REDUCE_WINDOW_PARAMS_MAX_DIMENSION_COUNT];
+  int64_t
+      base_dilations[TFLITE_STABLEHLO_REDUCE_WINDOW_PARAMS_MAX_DIMENSION_COUNT];
+  int64_t window_dilations
+      [TFLITE_STABLEHLO_REDUCE_WINDOW_PARAMS_MAX_DIMENSION_COUNT];
+  int64_t
+      padding[2 * TFLITE_STABLEHLO_REDUCE_WINDOW_PARAMS_MAX_DIMENSION_COUNT];
+  int body_subgraph_index;
+} TfLiteStablehloReduceWindowParams;
+
+enum TfLiteReduceWindowFunction {
+  TfLiteReduceWindowFunctionUnsupported,
+  TfLiteReduceWindowFunctionAdd,
+  TfLiteReduceWindowFunctionMul,
+  TfLiteReduceWindowFunctionMin,
+  TfLiteReduceWindowFunctionMax,
+  TfLiteReduceWindowFunctionAll,
+  TfLiteReduceWindowFunctionAny
+};
+
+typedef struct {
+  enum TfLiteReduceWindowFunction reduce_function;
+} TfLiteReduceWindowParams;
+
+typedef struct {
+  // See the stablehlo spec for the explanation of the attributes:
+  // https://github.com/openxla/stablehlo/blob/main/docs/spec.md#pad
+  int64_t edge_padding_low[TFLITE_STABLEHLO_PAD_PARAMS_MAX_DIMENSION_COUNT];
+  int64_t edge_padding_high[TFLITE_STABLEHLO_PAD_PARAMS_MAX_DIMENSION_COUNT];
+  int64_t interior_padding[TFLITE_STABLEHLO_PAD_PARAMS_MAX_DIMENSION_COUNT];
+} TfLiteStablehloPadParams;
+
+typedef struct {
+  const char* name;
+  int32_t subgraph_index;
+  int32_t version;
+  const uint8_t* attributes;
+  size_t attributes_size;
+} TfLiteStablehloCompositeParams;
+
 #ifdef __cplusplus
 }  // extern "C"
 #endif  // __cplusplus
diff --git a/tensorflow/lite/core/c/c_api_types.h b/tensorflow/lite/core/c/c_api_types.h
index 3a6594d..32cefa8 100644
--- a/tensorflow/lite/core/c/c_api_types.h
+++ b/tensorflow/lite/core/c/c_api_types.h
@@ -12,16 +12,24 @@
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
+// WARNING: Users of TensorFlow Lite should not include this file directly, but
+// should instead include "third_party/tensorflow/lite/c/c_api_types.h".
+// Only the TensorFlow Lite implementation itself should include this file
+// directly.
 
 /// This file declares types used by the pure C inference API defined in
 /// c_api.h, some of which are also used in the C++ and C kernel and interpreter
 /// APIs.
-
-// WARNING: Users of TensorFlow Lite should not include this file directly,
-// but should instead include
-// "third_party/tensorflow/lite/c/c_api_types.h".
-// Only the TensorFlow Lite implementation itself should include this
-// file directly.
+///
+// clang-format off
+// NOLINTBEGIN(whitespace/line_length)
+/// \note Users of TensorFlow Lite should use
+/// \code
+/// #include "tensorflow/lite/c/c_api_types.h"
+/// \endcode
+/// to access the APIs documented on this page.
+// NOLINTEND(whitespace/line_length)
+// clang-format on
 
 // IWYU pragma: private, include "third_party/tensorflow/lite/c/c_api_types.h"
 
@@ -34,9 +42,13 @@
 extern "C" {
 #endif
 
-/** \addtogroup c_api_types tensorflow/lite/c/c_api_types.h
+// clang-format off
+// NOLINTBEGIN(whitespace/line_length)
+/** \defgroup c_api_types lite/c/c_api_types.h
  *  @{
  */
+// NOLINTEND(whitespace/line_length)
+// clang-format on
 
 // Define TFL_CAPI_EXPORT macro to export a function properly with a shared
 // library.
@@ -121,14 +133,14 @@
   kTfLiteUInt32 = 16,
   kTfLiteUInt16 = 17,
   kTfLiteInt4 = 18,
+  kTfLiteBFloat16 = 19,
 } TfLiteType;
 
-/// Legacy. Will be deprecated in favor of TfLiteAffineQuantization.
+/// Legacy. Will be deprecated in favor of `TfLiteAffineQuantization`.
 /// If per-layer quantization is specified this field will still be populated in
-/// addition to TfLiteAffineQuantization.
+/// addition to `TfLiteAffineQuantization`.
 /// Parameters for asymmetric quantization. Quantized values can be converted
-/// back to float using:
-///     real_value = scale * (quantized_value - zero_point)
+/// back to float using: `real_value = scale * (quantized_value - zero_point)`
 typedef struct TfLiteQuantizationParams {
   float scale;
   int32_t zero_point;
@@ -156,6 +168,7 @@
 /// This is an abstract type that is intended to have the same
 /// role as TfLiteDelegate, but without exposing the implementation
 /// details of how delegates are implemented.
+///
 /// WARNING: This is an experimental type and subject to change.
 typedef struct TfLiteOpaqueDelegateStruct TfLiteOpaqueDelegateStruct;
 
@@ -163,6 +176,7 @@
 /// TfLiteDelegate; allows delegation of nodes to alternative backends.
 /// For TF Lite in Play Services, this is an opaque type,
 /// but for regular TF Lite, this is just a typedef for TfLiteDelegate.
+///
 /// WARNING: This is an experimental type and subject to change.
 #if TFLITE_WITH_STABLE_ABI || TFLITE_USE_OPAQUE_DELEGATE
 typedef TfLiteOpaqueDelegateStruct TfLiteOpaqueDelegate;
diff --git a/tensorflow/lite/core/c/common.cc b/tensorflow/lite/core/c/common.cc
index 367f175..7afecdb 100644
--- a/tensorflow/lite/core/c/common.cc
+++ b/tensorflow/lite/core/c/common.cc
@@ -370,6 +370,8 @@
       return "STRING";
     case kTfLiteFloat16:
       return "FLOAT16";
+    case kTfLiteBFloat16:
+      return "BFLOAT16";
     case kTfLiteFloat64:
       return "FLOAT64";
     case kTfLiteResource:
@@ -384,40 +386,128 @@
 
 TfLiteDelegate TfLiteDelegateCreate() { return TfLiteDelegate{}; }
 
-#ifndef TF_LITE_STATIC_MEMORY
-TfLiteOpaqueDelegate* TfLiteOpaqueDelegateCreate(
-    const TfLiteOpaqueDelegateBuilder* opaque_delegate_builder) {
-  if (!opaque_delegate_builder) return nullptr;
-
-  TfLiteDelegate* result = new TfLiteDelegate{};
-  result->opaque_delegate_builder = new TfLiteOpaqueDelegateBuilder{};
-  *(result->opaque_delegate_builder) = *opaque_delegate_builder;
-
-  return reinterpret_cast<TfLiteOpaqueDelegate*>(result);
+// Returns a tensor data allocation strategy.
+TfLiteAllocationStrategy TfLiteTensorGetAllocationStrategy(
+    const TfLiteTensor* const t) {
+  switch (t->allocation_type) {
+    case kTfLiteMemNone:
+      return kTfLiteAllocationStrategyNone;
+    case kTfLiteMmapRo:
+      return kTfLiteAllocationStrategyMMap;
+    case kTfLiteArenaRw:
+      return kTfLiteAllocationStrategyArena;
+    case kTfLiteArenaRwPersistent:
+      return kTfLiteAllocationStrategyArena;
+    case kTfLiteDynamic:
+      return kTfLiteAllocationStrategyMalloc;
+    case kTfLitePersistentRo:
+      return kTfLiteAllocationStrategyUnknown;
+    case kTfLiteCustom:
+      return kTfLiteAllocationStrategyUnknown;
+    case kTfLiteVariantObject:
+      return kTfLiteAllocationStrategyNew;
+  }
+  return kTfLiteAllocationStrategyUnknown;
 }
 
-void TfLiteOpaqueDelegateDelete(TfLiteOpaqueDelegate* opaque_delegate) {
-  if (!opaque_delegate) return;
-
-  const TfLiteDelegate* tflite_delegate =
-      reinterpret_cast<const TfLiteDelegate*>(opaque_delegate);
-  delete tflite_delegate->opaque_delegate_builder;
-  delete tflite_delegate;
+// Returns how stable a tensor data buffer address is across runs.
+TfLiteRunStability TfLiteTensorGetBufferAddressStability(
+    const TfLiteTensor* const t) {
+  switch (t->allocation_type) {
+    case kTfLiteMemNone:
+      return kTfLiteRunStabilityAcrossRuns;
+    case kTfLiteMmapRo:
+      return kTfLiteRunStabilityAcrossRuns;
+    case kTfLiteArenaRw:
+      return kTfLiteRunStabilityUnstable;
+    case kTfLiteArenaRwPersistent:
+      return kTfLiteRunStabilityUnstable;
+    case kTfLiteDynamic:
+      return kTfLiteRunStabilitySingleRun;
+    case kTfLitePersistentRo:
+      return kTfLiteRunStabilitySingleRun;
+    case kTfLiteCustom:
+      return kTfLiteRunStabilityUnknown;
+    case kTfLiteVariantObject:
+      return kTfLiteRunStabilityAcrossRuns;
+  }
+  return kTfLiteRunStabilityUnknown;
 }
-#endif  // TF_LITE_STATIC_MEMORY
 
-void* TfLiteOpaqueDelegateGetData(const TfLiteOpaqueDelegate* delegate) {
-  if (!delegate) return nullptr;
+// Returns how stable tensor data values are across runs.
+TfLiteRunStability TfLiteTensorGetDataStability(const TfLiteTensor* const t) {
+  switch (t->allocation_type) {
+    case kTfLiteMemNone:
+      return kTfLiteRunStabilityAcrossRuns;
+    case kTfLiteMmapRo:
+      return kTfLiteRunStabilityAcrossRuns;
+    case kTfLiteArenaRw:
+      return kTfLiteRunStabilitySingleRun;
+    case kTfLiteArenaRwPersistent:
+      return kTfLiteRunStabilityAcrossRuns;
+    case kTfLiteDynamic:
+      return kTfLiteRunStabilitySingleRun;
+    case kTfLitePersistentRo:
+      return kTfLiteRunStabilitySingleRun;
+    case kTfLiteCustom:
+      return kTfLiteRunStabilityUnknown;
+    case kTfLiteVariantObject:
+      return kTfLiteRunStabilitySingleRun;
+  }
+  return kTfLiteRunStabilityUnknown;
+}
 
-  // The following cast is safe only because this code is part of the
-  // TF Lite runtime implementation.  Apps using TF Lite should not rely on
-  // 'TfLiteOpaqueDelegate' and 'TfLiteDelegate' being equivalent.
-  const auto* tflite_delegate =
-      reinterpret_cast<const TfLiteDelegate*>(delegate);
+// Returns the operation step when the data of a tensor is populated.
+//
+// Some operations can precompute their results before the evaluation step. This
+// makes the data available earlier for subsequent operations.
+TfLiteRunStep TfLiteTensorGetDataKnownStep(const TfLiteTensor* t) {
+  switch (t->allocation_type) {
+    case kTfLiteMemNone:
+      return kTfLiteRunStepInit;
+    case kTfLiteMmapRo:
+      return kTfLiteRunStepInit;
+    case kTfLiteArenaRw:
+      return kTfLiteRunStepEval;
+    case kTfLiteArenaRwPersistent:
+      return kTfLiteRunStepEval;
+    case kTfLiteDynamic:
+      return kTfLiteRunStepEval;
+    case kTfLitePersistentRo:
+      return kTfLiteRunStepPrepare;
+    case kTfLiteCustom:
+      return kTfLiteRunStepUnknown;
+    case kTfLiteVariantObject:
+      return kTfLiteRunStepEval;
+  }
+  return kTfLiteRunStepUnknown;
+}
 
-  if (!tflite_delegate->opaque_delegate_builder) return tflite_delegate->data_;
-
-  return tflite_delegate->opaque_delegate_builder->data;
+// Returns the operation step when the shape of a tensor is computed.
+//
+// Some operations can precompute the shape of their results before the
+// evaluation step. This makes the shape available earlier for subsequent
+// operations.
+TfLiteRunStep TfLiteTensorGetShapeKnownStep(const TfLiteTensor* t) {
+  switch (t->allocation_type) {
+    case kTfLiteMemNone:
+      return kTfLiteRunStepInit;
+    case kTfLiteMmapRo:
+      return kTfLiteRunStepInit;
+    case kTfLiteArenaRw:
+      return kTfLiteRunStepPrepare;
+    case kTfLiteArenaRwPersistent:
+      return kTfLiteRunStepPrepare;
+    case kTfLiteDynamic:
+      return kTfLiteRunStepEval;
+    case kTfLitePersistentRo:
+      return kTfLiteRunStepPrepare;
+    case kTfLiteCustom:
+      return kTfLiteRunStepUnknown;
+    case kTfLiteVariantObject:
+      return kTfLiteRunStepEval;
+  }
+  return kTfLiteRunStepUnknown;
 }
 
 }  // extern "C"
diff --git a/tensorflow/lite/core/c/common.h b/tensorflow/lite/core/c/common.h
index 19a74a7..648b862 100644
--- a/tensorflow/lite/core/c/common.h
+++ b/tensorflow/lite/core/c/common.h
@@ -12,32 +12,41 @@
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
+// WARNING: Users of TensorFlow Lite should not include this file directly, but
+// should instead include "third_party/tensorflow/lite/c/common.h".
+// Only the TensorFlow Lite implementation itself should include this file
+// directly.
 
-// This file defines common C types and APIs for implementing operations,
-// delegates and other constructs in TensorFlow Lite. The actual operations and
-// delegates can be defined using C++, but the interface between the interpreter
-// and the operations are C.
-//
-// Summary of abstractions
-// TF_LITE_ENSURE - Self-sufficient error checking
-// TfLiteStatus - Status reporting
-// TfLiteIntArray - stores tensor shapes (dims),
-// TfLiteContext - allows an op to access the tensors
-// TfLiteTensor - tensor (a multidimensional array)
-// TfLiteNode - a single node or operation
-// TfLiteRegistration - the implementation of a conceptual operation.
-// TfLiteDelegate - allows delegation of nodes to alternative backends.
-//
-// Some abstractions in this file are created and managed by Interpreter.
-//
-// NOTE: The order of values in these structs are "semi-ABI stable". New values
-// should be added only to the end of structs and never reordered.
+/// This file defines common C types and APIs for implementing operations,
+/// delegates and other constructs in TensorFlow Lite. The actual operations and
+/// delegates can be defined using C++, but the interface between the
+/// interpreter and the operations are C.
+///
+/// Summary of abstractions:
+/// * `TF_LITE_ENSURE` - self-sufficient error checking
+/// * `TfLiteStatus` - status reporting
+/// * `TfLiteIntArray` - stores tensor shapes (dims),
+/// * `TfLiteContext` - allows an op to access the tensors
+/// * `TfLiteTensor` - tensor (a multidimensional array)
+/// * `TfLiteNode` - a single node or operation
+/// * `TfLiteRegistration` - the implementation of a conceptual operation.
+/// * `TfLiteDelegate` - allows delegation of nodes to alternative backends.
+///
+/// Some abstractions in this file are created and managed by Interpreter.
+///
+/// NOTE: The order of values in these structs are "semi-ABI stable". New values
+/// should be added only to the end of structs and never reordered.
+///
+// clang-format off
+// NOLINTBEGIN(whitespace/line_length)
+/// \note Users of TensorFlow Lite should use
+/// \code
+/// #include "tensorflow/lite/c/common.h"
+/// \endcode
+/// to access the APIs documented on this page.
+// NOLINTEND(whitespace/line_length)
+// clang-format on
 
-/// WARNING: Users of TensorFlow Lite should not include this file directly,
-/// but should instead include
-/// "third_party/tensorflow/lite/c/common.h".
-/// Only the TensorFlow Lite implementation itself should include this
-/// file directly.
 // IWYU pragma: private, include "third_party/tensorflow/lite/c/common.h"
 
 #ifndef TENSORFLOW_LITE_CORE_C_COMMON_H_
@@ -54,15 +63,23 @@
 extern "C" {
 #endif  // __cplusplus
 
-// The list of external context types known to TF Lite. This list exists solely
-// to avoid conflicts and to ensure ops can share the external contexts they
-// need. Access to the external contexts is controlled by one of the
-// corresponding support files.
+// clang-format off
+// NOLINTBEGIN(whitespace/line_length)
+/** \defgroup common lite/c/common.h
+ *  @{
+ */
+// NOLINTEND(whitespace/line_length)
+// clang-format on
+
+/// The list of external context types known to TF Lite. This list exists solely
+/// to avoid conflicts and to ensure ops can share the external contexts they
+/// need. Access to the external contexts is controlled by one of the
+/// corresponding support files.
 typedef enum TfLiteExternalContextType {
-  kTfLiteEigenContext = 0,       // include eigen_support.h to use.
-  kTfLiteGemmLowpContext = 1,    // include gemm_support.h to use.
-  kTfLiteEdgeTpuContext = 2,     // Placeholder for Edge TPU support.
-  kTfLiteCpuBackendContext = 3,  // include cpu_backend_context.h to use.
+  kTfLiteEigenContext = 0,       /// include eigen_support.h to use.
+  kTfLiteGemmLowpContext = 1,    /// include gemm_support.h to use.
+  kTfLiteEdgeTpuContext = 2,     /// Placeholder for Edge TPU support.
+  kTfLiteCpuBackendContext = 3,  /// include cpu_backend_context.h to use.
   kTfLiteMaxExternalContexts = 4
 } TfLiteExternalContextType;
 
@@ -73,20 +90,22 @@
 struct TfLiteRegistration;
 struct TfLiteOpaqueDelegateBuilder;
 
-// An external context is a collection of information unrelated to the TF Lite
-// framework, but useful to a subset of the ops. TF Lite knows very little
-// about the actual contexts, but it keeps a list of them, and is able to
-// refresh them if configurations like the number of recommended threads
-// change.
+/// An external context is a collection of information unrelated to the TF Lite
+/// framework, but useful to a subset of the ops. TF Lite knows very little
+/// about the actual contexts, but it keeps a list of them, and is able to
+/// refresh them if configurations like the number of recommended threads
+/// change.
 typedef struct TfLiteExternalContext {
   TfLiteExternalContextType type;
   TfLiteStatus (*Refresh)(struct TfLiteContext* context);
 } TfLiteExternalContext;
 
+// LINT.IfChange(optional_tensor)
 #define kTfLiteOptionalTensor (-1)
+// LINT.ThenChange(//tensorflow/compiler/mlir/lite/flatbuffer_export.cc:optional_tensor)
 
-// Fixed size list of integers. Used for dimensions and inputs/outputs tensor
-// indices
+/// Fixed size list of integers. Used for dimensions and inputs/outputs tensor
+/// indices
 typedef struct TfLiteIntArray {
   int size;
 
@@ -105,33 +124,33 @@
 #endif
 } TfLiteIntArray;
 
-// Given the size (number of elements) in a TfLiteIntArray, calculate its size
-// in bytes.
+/// Given the size (number of elements) in a TfLiteIntArray, calculate its size
+/// in bytes.
 size_t TfLiteIntArrayGetSizeInBytes(int size);
 
 #ifndef TF_LITE_STATIC_MEMORY
-// Create a array of a given `size` (uninitialized entries).
-// This returns a pointer, that you must free using TfLiteIntArrayFree().
+/// Create an array of a given `size` (uninitialized entries).
+/// This returns a pointer, that you must free using TfLiteIntArrayFree().
 TfLiteIntArray* TfLiteIntArrayCreate(int size);
 #endif
 
-// Check if two intarrays are equal. Returns 1 if they are equal, 0 otherwise.
+/// Check if two intarrays are equal. Returns 1 if they are equal, 0 otherwise.
 int TfLiteIntArrayEqual(const TfLiteIntArray* a, const TfLiteIntArray* b);
 
-// Check if an intarray equals an array. Returns 1 if equals, 0 otherwise.
+/// Check if an intarray equals an array. Returns 1 if equals, 0 otherwise.
 int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size,
                               const int b_data[]);
 
 #ifndef TF_LITE_STATIC_MEMORY
-// Create a copy of an array passed as `src`.
-// You are expected to free memory with TfLiteIntArrayFree
+/// Create a copy of an array passed as `src`.
+/// You are expected to free memory with TfLiteIntArrayFree
 TfLiteIntArray* TfLiteIntArrayCopy(const TfLiteIntArray* src);
 
-// Free memory of array `a`.
+/// Free memory of array `a`.
 void TfLiteIntArrayFree(TfLiteIntArray* a);
 #endif  // TF_LITE_STATIC_MEMORY
 
-// Fixed size list of floats. Used for per-channel quantization.
+/// Fixed size list of floats. Used for per-channel quantization.
 typedef struct TfLiteFloatArray {
   int size;
 #if defined(_MSC_VER)
@@ -149,20 +168,20 @@
 #endif
 } TfLiteFloatArray;
 
-// Given the size (number of elements) in a TfLiteFloatArray, calculate its size
-// in bytes.
+/// Given the size (number of elements) in a TfLiteFloatArray, calculate its
+/// size in bytes.
 int TfLiteFloatArrayGetSizeInBytes(int size);
 
 #ifndef TF_LITE_STATIC_MEMORY
-// Create a array of a given `size` (uninitialized entries).
-// This returns a pointer, that you must free using TfLiteFloatArrayFree().
+/// Create an array of a given `size` (uninitialized entries).
+/// This returns a pointer, that you must free using TfLiteFloatArrayFree().
 TfLiteFloatArray* TfLiteFloatArrayCreate(int size);
 
-// Create a copy of an array passed as `src`.
-// You are expected to free memory with TfLiteFloatArrayFree.
+/// Create a copy of an array passed as `src`.
+/// You are expected to free memory with TfLiteFloatArrayFree.
 TfLiteFloatArray* TfLiteFloatArrayCopy(const TfLiteFloatArray* src);
 
-// Free memory of array `a`.
+/// Free memory of array `a`.
 void TfLiteFloatArrayFree(TfLiteFloatArray* a);
 #endif  // TF_LITE_STATIC_MEMORY
 
@@ -191,18 +210,18 @@
 #define TF_LITE_MAYBE_KERNEL_LOG(context, ...) ARGS_UNUSED(__VA_ARGS__)
 #endif  // TF_LITE_STRIP_ERROR_STRINGS
 
-// Check whether value is true, and if not return kTfLiteError from
-// the current function (and report the error string msg).
-#define TF_LITE_ENSURE_MSG(context, value, msg)        \
-  do {                                                 \
-    if (!(value)) {                                    \
-      TF_LITE_KERNEL_LOG((context), __FILE__ " " msg); \
-      return kTfLiteError;                             \
-    }                                                  \
+/// Check whether value is true, and if not return kTfLiteError from
+/// the current function (and report the error string msg).
+#define TF_LITE_ENSURE_MSG(context, value, ...)                \
+  do {                                                         \
+    if (!(value)) {                                            \
+      TF_LITE_KERNEL_LOG((context), __FILE__ " " __VA_ARGS__); \
+      return kTfLiteError;                                     \
+    }                                                          \
   } while (0)
 
-// Check whether the value `a` is true, and if not return kTfLiteError from
-// the current function, while also reporting the location of the error.
+/// Check whether the value `a` is true, and if not return kTfLiteError from
+/// the current function, while also reporting the location of the error.
 #define TF_LITE_ENSURE(context, a)                                      \
   do {                                                                  \
     if (!(a)) {                                                         \
@@ -220,11 +239,12 @@
     }                            \
   } while (0)
 
-// Check whether the value `a == b` is true, and if not return kTfLiteError from
-// the current function, while also reporting the location of the error.
-// `a` and `b` may be evaluated more than once, so no side effects or
-// extremely expensive computations should be done.
-// NOTE: Use TF_LITE_ENSURE_TYPES_EQ if comparing TfLiteTypes.
+/// Check whether the value `a == b` is true, and if not return kTfLiteError
+/// from the current function, while also reporting the location of the error.
+/// `a` and `b` may be evaluated more than once, so no side effects or
+/// extremely expensive computations should be done.
+///
+/// NOTE: Use TF_LITE_ENSURE_TYPES_EQ if comparing TfLiteTypes.
 #define TF_LITE_ENSURE_EQ(context, a, b)                                   \
   do {                                                                     \
     if ((a) != (b)) {                                                      \
@@ -263,61 +283,69 @@
     }                                      \
   } while (0)
 
-// Single-precision complex data type compatible with the C99 definition.
+/// Single-precision complex data type compatible with the C99 definition.
 typedef struct TfLiteComplex64 {
-  float re, im;  // real and imaginary parts, respectively.
+  float re, im;  /// real and imaginary parts, respectively.
 } TfLiteComplex64;
 
-// Double-precision complex data type compatible with the C99 definition.
+/// Double-precision complex data type compatible with the C99 definition.
 typedef struct TfLiteComplex128 {
-  double re, im;  // real and imaginary parts, respectively.
+  double re, im;  /// real and imaginary parts, respectively.
 } TfLiteComplex128;
 
-// Half precision data type compatible with the C99 definition.
+/// Half precision data type compatible with the C99 definition.
 typedef struct TfLiteFloat16 {
   uint16_t data;
 } TfLiteFloat16;
 
-// Return the name of a given type, for error reporting purposes.
+/// bfloat16 data type compatible with the Google Brain definition.
+/// https://cloud.google.com/tpu/docs/bfloat16.
+/// This provides 1 bit of sign, 8 bits of exponent, and 7 bits of mantissa.
+typedef struct TfLiteBFloat16 {
+  uint16_t data;
+} TfLiteBFloat16;
+
+/// Return the name of a given type, for error reporting purposes.
 const char* TfLiteTypeGetName(TfLiteType type);
 
-// SupportedQuantizationTypes.
+/// SupportedQuantizationTypes.
 typedef enum TfLiteQuantizationType {
-  // No quantization.
+  /// No quantization.
   kTfLiteNoQuantization = 0,
-  // Affine quantization (with support for per-channel quantization).
-  // Corresponds to TfLiteAffineQuantization.
+  /// Affine quantization (with support for per-channel quantization).
+  /// Corresponds to TfLiteAffineQuantization.
   kTfLiteAffineQuantization = 1,
 } TfLiteQuantizationType;
 
-// Structure specifying the quantization used by the tensor, if-any.
+/// Structure specifying the quantization used by the tensor, if-any.
 typedef struct TfLiteQuantization {
-  // The type of quantization held by params.
+  /// The type of quantization held by params.
   TfLiteQuantizationType type;
-  // Holds an optional reference to a quantization param structure. The actual
-  // type depends on the value of the `type` field (see the comment there for
-  // the values and corresponding types).
+  /// Holds an optional reference to a quantization param structure. The actual
+  /// type depends on the value of the `type` field (see the comment there for
+  /// the values and corresponding types).
   void* params;
 } TfLiteQuantization;
 
-// Parameters for asymmetric quantization across a dimension (i.e per output
-// channel quantization).
-// quantized_dimension specifies which dimension the scales and zero_points
-// correspond to.
-// For a particular value in quantized_dimension, quantized values can be
-// converted back to float using:
-//     real_value = scale * (quantized_value - zero_point)
+/// Parameters for asymmetric quantization across a dimension (i.e per output
+/// channel quantization).
+/// quantized_dimension specifies which dimension the scales and zero_points
+/// correspond to.
+/// For a particular value in quantized_dimension, quantized values can be
+/// converted back to float using:
+///     `real_value = scale * (quantized_value - zero_point)`
 typedef struct TfLiteAffineQuantization {
   TfLiteFloatArray* scale;
   TfLiteIntArray* zero_point;
   int32_t quantized_dimension;
 } TfLiteAffineQuantization;
 
-/* A union of pointers that points to memory for a given tensor. */
+/// A union of pointers that points to memory for a given tensor.
+///
+/// Do not access these members directly, if possible, use
+/// `GetTensorData<TYPE>(tensor)` instead, otherwise only access `.data`, as
+/// other members are deprecated.
 typedef union TfLitePtrUnion {
-  /* Do not access these members directly, if possible, use
-   * GetTensorData<TYPE>(tensor) instead, otherwise only access .data, as other
-   * members are deprecated. */
   int32_t* i32;
   uint32_t* u32;
   int64_t* i64;
@@ -334,24 +362,26 @@
   TfLiteComplex64* c64;
   TfLiteComplex128* c128;
   int8_t* int8;
-  /* Only use this member. */
+  /// Only use this member.
   void* data;
 } TfLitePtrUnion;
 
-// Memory allocation strategies.
-//  * kTfLiteMmapRo: Read-only memory-mapped data, or data externally allocated.
-//  * kTfLiteArenaRw: Arena allocated with no guarantees about persistence,
-//        and available during eval.
-//  * kTfLiteArenaRwPersistent: Arena allocated but persistent across eval, and
-//        only available during eval.
-//  * kTfLiteDynamic: Allocated during eval, or for string tensors.
-//  * kTfLitePersistentRo: Allocated and populated during prepare. This is
-//        useful for tensors that can be computed during prepare and treated
-//        as constant inputs for downstream ops (also in prepare).
-//  * kTfLiteCustom: Custom memory allocation provided by the user. See
-//        TfLiteCustomAllocation below.
-// * kTfLiteVariantObject: Allocation is an arbitrary type-erased C++ object.
-//        Allocation and deallocation are done through `new` and `delete`.
+/// Memory allocation strategies.
+///  * `kTfLiteMmapRo`: Read-only memory-mapped data, or data externally
+///        allocated.
+///  * `kTfLiteArenaRw`: Arena allocated with no guarantees about persistence,
+///        and available during eval.
+///  * `kTfLiteArenaRwPersistent`: Arena allocated but persistent across eval,
+///  and only available during eval.
+///  * `kTfLiteDynamic`: Allocated during eval, or for string tensors.
+///  * `kTfLitePersistentRo`: Allocated and populated during prepare. This is
+///        useful for tensors that can be computed during prepare and treated
+///        as constant inputs for downstream ops (also in prepare).
+///  * `kTfLiteCustom`: Custom memory allocation provided by the user. See
+///        TfLiteCustomAllocation below.
+///  * `kTfLiteVariantObject`: Allocation is an arbitrary type-erased C++
+///  object.
+///        Allocation and deallocation are done through `new` and `delete`.
 typedef enum TfLiteAllocationType {
   kTfLiteMemNone = 0,
   kTfLiteMmapRo,
@@ -363,20 +393,51 @@
   kTfLiteVariantObject,
 } TfLiteAllocationType;
 
-// The delegates should use zero or positive integers to represent handles.
-// -1 is reserved from unallocated status.
+/// Memory allocation strategies.
+///
+/// TfLiteAllocationType values have been overloaded to mean more than their
+/// original intent. This enum should only be used to document the allocation
+/// strategy used by a tensor for its data.
+typedef enum TfLiteAllocationStrategy {
+  kTfLiteAllocationStrategyUnknown,
+  kTfLiteAllocationStrategyNone,    ///< No data is allocated.
+  kTfLiteAllocationStrategyMMap,    ///< Data is mmaped.
+  kTfLiteAllocationStrategyArena,   ///< Handled by the arena.
+  kTfLiteAllocationStrategyMalloc,  ///< Uses `malloc`/`free`.
+  kTfLiteAllocationStrategyNew      ///< Uses `new[]`/`delete[]`.
+} TfLiteAllocationStrategy;
+
+/// Describes how stable a tensor attribute is with regard to interpreter
+/// runs.
+typedef enum TfLiteRunStability {
+  kTfLiteRunStabilityUnknown,
+  kTfLiteRunStabilityUnstable,   ///< May change at any time.
+  kTfLiteRunStabilitySingleRun,  ///< Will stay the same for one run.
+  kTfLiteRunStabilityAcrossRuns  ///< Will stay the same across all runs.
+} TfLiteRunStability;
+
+/// Describes the steps of a TFLite operation life cycle.
+typedef enum TfLiteRunStep {
+  kTfLiteRunStepUnknown,
+  kTfLiteRunStepInit,
+  kTfLiteRunStepPrepare,
+  kTfLiteRunStepEval
+} TfLiteRunStep;
+
+/// The delegates should use zero or positive integers to represent handles.
+/// -1 is reserved for unallocated status.
 typedef int TfLiteBufferHandle;
 enum {
   kTfLiteNullBufferHandle = -1,
 };
 
-// Storage format of each dimension in a sparse tensor.
+/// Storage format of each dimension in a sparse tensor.
 typedef enum TfLiteDimensionType {
   kTfLiteDimDense = 0,
   kTfLiteDimSparseCSR,
 } TfLiteDimensionType;
 
-// Metadata to encode each dimension in a sparse tensor.
+/// Metadata to encode each dimension in a sparse tensor.
 typedef struct TfLiteDimensionMetadata {
   TfLiteDimensionType format;
   int dense_size;
@@ -384,8 +445,8 @@
   TfLiteIntArray* array_indices;
 } TfLiteDimensionMetadata;
 
-// Parameters used to encode a sparse tensor. For detailed explanation of each
-// field please refer to lite/schema/schema.fbs.
+/// Parameters used to encode a sparse tensor. For detailed explanation of each
+/// field please refer to lite/schema/schema.fbs.
 typedef struct TfLiteSparsity {
   TfLiteIntArray* traversal_order;
   TfLiteIntArray* block_map;
@@ -393,133 +454,141 @@
   int dim_metadata_size;
 } TfLiteSparsity;
 
-// Defines a custom memory allocation not owned by the runtime.
-// `data` should be aligned to kDefaultTensorAlignment defined in
-// lite/util.h. (Currently 64 bytes)
-// NOTE: See Interpreter.SetCustomAllocationForTensor for details on usage.
+/// Defines a custom memory allocation not owned by the runtime.
+/// `data` should be aligned to kDefaultTensorAlignment defined in
+/// lite/util.h. (Currently 64 bytes)
+/// NOTE: See `Interpreter::SetCustomAllocationForTensor` for details on usage.
 typedef struct TfLiteCustomAllocation {
   void* data;
   size_t bytes;
 } TfLiteCustomAllocation;
 
-// The flags used in `Interpreter::SetCustomAllocationForTensor`.
-// Note that this is a bitmask, so the values should be 1, 2, 4, 8, ...etc.
+/// The flags used in `Interpreter::SetCustomAllocationForTensor`.
+/// Note that this is a bitmask, so the values should be 1, 2, 4, 8, ...etc.
 typedef enum TfLiteCustomAllocationFlags {
   kTfLiteCustomAllocationFlagsNone = 0,
-  // Skips checking whether allocation.data points to an aligned buffer as
-  // expected by the TFLite runtime.
-  // NOTE: Setting this flag can cause crashes when calling Invoke().
-  // Use with caution.
+  /// Skips checking whether allocation.data points to an aligned buffer as
+  /// expected by the TFLite runtime.
+  /// NOTE: Setting this flag can cause crashes when calling Invoke().
+  /// Use with caution.
   kTfLiteCustomAllocationFlagsSkipAlignCheck = 1,
 } TfLiteCustomAllocationFlags;
 
-// A tensor in the interpreter system which is a wrapper around a buffer of
-// data including a dimensionality (or NULL if not currently defined).
+enum { kTfLiteNoBufferIdentifier = SIZE_MAX };
+
+/// A tensor in the interpreter system which is a wrapper around a buffer of
+/// data including a dimensionality (or NULL if not currently defined).
 #ifndef TF_LITE_STATIC_MEMORY
 typedef struct TfLiteTensor {
-  // The data type specification for data stored in `data`. This affects
-  // what member of `data` union should be used.
+  /// The data type specification for data stored in `data`. This affects
+  /// what member of `data` union should be used.
   TfLiteType type;
-  // A union of data pointers. The appropriate type should be used for a typed
-  // tensor based on `type`.
+  /// A union of data pointers. The appropriate type should be used for a typed
+  /// tensor based on `type`.
   TfLitePtrUnion data;
-  // A pointer to a structure representing the dimensionality interpretation
-  // that the buffer should have. NOTE: the product of elements of `dims`
-  // and the element datatype size should be equal to `bytes` below.
+  /// A pointer to a structure representing the dimensionality interpretation
+  /// that the buffer should have. NOTE: the product of elements of `dims`
+  /// and the element datatype size should be equal to `bytes` below.
   TfLiteIntArray* dims;
-  // Quantization information.
+  /// Quantization information.
   TfLiteQuantizationParams params;
-  // How memory is mapped
-  //  kTfLiteMmapRo: Memory mapped read only.
-  //  i.e. weights
-  //  kTfLiteArenaRw: Arena allocated read write memory
-  //  (i.e. temporaries, outputs).
+  /// How memory is mapped
+  ///  kTfLiteMmapRo: Memory mapped read only.
+  ///  i.e. weights
+  ///  kTfLiteArenaRw: Arena allocated read write memory
+  ///  (i.e. temporaries, outputs).
   TfLiteAllocationType allocation_type;
-  // The number of bytes required to store the data of this Tensor. I.e.
-  // (bytes of each element) * dims[0] * ... * dims[n-1].  For example, if
-  // type is kTfLiteFloat32 and dims = {3, 2} then
-  // bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24.
+  /// The number of bytes required to store the data of this Tensor. I.e.
+  /// (bytes of each element) * dims[0] * ... * dims[n-1].  For example, if
+  /// type is kTfLiteFloat32 and dims = {3, 2} then
+  /// bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24.
   size_t bytes;
 
-  // An opaque pointer to a tflite::MMapAllocation
+  /// An opaque pointer to a tflite::MMapAllocation
   const void* allocation;
 
-  // Null-terminated name of this tensor.
+  /// Null-terminated name of this tensor.
   const char* name;
 
-  // The delegate which knows how to handle `buffer_handle`.
-  // WARNING: This is an experimental interface that is subject to change.
+  /// The delegate which knows how to handle `buffer_handle`.
+  ///
+  /// WARNING: This is an experimental interface that is subject to change.
   struct TfLiteDelegate* delegate;
 
-  // An integer buffer handle that can be handled by `delegate`.
-  // The value is valid only when delegate is not null.
-  // WARNING: This is an experimental interface that is subject to change.
+  /// An integer buffer handle that can be handled by `delegate`.
+  /// The value is valid only when delegate is not null.
+  ///
+  /// WARNING: This is an experimental interface that is subject to change.
   TfLiteBufferHandle buffer_handle;
 
-  // If the delegate uses its own buffer (e.g. GPU memory), the delegate is
-  // responsible to set data_is_stale to true.
-  // `delegate->CopyFromBufferHandle` can be called to copy the data from
-  // delegate buffer.
-  // WARNING: This is an // experimental interface that is subject to change.
+  /// If the delegate uses its own buffer (e.g. GPU memory), the delegate is
+  /// responsible for setting data_is_stale to true.
+  /// `delegate->CopyFromBufferHandle` can be called to copy the data from
+  /// delegate buffer.
+  ///
+  /// WARNING: This is an experimental interface that is subject to change.
   bool data_is_stale;
 
-  // True if the tensor is a variable.
+  /// True if the tensor is a variable.
   bool is_variable;
 
-  // Quantization information. Replaces params field above.
+  /// Quantization information. Replaces params field above.
   TfLiteQuantization quantization;
 
-  // Parameters used to encode a sparse tensor.
-  // This is optional. The field is NULL if a tensor is dense.
-  // WARNING: This is an experimental interface that is subject to change.
+  /// Parameters used to encode a sparse tensor.
+  /// This is optional. The field is NULL if a tensor is dense.
+  ///
+  /// WARNING: This is an experimental interface that is subject to change.
   TfLiteSparsity* sparsity;
 
-  // Optional. Encodes shapes with unknown dimensions with -1. This field is
-  // only populated when unknown dimensions exist in a read-write tensor (i.e.
-  // an input or output tensor). (e.g.  `dims` contains [1, 1, 1, 3] and
-  // `dims_signature` contains [1, -1, -1, 3]).  If no unknown dimensions exist
-  // then `dims_signature` is either null, or set to an empty array.  Note that
-  // this field only exists when TF_LITE_STATIC_MEMORY is not defined.
+  /// Optional. Encodes shapes with unknown dimensions with -1. This field is
+  /// only populated when unknown dimensions exist in a read-write tensor (i.e.
+  /// an input or output tensor). (e.g.  `dims` contains [1, 1, 1, 3] and
+  /// `dims_signature` contains [1, -1, -1, 3]).  If no unknown dimensions exist
+  /// then `dims_signature` is either null, or set to an empty array.  Note that
+  /// this field only exists when TF_LITE_STATIC_MEMORY is not defined.
   const TfLiteIntArray* dims_signature;
 } TfLiteTensor;
 
-// A structure representing an instance of a node.
-// This structure only exhibits the inputs, outputs, user defined data and some
-// node properties (like statefulness), not other features like the type.
+/// A structure representing an instance of a node.
+/// This structure only exhibits the inputs, outputs, user defined data and some
+/// node properties (like statefulness), not other features like the type.
 typedef struct TfLiteNode {
-  // Inputs to this node expressed as indices into the simulator's tensors.
+  /// Inputs to this node expressed as indices into the simulator's tensors.
   TfLiteIntArray* inputs;
 
-  // Outputs to this node expressed as indices into the simulator's tensors.
+  /// Outputs to this node expressed as indices into the simulator's tensors.
   TfLiteIntArray* outputs;
 
-  // intermediate tensors to this node expressed as indices into the simulator's
-  // tensors.
+  /// Intermediate tensors to this node expressed as indices into the
+  /// simulator's tensors.
   TfLiteIntArray* intermediates;
 
-  // Temporary tensors uses during the computations. This usually contains no
-  // tensors, but ops are allowed to change that if they need scratch space of
-  // any sort.
+  /// Temporary tensors used during the computations. This usually contains no
+  /// tensors, but ops are allowed to change that if they need scratch space of
+  /// any sort.
   TfLiteIntArray* temporaries;
 
-  // Opaque data provided by the node implementer through `Registration.init`.
+  /// Opaque data provided by the node implementer through `Registration.init`.
   void* user_data;
 
-  // Opaque data provided to the node if the node is a builtin. This is usually
-  // a structure defined in builtin_op_data.h
+  /// Opaque data provided to the node if the node is a builtin. This is usually
+  /// a structure defined in builtin_op_data.h
   void* builtin_data;
 
-  // Custom initial data. This is the opaque data provided in the flatbuffer.
-  // WARNING: This is an experimental interface that is subject to change.
+  /// Custom initial data. This is the opaque data provided in the flatbuffer.
+  ///
+  /// WARNING: This is an experimental interface that is subject to change.
   const void* custom_initial_data;
   int custom_initial_data_size;
 
-  // The pointer to the delegate. This is non-null only when the node is
-  // created by calling `interpreter.ModifyGraphWithDelegate`.
-  // WARNING: This is an experimental interface that is subject to change.
+  /// The pointer to the delegate. This is non-null only when the node is
+  /// created by calling `Interpreter::ModifyGraphWithDelegate`.
+  ///
+  /// WARNING: This is an experimental interface that is subject to change.
   struct TfLiteDelegate* delegate;
 
-  // Whether this op might have side effect (e.g. stateful op).
+  /// Whether this op might have side effect (e.g. stateful op).
   bool might_have_side_effect;
 } TfLiteNode;
 #else   // defined(TF_LITE_STATIC_MEMORY)?
@@ -602,90 +671,89 @@
   void* builtin_data;
 
   // Custom initial data. This is the opaque data provided in the flatbuffer.
+  //
   // WARNING: This is an experimental interface that is subject to change.
   const void* custom_initial_data;
   int custom_initial_data_size;
 } TfLiteNode;
 #endif  // TF_LITE_STATIC_MEMORY
 
-// Light-weight tensor struct for TF Micro runtime. Provides the minimal amount
-// of information required for a kernel to run during TfLiteRegistration::Eval.
+/// Light-weight tensor struct for TF Micro runtime. Provides the minimal amount
+/// of information required for a kernel to run during TfLiteRegistration::Eval.
 // TODO(b/160955687): Move this field into TF_LITE_STATIC_MEMORY when TFLM
 // builds with this flag by default internally.
 typedef struct TfLiteEvalTensor {
-  // A union of data pointers. The appropriate type should be used for a typed
-  // tensor based on `type`.
+  /// A union of data pointers. The appropriate type should be used for a typed
+  /// tensor based on `type`.
   TfLitePtrUnion data;
 
-  // A pointer to a structure representing the dimensionality interpretation
-  // that the buffer should have.
+  /// A pointer to a structure representing the dimensionality interpretation
+  /// that the buffer should have.
   TfLiteIntArray* dims;
 
-  // The data type specification for data stored in `data`. This affects
-  // what member of `data` union should be used.
+  /// The data type specification for data stored in `data`. This affects
+  /// what member of `data` union should be used.
   TfLiteType type;
 } TfLiteEvalTensor;
 
 #ifndef TF_LITE_STATIC_MEMORY
-// Free data memory of tensor `t`.
+/// Free data memory of tensor `t`.
 void TfLiteTensorDataFree(TfLiteTensor* t);
 
-// Free quantization data.
+/// Free quantization data.
 void TfLiteQuantizationFree(TfLiteQuantization* quantization);
 
-// Free sparsity parameters.
+/// Free sparsity parameters.
 void TfLiteSparsityFree(TfLiteSparsity* sparsity);
 
-// Free memory of tensor `t`.
+/// Free memory of tensor `t`.
 void TfLiteTensorFree(TfLiteTensor* t);
 
-// Set all of a tensor's fields (and free any previously allocated data).
+/// Set all of a tensor's fields (and free any previously allocated data).
 void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
                        TfLiteQuantizationParams quantization, char* buffer,
                        size_t size, TfLiteAllocationType allocation_type,
                        const void* allocation, bool is_variable,
                        TfLiteTensor* tensor);
 
-// Copies the contents of 'src' in 'dst'.
-// Function does nothing if either 'src' or 'dst' is passed as nullptr and
-// return kTfLiteOk.
-// Returns kTfLiteError if 'src' and 'dst' doesn't have matching data size.
-// Note function copies contents, so it won't create new data pointer
-// or change allocation type.
-// All Tensor related properties will be copied from 'src' to 'dst' like
-// quantization, sparsity, ...
+/// Copies the contents of `src` in `dst`.
+/// Function does nothing if either `src` or `dst` is passed as nullptr and
+/// returns `kTfLiteOk`.
+/// Returns `kTfLiteError` if `src` and `dst` don't have matching data size.
+/// Note function copies contents, so it won't create new data pointer
+/// or change allocation type.
+/// All Tensor related properties will be copied from `src` to `dst` like
+/// quantization, sparsity, ...
 TfLiteStatus TfLiteTensorCopy(const TfLiteTensor* src, TfLiteTensor* dst);
 
-// Change the size of the memory block owned by `tensor` to `num_bytes`.
-// Tensors with allocation types other than `kTfLiteDynamic` will be ignored and
-// a kTfLiteOk will be returned.
-// `tensor`'s internal data buffer will be assigned a pointer
-// which can safely be passed to free or realloc if `num_bytes` is zero.
-// If `preserve_data` is true, tensor data will be unchanged in the range from
-// the start of the region up to the minimum of the old and new sizes. In the
-// case of NULL tensor, or an error allocating new memory, returns
-// `kTfLiteError`.
+/// Change the size of the memory block owned by `tensor` to `num_bytes`.
+/// Tensors with allocation types other than `kTfLiteDynamic` will be ignored
+/// and a `kTfLiteOk` will be returned. `tensor`'s internal data buffer will be
+/// assigned a pointer which can safely be passed to free or realloc if
+/// `num_bytes` is zero. If `preserve_data` is true, tensor data will be
+/// unchanged in the range from the start of the region up to the minimum of the
+/// old and new sizes. In the case of NULL tensor, or an error allocating new
+/// memory, returns `kTfLiteError`.
 TfLiteStatus TfLiteTensorResizeMaybeCopy(size_t num_bytes, TfLiteTensor* tensor,
                                          bool preserve_data);
 
-// Change the size of the memory block owned by `tensor` to `num_bytes`.
-// Tensors with allocation types other than kTfLiteDynamic will be ignored and
-// a kTfLiteOk will be returned.
-// `tensor`'s internal data buffer will be assigned a pointer
-// which can safely be passed to free or realloc if `num_bytes` is zero.
-// Tensor data will be unchanged in the range from the start of the region up to
-// the minimum of the old and new sizes. In the case
-// of NULL tensor, or an error allocating new memory, returns `kTfLiteError`.
+/// Change the size of the memory block owned by `tensor` to `num_bytes`.
+/// Tensors with allocation types other than `kTfLiteDynamic` will be ignored
+/// and a `kTfLiteOk` will be returned. `tensor`'s internal data buffer will be
+/// assigned a pointer which can safely be passed to free or realloc if
+/// `num_bytes` is zero. Tensor data will be unchanged in the range from the
+/// start of the region up to the minimum of the old and new sizes. In the case
+/// of NULL tensor, or an error allocating new memory, returns `kTfLiteError`.
 TfLiteStatus TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor);
 #endif  // TF_LITE_STATIC_MEMORY
 
-// WARNING: This is an experimental interface that is subject to change.
-//
-// Currently, TfLiteDelegateParams has to be allocated in a way that it's
-// trivially destructable. It will be stored as `builtin_data` field in
-// `TfLiteNode` of the delegate node.
-//
-// See also the `CreateDelegateParams` function in `interpreter.cc` details.
+/// WARNING: This is an experimental interface that is subject to change.
+///
+/// Currently, TfLiteDelegateParams has to be allocated in a way that it's
+/// trivially destructible. It will be stored as `builtin_data` field in
+/// `TfLiteNode` of the delegate node.
+///
+/// See also `CreateDelegateParams` in `interpreter.cc` for details.
 typedef struct TfLiteDelegateParams {
   struct TfLiteDelegate* delegate;
   TfLiteIntArray* nodes_to_replace;
@@ -693,14 +761,14 @@
   TfLiteIntArray* output_tensors;
 } TfLiteDelegateParams;
 
-// WARNING: This is an experimental interface that is subject to change.
-//
-// Currently, TfLiteOpaqueDelegateParams has to be allocated in a way that it's
-// trivially destructable. It will be stored as `builtin_data` field in
-// `TfLiteNode` of the delegate node.
-//
-// See also the `CreateOpaqueDelegateParams` function in `subgraph.cc`
-// details.
+/// WARNING: This is an experimental interface that is subject to change.
+///
+/// Currently, TfLiteOpaqueDelegateParams has to be allocated in a way that it's
+/// trivially destructible. It will be stored as `builtin_data` field in
+/// `TfLiteNode` of the delegate node.
+///
+/// See also the `CreateOpaqueDelegateParams` function in `subgraph.cc` for
+/// details.
 typedef struct TfLiteOpaqueDelegateParams {
   TfLiteOpaqueDelegate* delegate;
   void* delegate_data;
@@ -709,371 +777,430 @@
   TfLiteIntArray* output_tensors;
 } TfLiteOpaqueDelegateParams;
 
+/// `TfLiteContext` allows an op to access the tensors.
+///
+/// `TfLiteContext` is a struct that is created by the TF Lite runtime
+/// and passed to the "methods" (C function pointers) in the
+/// `TfLiteRegistration` struct that are used to define custom ops and custom
+/// delegate kernels. It contains information and methods (C function pointers)
+/// that can be called by the code implementing a custom op or a custom delegate
+/// kernel. These methods provide access to the context in which that custom op
+/// or custom delegate kernel occurs, such as access to the input and output
+/// tensors for that op, as well as methods for allocating memory buffers
+/// and intermediate tensors, etc.
+///
+/// See also `TfLiteOpaqueContext`, which is a more ABI-stable equivalent.
 typedef struct TfLiteContext {
-  // Number of tensors in the context.
+  /// Number of tensors in the context.
   size_t tensors_size;
 
-  // The execution plan contains a list of the node indices in execution
-  // order. execution_plan->size is the current number of nodes. And,
-  // execution_plan->data[0] is the first node that needs to be run.
-  // TfLiteDelegates can traverse the current execution plan by iterating
-  // through each member of this array and using GetNodeAndRegistration() to
-  // access details about a node. i.e.
-  //
-  // TfLiteIntArray* execution_plan;
-  // TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &execution_plan));
-  // for (int exec_index = 0; exec_index < execution_plan->size; exec_index++) {
-  //    int node_index = execution_plan->data[exec_index];
-  //    TfLiteNode* node;
-  //    TfLiteRegistration* reg;
-  //    context->GetNodeAndRegistration(context, node_index, &node, &reg);
-  // }
-  // Note: the memory pointed by '`*execution_plan` is OWNED by TfLite runtime.
-  // Future calls to GetExecutionPlan invalidates earlier outputs. The following
-  // code snippet shows the issue of such an invocation pattern. After calling
-  // CheckNode, subsequent access to `plan_1st` is undefined.
-  //
-  // void CheckNode(const TfLiteNode* node) {
-  //   ...
-  //   TfLiteIntArray* plan_2nd;
-  //   TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &plan_2nd));
-  //   ...
-  // }
-  //
-  // TfLiteIntArray* plan_1st;
-  // TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &plan_1st));
-  // for (int exec_index = 0; exec_index < plan_1st->size; exec_index++) {
-  //    int node_index = plan_1st->data[exec_index];
-  //    TfLiteNode* node;
-  //    TfLiteRegistration* reg;
-  //    context->GetNodeAndRegistration(context, node_index, &node, &reg);
-  //    CheckNode(node);
-  // }
-  //
-  // WARNING: This is an experimental interface that is subject to change.
+  /// The execution plan contains a list of the node indices in execution
+  /// order. execution_plan->size is the current number of nodes. And,
+  /// execution_plan->data[0] is the first node that needs to be run.
+  /// TfLiteDelegates can traverse the current execution plan by iterating
+  /// through each member of this array and using GetNodeAndRegistration() to
+  /// access details about a node. i.e.
+  ///
+  ///
+  ///     TfLiteIntArray* execution_plan;
+  ///     TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context,
+  ///                                                     &execution_plan));
+  ///     for (int exec_index = 0; exec_index < execution_plan->size;
+  ///           exec_index++) {
+  ///        int node_index = execution_plan->data[exec_index];
+  ///        TfLiteNode* node;
+  ///        TfLiteRegistration* reg;
+  ///        context->GetNodeAndRegistration(context, node_index, &node, &reg);
+  ///     }
+  ///
+  /// Note: `*execution_plan` points to memory OWNED by the TfLite runtime.
+  /// Future calls to GetExecutionPlan invalidates earlier outputs. The
+  /// following code snippet shows the issue of such an invocation pattern.
+  /// After calling CheckNode, subsequent access to `plan_1st` is undefined.
+  ///
+  ///     void CheckNode(const TfLiteNode* node) {
+  ///       ...
+  ///       TfLiteIntArray* plan_2nd;
+  ///       TF_LITE_ENSURE_STATUS(
+  ///           context->GetExecutionPlan(context, &plan_2nd)
+  ///       );
+  ///       ...
+  ///     }
+  ///
+  ///     TfLiteIntArray* plan_1st;
+  ///     TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &plan_1st));
+  ///     for (int exec_index = 0; exec_index < plan_1st->size; exec_index++) {
+  ///        int node_index = plan_1st->data[exec_index];
+  ///        TfLiteNode* node;
+  ///        TfLiteRegistration* reg;
+  ///        context->GetNodeAndRegistration(context, node_index, &node, &reg);
+  ///        CheckNode(node);
+  ///     }
+  ///
+  /// WARNING: This is an experimental interface that is subject to change.
   TfLiteStatus (*GetExecutionPlan)(struct TfLiteContext* context,
                                    TfLiteIntArray** execution_plan);
 
-  // An array of tensors in the interpreter context (of length `tensors_size`)
+  /// An array of tensors in the interpreter context (of length `tensors_size`)
   TfLiteTensor* tensors;
 
-  // opaque full context ptr (an opaque c++ data structure)
+  /// opaque full context ptr (an opaque c++ data structure)
   void* impl_;
 
-  // Request memory pointer be resized. Updates dimensions on the tensor.
-  // NOTE: ResizeTensor takes ownership of newSize.
+  /// Request memory pointer be resized. Updates dimensions on the tensor.
+  /// NOTE: ResizeTensor takes ownership of newSize.
   TfLiteStatus (*ResizeTensor)(struct TfLiteContext*, TfLiteTensor* tensor,
                                TfLiteIntArray* new_size);
-  // Request that an error be reported with format string msg.
+  /// Request that an error be reported with format string msg.
   void (*ReportError)(struct TfLiteContext*, const char* msg, ...);
 
-  // Add `tensors_to_add` tensors, preserving pre-existing Tensor entries.  If
-  // non-null, the value pointed to by `first_new_tensor_index` will be set to
-  // the index of the first new tensor.
+  /// Add `tensors_to_add` tensors, preserving pre-existing Tensor entries.  If
+  /// non-null, the value pointed to by `first_new_tensor_index` will be set to
+  /// the index of the first new tensor.
   TfLiteStatus (*AddTensors)(struct TfLiteContext*, int tensors_to_add,
                              int* first_new_tensor_index);
 
-  // Get a Tensor node by node_index.
-  // WARNING: This is an experimental interface that is subject to change.
+  /// Get a Tensor node by node_index.
+  ///
+  /// WARNING: This is an experimental interface that is subject to change.
   TfLiteStatus (*GetNodeAndRegistration)(
       struct TfLiteContext*, int node_index, TfLiteNode** node,
       struct TfLiteRegistration** registration);
 
-  // Replace ops with one or more stub delegate operations. This function
-  // does not take ownership of `nodes_to_replace`.
+  /// Replace ops with one or more stub delegate operations. This function
+  /// does not take ownership of `nodes_to_replace`.
   TfLiteStatus (*ReplaceNodeSubsetsWithDelegateKernels)(
       struct TfLiteContext*, struct TfLiteRegistration registration,
       const TfLiteIntArray* nodes_to_replace, struct TfLiteDelegate* delegate);
 
-  // Number of threads that are recommended to subsystems like gemmlowp and
-  // eigen.
+  /// Number of threads that are recommended to subsystems like gemmlowp and
+  /// eigen.
   int recommended_num_threads;
 
-  // Access external contexts by type.
-  // WARNING: This is an experimental interface that is subject to change.
+  /// Access external contexts by type.
+  ///
+  /// WARNING: This is an experimental interface that is subject to change.
   TfLiteExternalContext* (*GetExternalContext)(struct TfLiteContext*,
                                                TfLiteExternalContextType);
-  // Set the value of a external context. Does not take ownership of the
-  // pointer.
-  // WARNING: This is an experimental interface that is subject to change.
+  /// Set the value of an external context. Does not take ownership of the
+  /// pointer.
+  ///
+  /// WARNING: This is an experimental interface that is subject to change.
   void (*SetExternalContext)(struct TfLiteContext*, TfLiteExternalContextType,
                              TfLiteExternalContext*);
 
-  // Flag for allowing float16 precision for FP32 calculation.
-  // default: false.
-  // WARNING: This is an experimental API and subject to change.
+  /// Flag for allowing float16 precision for FP32 calculation.
+  /// default: false.
+  ///
+  /// WARNING: This is an experimental API and subject to change.
   bool allow_fp32_relax_to_fp16;
 
-  // Pointer to the op-level profiler, if set; nullptr otherwise.
+  /// Pointer to the op-level profiler, if set; nullptr otherwise.
   void* profiler;
 
-  // Allocate persistent buffer which has the same life time as the interpreter.
-  // Returns nullptr on failure.
-  // The memory is allocated from heap for TFL, and from tail in TFLM.
-  // This method is only available in Init or Prepare stage.
-  // WARNING: This is an experimental interface that is subject to change.
+  /// Allocate persistent buffer which has the same life time as the
+  /// interpreter. Returns `nullptr` on failure. The memory is allocated from
+  /// heap for TFL, and from tail in TFLM. This method is only available in
+  /// `Init` or `Prepare` stage.
+  ///
+  /// WARNING: This is an experimental interface that is subject
+  /// to change.
   void* (*AllocatePersistentBuffer)(struct TfLiteContext* ctx, size_t bytes);
 
-  // Allocate a buffer which will be deallocated right after invoke phase.
-  // The memory is allocated from heap in TFL, and from volatile arena in TFLM.
-  // This method is only available in invoke stage.
-  // NOTE: If possible use RequestScratchBufferInArena method to avoid memory
-  // allocation during inference time.
-  // WARNING: This is an experimental interface that is subject to change.
+  /// Allocate a buffer which will be deallocated right after invoke phase.
+  /// The memory is allocated from heap in TFL, and from volatile arena in TFLM.
+  /// This method is only available in invoke stage.
+  ///
+  /// NOTE: If possible use `RequestScratchBufferInArena` method to avoid memory
+  /// allocation during inference time.
+  ///
+  /// WARNING: This is an experimental interface that is subject to change.
   TfLiteStatus (*AllocateBufferForEval)(struct TfLiteContext* ctx, size_t bytes,
                                         void** ptr);
 
-  // Request a scratch buffer in the arena through static memory planning.
-  // This method is only available in Prepare stage and the buffer is allocated
-  // by the interpreter between Prepare and Eval stage. In Eval stage,
-  // GetScratchBuffer API can be used to fetch the address.
-  // WARNING: This is an experimental interface that is subject to change.
+  /// Request a scratch buffer in the arena through static memory planning.
+  /// This method is only available in `Prepare` stage and the buffer is
+  /// allocated by the interpreter between Prepare and Eval stage. In `Eval`
+  /// stage, `GetScratchBuffer` API can be used to fetch the address.
+  ///
+  /// WARNING: This is an experimental interface that is subject to change.
   TfLiteStatus (*RequestScratchBufferInArena)(struct TfLiteContext* ctx,
                                               size_t bytes, int* buffer_idx);
 
-  // Get the scratch buffer pointer.
-  // This method is only available in Eval stage.
-  // WARNING: This is an experimental interface that is subject to change.
+  /// Get the scratch buffer pointer.
+  /// This method is only available in Eval stage.
+  ///
+  /// WARNING: This is an experimental interface that is subject to change.
   void* (*GetScratchBuffer)(struct TfLiteContext* ctx, int buffer_idx);
 
-  // Resize the memory pointer of the `tensor`. This method behaves the same as
-  // `ResizeTensor`, except that it makes a copy of the shape array internally
-  // so the shape array could be deallocated right afterwards.
-  // WARNING: This is an experimental interface that is subject to change.
+  /// Resize the memory pointer of the `tensor`. This method behaves the same as
+  /// `ResizeTensor`, except that it makes a copy of the shape array internally
+  /// so the shape array could be deallocated right afterwards.
+  ///
+  /// WARNING: This is an experimental interface that is subject to change.
   TfLiteStatus (*ResizeTensorExplicit)(struct TfLiteContext* ctx,
                                        TfLiteTensor* tensor, int dims,
                                        const int* shape);
 
-  // This method provides a preview of post-delegation partitioning. Each
-  // TfLiteDelegateParams in the referenced array corresponds to one instance of
-  // the delegate kernel.
-  // Example usage:
-  //
-  // TfLiteIntArray* nodes_to_replace = ...;
-  // TfLiteDelegateParams* params_array;
-  // int num_partitions = 0;
-  // TF_LITE_ENSURE_STATUS(context->PreviewDelegatePartitioning(
-  //    context, delegate, nodes_to_replace, &params_array, &num_partitions));
-  // for (int idx = 0; idx < num_partitions; idx++) {
-  //    const auto& partition_params = params_array[idx];
-  //    ...
-  // }
-  //
-  // NOTE: The context owns the memory referenced by partition_params_array. It
-  // will be cleared with another call to PreviewDelegatePartitioning, or after
-  // TfLiteDelegateParams::Prepare returns.
-  //
-  // WARNING: This is an experimental interface that is subject to change.
+  /// This method provides a preview of post-delegation partitioning. Each
+  /// TfLiteDelegateParams in the referenced array corresponds to one instance
+  /// of the delegate kernel. Example usage:
+  ///
+  ///     TfLiteIntArray* nodes_to_replace = ...;
+  ///     TfLiteDelegateParams* params_array;
+  ///     int num_partitions = 0;
+  ///     TF_LITE_ENSURE_STATUS(context->PreviewDelegatePartitioning(
+  ///        context, delegate, nodes_to_replace, &params_array,
+  ///        &num_partitions));
+  ///     for (int idx = 0; idx < num_partitions; idx++) {
+  ///        const auto& partition_params = params_array[idx];
+  ///        ...
+  ///     }
+  ///
+  /// NOTE: The context owns the memory referenced by partition_params_array. It
+  /// will be cleared with another call to PreviewDelegatePartitioning, or after
+  /// TfLiteDelegateParams::Prepare returns.
+  ///
+  /// WARNING: This is an experimental interface that is subject to change.
   TfLiteStatus (*PreviewDelegatePartitioning)(
       struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
       TfLiteDelegateParams** partition_params_array, int* num_partitions);
 
-  // Returns a TfLiteTensor struct for a given index.
-  // WARNING: This is an experimental interface that is subject to change.
-  // WARNING: This method may not be available on all platforms.
+  /// Returns a TfLiteTensor struct for a given index.
+  ///
+  /// WARNING: This is an experimental interface that is subject to change.
+  ///
+  /// WARNING: This method may not be available on all platforms.
   TfLiteTensor* (*GetTensor)(const struct TfLiteContext* context,
                              int tensor_idx);
 
-  // Returns a TfLiteEvalTensor struct for a given index.
-  // WARNING: This is an experimental interface that is subject to change.
-  // WARNING: This method may not be available on all platforms.
+  /// Returns a TfLiteEvalTensor struct for a given index.
+  ///
+  /// WARNING: This is an experimental interface that is subject to change.
+  ///
+  /// WARNING: This method may not be available on all platforms.
   TfLiteEvalTensor* (*GetEvalTensor)(const struct TfLiteContext* context,
                                      int tensor_idx);
 
-  // Retrieves named metadata buffer from the TFLite model.
-  // Returns kTfLiteOk if metadata is successfully obtained from the flatbuffer
-  // Model: that is, there exists a `metadata` entry with given `name` string.
-  // (see TFLite's schema.fbs).
-  // The corresponding `buffer` information is populated in `ptr` & `bytes`.
-  // The data from `ptr` is valid for the lifetime of the Interpreter.
-  //
-  // WARNING: This is an experimental interface that is subject to change.
+  /// Retrieves named metadata buffer from the TFLite model.
+  /// Returns kTfLiteOk if metadata is successfully obtained from the flatbuffer
+  /// Model: that is, there exists a `metadata` entry with given `name` string
+  /// (see TFLite's schema.fbs).
+  /// The corresponding `buffer` information is populated in `ptr` & `bytes`.
+  /// The data from `ptr` is valid for the lifetime of the Interpreter.
+  ///
+  /// WARNING: This is an experimental interface that is subject to change.
   TfLiteStatus (*GetModelMetadata)(const struct TfLiteContext* context,
                                    const char* name, const char** ptr,
                                    size_t* bytes);
 
-  // Retrieves the corresponding TfLiteContext of a subgraph that the given
-  // subgraph_index points to and switches to the delegate context for that
-  // subgraph. If an invalid subgraph index is given, returns kTfLiteError.
-  // NOTE: This function is expected to be paired with ReleaseSubgraphContext()
-  // once the delegate preparation is done and/or the delegate context functions
-  // are no longer needed.
-  //
-  // WARNING: This is an experimental interface that is subject to change.
+  /// Retrieves the corresponding TfLiteContext of a subgraph that the given
+  /// subgraph_index points to and switches to the delegate context for that
+  /// subgraph. If an invalid subgraph index is given, returns kTfLiteError.
+  ///
+  /// NOTE: This function is expected to be paired with ReleaseSubgraphContext()
+  /// once the delegate preparation is done and/or the delegate context
+  /// functions are no longer needed.
+  ///
+  /// WARNING: This is an experimental interface that is subject to change.
   TfLiteStatus (*AcquireSubgraphContext)(
       struct TfLiteContext* context, int subgraph_index,
       struct TfLiteContext** acquired_context);
-  // Releases the subgraph context by switching back to the TFLite kernel
-  // context for the subgraph that the given subgraph_index points to.
-  // NOTE: This function is expected to be used after AcquireSubgraphContext()
-  // once the delegate preparation is done and/or the delegate context functions
-  // are no longer needed.
-  //
-  // WARNING: This is an experimental interface that is subject to change.
+  /// Releases the subgraph context by switching back to the TFLite kernel
+  /// context for the subgraph that the given subgraph_index points to.
+  ///
+  /// NOTE: This function is expected to be used after AcquireSubgraphContext()
+  /// once the delegate preparation is done and/or the delegate context
+  /// functions are no longer needed.
+  ///
+  /// WARNING: This is an experimental interface that is subject to change.
   TfLiteStatus (*ReleaseSubgraphContext)(struct TfLiteContext* context,
                                          int subgraph_index);
 } TfLiteContext;
 
-// `TfLiteRegistrationExternal` is an external version of `TfLiteRegistration`
-// for C API which doesn't use internal types (such as `TfLiteContext`) but only
-// uses stable API types (such as `TfLiteOpaqueContext`). The purpose of each
-// field is the exactly the same as with `TfLiteRegistration`.
-typedef struct TfLiteRegistrationExternal TfLiteRegistrationExternal;
+/// `TfLiteOperator` is an external version of `TfLiteRegistration`
+/// for C API which doesn't use internal types (such as `TfLiteContext`) but
+/// only uses stable API types (such as `TfLiteOpaqueContext`). The purpose of
/// each field is exactly the same as with `TfLiteRegistration`.
+typedef struct TfLiteOperator TfLiteOperator;
 
-// The valid values of the `inplace_operator` field in `TfLiteRegistration`.
-// This allow an op to signal to the runtime that the same data pointer
-// may be passed as an input and output without impacting the result.
-// This does not mean that the memory can safely be reused, it is up to the
-// runtime to determine this, e.g. if another op consumes the same input or not
-// or if an input tensor has sufficient memory allocated to store the output
-// data.
-//
-// Setting these flags authorizes the runtime to set the data pointers of an
-// input and output tensor to the same value. In such cases, the memory required
-// by the output must be less than or equal to that required by the shared
-// input, never greater. If kTfLiteInplaceOpDataUnmodified is set, then the
-// runtime can share the same input tensor with multiple operator's outputs,
-// provided that kTfLiteInplaceOpDataUnmodified is set for all of them.
-// Otherwise, if an input tensor is consumed by multiple operators, it may only
-// be shared with the operator which is the last to consume it.
-//
-// Note that this is a bitmask, so the values should be 1, 2, 4, 8, ...etc.
+#ifndef DOXYGEN_SKIP
+// For backwards compatibility.
+// Deprecated. Use TfLiteOperator instead.
+typedef TfLiteOperator TfLiteRegistrationExternal;
+#endif
+
+/// The valid values of the `inplace_operator` field in `TfLiteRegistration`.
/// This allows an op to signal to the runtime that the same data pointer
+/// may be passed as an input and output without impacting the result.
+/// This does not mean that the memory can safely be reused, it is up to the
+/// runtime to determine this, e.g. if another op consumes the same input or not
+/// or if an input tensor has sufficient memory allocated to store the output
+/// data.
+///
+/// Setting these flags authorizes the runtime to set the data pointers of an
+/// input and output tensor to the same value. In such cases, the memory
+/// required by the output must be less than or equal to that required by the
+/// shared input, never greater. If kTfLiteInplaceOpDataUnmodified is set, then
+/// the runtime can share the same input tensor with multiple operator's
+/// outputs, provided that kTfLiteInplaceOpDataUnmodified is set for all of
+/// them. Otherwise, if an input tensor is consumed by multiple operators, it
+/// may only be shared with the operator which is the last to consume it.
+///
+/// Note that this is a bitmask, so the values should be 1, 2, 4, 8, ...etc.
 typedef enum {
-  // The default value. This indicates that the same data pointer cannot safely
-  // be passed as an op's input and output.
+  /// The default value. This indicates that the same data pointer cannot safely
+  /// be passed as an op's input and output.
   kTfLiteInplaceOpNone = 0,
-  // This indicates that an op's first output's data is identical to its first
-  // input's data, for example Reshape.
+  /// This indicates that an op's first output's data is identical to its first
+  /// input's data, for example Reshape.
   kTfLiteInplaceOpDataUnmodified = 1,
-  // Setting kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput means
-  // that InputN may be shared with OutputN instead of with the first output.
-  // This flag requires one or more of kTfLiteInplaceOpInputNShared to be set.
+  /// Setting kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput means
+  /// that InputN may be shared with OutputN instead of with the first output.
+  /// This flag requires one or more of kTfLiteInplaceOpInputNShared to be set.
   kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput = 2,
-  // kTfLiteInplaceOpInputNShared indicates that it is safe for an op to share
-  // InputN's data pointer with an output tensor. If
-  // kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput is set then
-  // kTfLiteInplaceOpInputNShared indicates that InputN may be shared
-  // with OutputN, otherwise kTfLiteInplaceOpInputNShared indicates that InputN
-  // may be shared with the first output.
-  //
-  // Indicates that an op's first input may be shared with the first output
-  // tensor. kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput has
-  // no impact on the behavior allowed by this flag.
+  /// kTfLiteInplaceOpInputNShared indicates that it is safe for an op to share
+  /// InputN's data pointer with an output tensor. If
+  /// kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput is set then
+  /// kTfLiteInplaceOpInputNShared indicates that InputN may be shared
+  /// with OutputN, otherwise kTfLiteInplaceOpInputNShared indicates that InputN
+  /// may be shared with the first output.
+  ///
+  /// Indicates that an op's first input may be shared with the first output
+  /// tensor. kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput has
+  /// no impact on the behavior allowed by this flag.
   kTfLiteInplaceOpInput0Shared = 4,
-  // Indicates that an op's second input may be shared with the first output
-  // if kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput is not set
-  // or second output if kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput
-  // is set.
+  /// Indicates that an op's second input may be shared with the first output
+  /// if kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput is not set
+  /// or second output if kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput
+  /// is set.
   kTfLiteInplaceOpInput1Shared = 8,
-  // Indicates that an op's third input may be shared with the first output
-  // if kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput is not set
-  // or third output if kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput is
-  // set.
+  /// Indicates that an op's third input may be shared with the
+  /// first output if
+  /// kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput is not set,
+  /// or third output if
+  /// kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput is set.
   kTfLiteInplaceOpInput2Shared = 16,
-  // Placeholder to ensure that enum can hold 64 bit values to accommodate
-  // future fields.
+  /// Placeholder to ensure that enum can hold 64 bit values to accommodate
+  /// future fields.
   kTfLiteInplaceOpMaxValue = UINT64_MAX,
 } TfLiteInPlaceOp;
 
-// The number of shareable inputs supported.
+/// The number of shareable inputs supported.
 static const int kTfLiteMaxSharableOpInputs = 3;
 
+/// `TfLiteRegistration` defines the implementation of an operation
+/// (a built-in op, custom op, or custom delegate kernel).
+///
+/// It is a struct containing "methods" (C function pointers) that will be
+/// invoked by the TF Lite runtime to evaluate instances of the operation.
+///
+/// See also `TfLiteOperator` which is a more ABI-stable equivalent.
 typedef struct TfLiteRegistration {
-  // Initializes the op from serialized data.
-  // Called only *once* for the lifetime of the op, so any one-time allocations
-  // should be made here (unless they depend on tensor sizes).
-  //
-  // If a built-in op:
-  //   `buffer` is the op's params data (TfLiteLSTMParams*).
-  //   `length` is zero.
-  // If custom op:
-  //   `buffer` is the op's `custom_options`.
-  //   `length` is the size of the buffer.
-  //
-  // Returns a type-punned (i.e. void*) opaque data (e.g. a primitive pointer
-  // or an instance of a struct).
-  //
-  // The returned pointer will be stored with the node in the `user_data` field,
-  // accessible within prepare and invoke functions below.
-  // NOTE: if the data is already in the desired format, simply implement this
-  // function to return `nullptr` and implement the free function to be a no-op.
+  /// Initializes the op from serialized data.
+  /// Called only *once* for the lifetime of the op, so any one-time allocations
+  /// should be made here (unless they depend on tensor sizes).
+  ///
+  /// * If a built-in op:
+  ///       * `buffer` is the op's params data (TfLiteLSTMParams*).
+  ///       * `length` is zero.
+  /// * If custom op:
+  ///       * `buffer` is the op's `custom_options`.
+  ///       * `length` is the size of the buffer.
+  ///
+  /// Returns a type-punned (i.e. void*) opaque data (e.g. a primitive pointer
+  /// or an instance of a struct).
+  ///
+  /// The returned pointer will be stored with the node in the `user_data`
+  /// field, accessible within prepare and invoke functions below.
+  ///
+  /// NOTE: if the data is already in the desired format, simply implement this
+  /// function to return `nullptr` and implement the free function to be a
+  /// no-op.
   void* (*init)(TfLiteContext* context, const char* buffer, size_t length);
 
-  // The pointer `buffer` is the data previously returned by an init invocation.
+  /// The pointer `buffer` is the data previously returned by an init
+  /// invocation.
   void (*free)(TfLiteContext* context, void* buffer);
 
-  // prepare is called when the inputs this node depends on have been resized.
-  // context->ResizeTensor() can be called to request output tensors to be
-  // resized.
-  // Can be called multiple times for the lifetime of the op.
-  //
-  // Returns kTfLiteOk on success.
+  /// prepare is called when the inputs this node depends on have been resized.
+  /// `context->ResizeTensor()` can be called to request output tensors to be
+  /// resized.
+  /// Can be called multiple times for the lifetime of the op.
+  ///
+  /// Returns `kTfLiteOk` on success.
   TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node);
 
-  // Execute the node (should read node->inputs and output to node->outputs).
-  // Returns kTfLiteOk on success.
+  /// Execute the node (should read `node->inputs` and output to
+  /// `node->outputs`).
+  ///
+  /// Returns `kTfLiteOk` on success.
   TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node);
 
-  // profiling_string is called during summarization of profiling information
-  // in order to group executions together. Providing a value here will cause a
-  // given op to appear multiple times is the profiling report. This is
-  // particularly useful for custom ops that can perform significantly
-  // different calculations depending on their `user-data`.
+  /// `profiling_string` is called during summarization of profiling information
+  /// in order to group executions together. Providing a value here will cause a
+  /// given op to appear multiple times in the profiling report. This is
+  /// particularly useful for custom ops that can perform significantly
+  /// different calculations depending on their `user-data`.
   const char* (*profiling_string)(const TfLiteContext* context,
                                   const TfLiteNode* node);
 
-  // Builtin codes. If this kernel refers to a builtin this is the code
-  // of the builtin. This is so we can do marshaling to other frameworks like
-  // NN API.
-  // Note: It is the responsibility of the registration binder to set this
-  // properly.
+  /// Builtin codes. If this kernel refers to a builtin this is the code
+  /// of the builtin. This is so we can do marshaling to other frameworks like
+  /// NN API.
+  ///
+  /// Note: It is the responsibility of the registration binder to set this
+  /// properly.
   int32_t builtin_code;
 
-  // Custom op name. If the op is a builtin, this will be null.
-  // Note: It is the responsibility of the registration binder to set this
-  // properly.
-  // WARNING: This is an experimental interface that is subject to change.
+  /// Custom op name. If the op is a builtin, this will be `null`.
+  ///
+  /// Note: It is the responsibility of the registration binder to set this
+  /// properly.
+  ///
+  /// WARNING: This is an experimental interface that is subject to change.
   const char* custom_name;
 
-  // The version of the op.
-  // Note: It is the responsibility of the registration binder to set this
-  // properly.
+  /// The version of the op.
+  /// Note: It is the responsibility of the registration binder to set this
+  /// properly.
   int version;
 
-  // The external version of `TfLiteRegistration`. Since we can't use internal
-  // types (such as `TfLiteContext`) for C API to maintain ABI stability.
-  // C API user will provide `TfLiteRegistrationExternal` to implement custom
-  // ops. We keep it inside of `TfLiteRegistration` and use it to route
-  // callbacks properly.
-  TfLiteRegistrationExternal* registration_external;
+  /// The external (i.e. ABI-stable) version of `TfLiteRegistration`.
+  /// Since we can't use internal types (such as `TfLiteContext`) for C API to
+  /// maintain ABI stability.  C API user will provide `TfLiteOperator` to
+  /// implement custom ops.  We keep it inside of `TfLiteRegistration` and use
+  /// it to route callbacks properly.
+  TfLiteOperator* registration_external;
 
-  // Retrieves asynchronous kernel.
-  //
-  // If the `async_kernel` field is nullptr, it means the operation described by
-  // this TfLiteRegistration object does not support asynchronous execution.
-  // Otherwise, the function that the field points to should only be called for
-  // delegate kernel nodes, i.e. `node` should be a delegate kernel node created
-  // by applying a delegate.
-  // If the function returns nullptr, that means that the underlying delegate
-  // does not support asynchronous execution for this `node`.
+  /// Retrieves asynchronous kernel.
+  ///
+  /// If the `async_kernel` field is nullptr, it means the operation described
+  /// by this TfLiteRegistration object does not support asynchronous execution.
+  /// Otherwise, the function that the field points to should only be called for
+  /// delegate kernel nodes, i.e. `node` should be a delegate kernel node
+  /// created by applying a delegate. If the function returns nullptr, that
+  /// means that the underlying delegate does not support asynchronous execution
+  /// for this `node`.
   struct TfLiteAsyncKernel* (*async_kernel)(TfLiteContext* context,
                                             TfLiteNode* node);
 
-  // Indicates if an operator's output may safely overwrite its inputs.
-  // See the comments in `TfLiteInPlaceOp`.
+  /// Indicates if an operator's output may safely overwrite its inputs.
+  /// See the comments in `TfLiteInPlaceOp`.
   uint64_t inplace_operator;
 } TfLiteRegistration;
 
 /// \private
-// Old version of `TfLiteRegistration` to maintain binary backward
-// compatibility.
-// The legacy registration type must be a POD struct type whose field types must
-// be a prefix of the field types in TfLiteRegistration, and offset of the first
-// field in TfLiteRegistration that is not present in the legacy registration
-// type must be greater than or equal to the size of the legacy registration
-// type.
-// WARNING: This structure is deprecated / not an official part of the
-// API. It should be only used for binary backward compatibility.
+/// Old version of `TfLiteRegistration` to maintain binary backward
+/// compatibility.
+/// The legacy registration type must be a POD struct type whose field types
+/// must be a prefix of the field types in TfLiteRegistration, and offset of the
+/// first field in TfLiteRegistration that is not present in the legacy
+/// registration type must be greater than or equal to the size of the legacy
+/// registration type.
+///
+/// WARNING: This structure is deprecated / not an official part of the
+/// API. It should be only used for binary backward compatibility.
 typedef struct TfLiteRegistration_V3 {
   void* (*init)(TfLiteContext* context, const char* buffer, size_t length);
   void (*free)(TfLiteContext* context, void* buffer);
@@ -1084,21 +1211,22 @@
   int32_t builtin_code;
   const char* custom_name;
   int version;
-  TfLiteRegistrationExternal* registration_external;
+  TfLiteOperator* registration_external;
   struct TfLiteAsyncKernel* (*async_kernel)(TfLiteContext* context,
                                             TfLiteNode* node);
 } TfLiteRegistration_V3;
 
 /// \private
-// Old version of `TfLiteRegistration` to maintain binary backward
-// compatibility.
-// The legacy registration type must be a POD struct type whose field types must
-// be a prefix of the field types in TfLiteRegistration, and offset of the first
-// field in TfLiteRegistration that is not present in the legacy registration
-// type must be greater than or equal to the size of the legacy registration
-// type.
-// WARNING: This structure is deprecated / not an official part of the
-// API. It should be only used for binary backward compatibility.
+/// Old version of `TfLiteRegistration` to maintain binary backward
+/// compatibility.
+/// The legacy registration type must be a POD struct type whose field types
+/// must be a prefix of the field types in TfLiteRegistration, and offset of the
+/// first field in TfLiteRegistration that is not present in the legacy
+/// registration type must be greater than or equal to the size of the legacy
+/// registration type.
+///
+/// WARNING: This structure is deprecated / not an official part of the
+/// API. It should be only used for binary backward compatibility.
 typedef struct TfLiteRegistration_V2 {
   void* (*init)(TfLiteContext* context, const char* buffer, size_t length);
   void (*free)(TfLiteContext* context, void* buffer);
@@ -1109,19 +1237,20 @@
   int32_t builtin_code;
   const char* custom_name;
   int version;
-  TfLiteRegistrationExternal* registration_external;
+  TfLiteOperator* registration_external;
 } TfLiteRegistration_V2;
 
 /// \private
-// Old version of `TfLiteRegistration` to maintain binary backward
-// compatibility.
-// The legacy registration type must be a POD struct type whose field types must
-// be a prefix of the field types in TfLiteRegistration, and offset of the first
-// field in TfLiteRegistration that is not present in the legacy registration
-// type must be greater than or equal to the size of the legacy registration
-// type.
-// WARNING: This structure is deprecated / not an official part of the
-// API. It should be only used for binary backward compatibility.
+/// Old version of `TfLiteRegistration` to maintain binary backward
+/// compatibility.
+/// The legacy registration type must be a POD struct type whose field types
+/// must be a prefix of the field types in TfLiteRegistration, and offset of the
+/// first field in TfLiteRegistration that is not present in the legacy
+/// registration type must be greater than or equal to the size of the legacy
+/// registration type.
+///
+/// WARNING: This structure is deprecated / not an official part of the
+/// API. It should be only used for binary backward compatibility.
 typedef struct TfLiteRegistration_V1 {
   void* (*init)(TfLiteContext* context, const char* buffer, size_t length);
   void (*free)(TfLiteContext* context, void* buffer);
@@ -1134,184 +1263,202 @@
   int version;
 } TfLiteRegistration_V1;
 
-// The flags used in `TfLiteDelegate`. Note that this is a bitmask, so the
-// values should be 1, 2, 4, 8, ...etc.
+/// The flags used in `TfLiteDelegate`. Note that this is a bitmask, so the
+/// values should be 1, 2, 4, 8, ...etc.
 typedef enum TfLiteDelegateFlags {
   kTfLiteDelegateFlagsNone = 0,
-  // The flag is set if the delegate can handle dynamic sized tensors.
-  // For example, the output shape of a `Resize` op with non-constant shape
-  // can only be inferred when the op is invoked.
-  // In this case, the Delegate is responsible for calling
-  // `SetTensorToDynamic` to mark the tensor as a dynamic tensor, and calling
-  // `ResizeTensor` when invoking the op.
-  //
-  // If the delegate isn't capable to handle dynamic tensors, this flag need
-  // to be set to false.
+  /// The flag is set if the delegate can handle dynamic sized tensors.
+  /// For example, the output shape of a `Resize` op with non-constant shape
+  /// can only be inferred when the op is invoked.
+  /// In this case, the Delegate is responsible for calling
+  /// `SetTensorToDynamic` to mark the tensor as a dynamic tensor, and calling
+  /// `ResizeTensor` when invoking the op.
+  ///
+  /// If the delegate isn't capable of handling dynamic tensors, this flag
+  /// needs to be set to false.
   kTfLiteDelegateFlagsAllowDynamicTensors = 1,
 
-  // This flag can be used by delegates (that allow dynamic tensors) to ensure
-  // applicable tensor shapes are automatically propagated in the case of tensor
-  // resizing.
-  // This means that non-dynamic (allocation_type != kTfLiteDynamic) I/O tensors
-  // of a delegate kernel will have correct shapes before its Prepare() method
-  // is called. The runtime leverages TFLite builtin ops in the original
-  // execution plan to propagate shapes.
-  //
-  // A few points to note:
-  // 1. This requires kTfLiteDelegateFlagsAllowDynamicTensors. If that flag is
-  // false, this one is redundant since the delegate kernels are re-initialized
-  // every time tensors are resized.
-  // 2. Enabling this flag adds some overhead to AllocateTensors(), since extra
-  // work is required to prepare the original execution plan.
-  // 3. This flag requires that the original execution plan only have ops with
-  // valid registrations (and not 'dummy' custom ops like with Flex).
-  // WARNING: This feature is experimental and subject to change.
+  /// This flag can be used by delegates (that allow dynamic tensors) to ensure
+  /// applicable tensor shapes are automatically propagated in the case of
+  /// tensor resizing. This means that non-dynamic (allocation_type !=
+  /// kTfLiteDynamic) I/O tensors of a delegate kernel will have correct shapes
+  /// before its Prepare() method is called. The runtime leverages TFLite
+  /// builtin ops in the original execution plan to propagate shapes.
+  ///
+  /// A few points to note:
+  /// 1. This requires kTfLiteDelegateFlagsAllowDynamicTensors. If that flag is
+  /// false, this one is redundant since the delegate kernels are re-initialized
+  /// every time tensors are resized.
+  /// 2. Enabling this flag adds some overhead to AllocateTensors(), since extra
+  /// work is required to prepare the original execution plan.
+  /// 3. This flag requires that the original execution plan only have ops with
+  /// valid registrations (and not 'dummy' custom ops like with Flex).
+  ///
+  /// WARNING: This feature is experimental and subject to change.
   kTfLiteDelegateFlagsRequirePropagatedShapes = 2,
 
-  // This flag can be used by delegates to request per-operator profiling. If a
-  // node is a delegate node, this flag will be checked before profiling. If
-  // set, then the node will not be profiled. The delegate will then add per
-  // operator information using Profiler::EventType::OPERATOR_INVOKE_EVENT and
-  // the results will appear in the operator-wise Profiling section and not in
-  // the Delegate internal section.
+  /// This flag can be used by delegates to request per-operator profiling. If a
+  /// node is a delegate node, this flag will be checked before profiling. If
+  /// set, then the node will not be profiled. The delegate will then add per
+  /// operator information using `Profiler::EventType::OPERATOR_INVOKE_EVENT`
+  /// and the results will appear in the operator-wise Profiling section and not
+  /// in the Delegate internal section.
   kTfLiteDelegateFlagsPerOperatorProfiling = 4
 } TfLiteDelegateFlags;
 
-// WARNING: This is an experimental interface that is subject to change.
+/// WARNING: This is an experimental interface that is subject to change.
 typedef struct TfLiteDelegate {
-  // Data that delegate needs to identify itself. This data is owned by the
-  // delegate. The delegate is owned in the user code, so the delegate is
-  // responsible for deallocating this when it is destroyed.
+  /// Data that delegate needs to identify itself. This data is owned by the
+  /// delegate. The delegate is owned in the user code, so the delegate is
+  /// responsible for deallocating this when it is destroyed.
   void* data_;
 
-  // Invoked by ModifyGraphWithDelegate. This prepare is called, giving the
-  // delegate a view of the current graph through TfLiteContext*. It typically
-  // will look at the nodes and call ReplaceNodeSubsetsWithDelegateKernels()
-  // to ask the TensorFlow lite runtime to create macro-nodes to represent
-  // delegated subgraphs of the original graph.
+  /// Invoked by `ModifyGraphWithDelegate`. This prepare is called, giving the
+  /// delegate a view of the current graph through `TfLiteContext*`. It
+  /// typically will look at the nodes and call
+  /// `ReplaceNodeSubsetsWithDelegateKernels()` to ask the TensorFlow lite
+  /// runtime to create macro-nodes to represent delegated subgraphs of the
+  /// original graph.
   TfLiteStatus (*Prepare)(TfLiteContext* context,
                           struct TfLiteDelegate* delegate);
 
-  // Copy the data from delegate buffer handle into raw memory of the given
-  // 'tensor'. Note that the delegate is allowed to allocate the raw bytes as
-  // long as it follows the rules for kTfLiteDynamic tensors, in which case this
-  // cannot be null.
+  /// Copy the data from delegate buffer handle into raw memory of the given
+  /// `tensor`. Note that the delegate is allowed to allocate the raw bytes as
+  /// long as it follows the rules for `kTfLiteDynamic` tensors, in which case
+  /// this cannot be null.
   TfLiteStatus (*CopyFromBufferHandle)(TfLiteContext* context,
                                        struct TfLiteDelegate* delegate,
                                        TfLiteBufferHandle buffer_handle,
                                        TfLiteTensor* tensor);
 
-  // Copy the data from raw memory of the given 'tensor' to delegate buffer
-  // handle. This can be null if the delegate doesn't use its own buffer.
+  /// Copy the data from raw memory of the given `tensor` to delegate buffer
+  /// handle. This can be null if the delegate doesn't use its own buffer.
   TfLiteStatus (*CopyToBufferHandle)(TfLiteContext* context,
                                      struct TfLiteDelegate* delegate,
                                      TfLiteBufferHandle buffer_handle,
                                      TfLiteTensor* tensor);
 
-  // Free the Delegate Buffer Handle. Note: This only frees the handle, but
-  // this doesn't release the underlying resource (e.g. textures). The
-  // resources are either owned by application layer or the delegate.
-  // This can be null if the delegate doesn't use its own buffer.
+  /// Free the Delegate Buffer Handle. Note: This only frees the handle, but
+  /// this doesn't release the underlying resource (e.g. textures). The
+  /// resources are either owned by application layer or the delegate.
+  /// This can be null if the delegate doesn't use its own buffer.
   void (*FreeBufferHandle)(TfLiteContext* context,
                            struct TfLiteDelegate* delegate,
                            TfLiteBufferHandle* handle);
 
-  // Bitmask flags. See the comments in `TfLiteDelegateFlags`.
+  /// Bitmask flags. See the comments in `TfLiteDelegateFlags`.
   int64_t flags;
 
-  // The opaque delegate builder associated with this object.  If set then the
-  // TF Lite runtime will give precedence to this field.  E.g. instead of
-  // invoking 'Prepare' via the function pointer inside the 'TfLiteDelegate'
-  // object, the runtime will first check if the corresponding function
-  // pointer inside 'opaque_delegate_builder' is set and if so invoke that.
-  //
-  // If this field is non-null, then the 'Prepare' field (of the
-  // 'TfLiteDelegate') should be null.
+  /// The opaque delegate builder associated with this object.  If set then the
+  /// TF Lite runtime will give precedence to this field.  E.g. instead of
+  /// invoking `Prepare` via the function pointer inside the `TfLiteDelegate`
+  /// object, the runtime will first check if the corresponding function
+  /// pointer inside `opaque_delegate_builder` is set and if so invoke that.
+  ///
+  /// If this field is non-null, then the `Prepare` field (of the
+  /// `TfLiteDelegate`) should be null.
   struct TfLiteOpaqueDelegateBuilder* opaque_delegate_builder;
 } TfLiteDelegate;
 
-// Build a 'null' delegate, with all the fields properly set to their default
-// values.
+/// Build a `null` delegate, with all the fields properly set to their default
+/// values.
 TfLiteDelegate TfLiteDelegateCreate(void);
 
-// `TfLiteOpaqueDelegateBuilder` is used for constructing
-// `TfLiteOpaqueDelegate`, see `TfLiteOpaqueDelegateCreate` below.  Note:
-// This struct is not ABI stable.
-//
-// For forward source compatibility `TfLiteOpaqueDelegateBuilder` objects should
-// be brace-initialized, so that all fields (including any that might be added
-// in the future) get zero-initialized.  The purpose of each field is exactly
-// the same as with `TfLiteDelegate`.
-//
-// WARNING: This is an experimental interface that is subject to change.
+/// `TfLiteOpaqueDelegateBuilder` is used for constructing
+/// `TfLiteOpaqueDelegate`, see `TfLiteOpaqueDelegateCreate` in c_api_opaque.h.
+/// NOTE: This struct is not ABI stable.
+///
+/// For forward source compatibility `TfLiteOpaqueDelegateBuilder` objects
+/// should be brace-initialized, so that all fields (including any that might be
+/// added in the future) get zero-initialized.  The purpose of each field is
+/// exactly the same as with `TfLiteDelegate`.
+///
+/// NOTE: This type is part of the TensorFlow Lite Extension APIs.
+/// We reserve the right to make changes to this API in future releases,
+/// potentially including non-backwards-compatible changes, on a different
+/// schedule than for the other TensorFlow Lite APIs. See
+/// https://www.tensorflow.org/guide/versions#separate_version_number_for_tensorflow_lite_extension_apis.
 typedef struct TfLiteOpaqueDelegateBuilder {
-  // Data that delegate needs to identify itself. This data is owned by the
-  // delegate. The delegate is owned in the user code, so the delegate is
-  // responsible for deallocating this when it is destroyed.
+  /// Data that delegate needs to identify itself. This data is owned by the
+  /// delegate. The delegate is owned in the user code, so the delegate is
+  /// responsible for deallocating this when it is destroyed.
   void* data;
-  // Invoked by ModifyGraphWithDelegate. This prepare is called, giving the
-  // delegate a view of the current graph through TfLiteContext*. It typically
-  // will look at the nodes and call ReplaceNodeSubsetsWithDelegateKernels()
-  // to ask the TensorFlow lite runtime to create macro-nodes to represent
-  // delegated subgraphs of the original graph.
+  /// Invoked by ModifyGraphWithDelegate. This prepare is called, giving the
+  /// delegate a view of the current graph through `TfLiteContext*`. It
+  /// typically will look at the nodes and call
+  /// `ReplaceNodeSubsetsWithDelegateKernels()` to ask the TensorFlow lite
+  /// runtime to create macro-nodes to represent delegated subgraphs of the
+  /// original graph.
   TfLiteStatus (*Prepare)(TfLiteOpaqueContext* context,  // NOLINT
                           TfLiteOpaqueDelegate* delegate, void* data);
-  // Copies the data from delegate buffer handle into raw memory of the given
-  // 'tensor'. Note that the delegate is allowed to allocate the raw bytes as
-  // long as it follows the rules for kTfLiteDynamic tensors, in which case this
-  // cannot be null.
+  /// Copies the data from delegate buffer handle into raw memory of the given
+  /// `tensor`. Note that the delegate is allowed to allocate the raw bytes as
+  /// long as it follows the rules for kTfLiteDynamic tensors, in which case
+  /// this cannot be null.
   TfLiteStatus (*CopyFromBufferHandle)(  // NOLINT
       TfLiteOpaqueContext* context, TfLiteOpaqueDelegate* delegate, void* data,
       TfLiteBufferHandle buffer_handle, TfLiteOpaqueTensor* tensor);
-  // Copies the data from raw memory of the given 'tensor' to delegate buffer
-  // handle. This can be null if the delegate doesn't use its own buffer.
+  /// Copies the data from raw memory of the given `tensor` to delegate buffer
+  /// handle. This can be null if the delegate doesn't use its own buffer.
   TfLiteStatus (*CopyToBufferHandle)(  // NOLINT
       TfLiteOpaqueContext* context, TfLiteOpaqueDelegate* delegate, void* data,
       TfLiteBufferHandle buffer_handle, TfLiteOpaqueTensor* tensor);
-  // Frees the Delegate Buffer Handle. Note: This only frees the handle, but
-  // this doesn't release the underlying resource (e.g. textures). The
-  // resources are either owned by application layer or the delegate.
-  // This can be null if the delegate doesn't use its own buffer.
+  /// Frees the Delegate Buffer Handle. Note: This only frees the handle, but
+  /// this doesn't release the underlying resource (e.g. textures). The
+  /// resources are either owned by application layer or the delegate.
+  /// This can be null if the delegate doesn't use its own buffer.
   void (*FreeBufferHandle)(TfLiteOpaqueContext* context,  // NOLINT
                            TfLiteOpaqueDelegate* delegate, void* data,
                            TfLiteBufferHandle* handle);
-  // Bitmask flags. See the comments in `TfLiteDelegateFlags`.
+  /// Bitmask flags. See the comments in `TfLiteDelegateFlags`.
   int64_t flags;
 } TfLiteOpaqueDelegateBuilder;
 
 #ifndef TF_LITE_STATIC_MEMORY
-// Creates an opaque delegate and returns its address.  The opaque delegate will
-// behave according to the provided 'opaque_delegate_builder'.  The lifetime of
-// the objects pointed to by any of the fields within the
-// 'opaque_delegate_builder' must outlive the returned
-// 'TfLiteOpaqueDelegate' and any 'TfLiteInterpreter',
-// 'TfLiteInterpreterOptions', 'tflite::Interpreter', or
-// 'tflite::InterpreterBuilder' that the delegate is added to.  The returned
-// address should be passed to 'TfLiteOpaqueDelegateDelete' for deletion.  If
-// 'opaque_delegate_builder' is a null pointer, then a null pointer will be
-// returned.
+// See c_api_opaque.h.
+// This declaration in common.h is only for backwards compatibility.
+// NOTE: This function is part of the TensorFlow Lite Extension APIs, see above.
 TfLiteOpaqueDelegate* TfLiteOpaqueDelegateCreate(
     const TfLiteOpaqueDelegateBuilder* opaque_delegate_builder);
 
-// Deletes the provided opaque 'delegate'.  This function has no effect if the
-// 'delegate' is a null pointer.
+// See c_api_opaque.h.
+// This declaration in common.h is only for backwards compatibility.
+// NOTE: This function is part of the TensorFlow Lite Extension APIs, see above.
 void TfLiteOpaqueDelegateDelete(TfLiteOpaqueDelegate* delegate);
 #endif  // TF_LITE_STATIC_MEMORY
 
-// Returns a pointer to the data associated with the provided opaque 'delegate'.
-//
-// A null pointer will be returned when:
-// - The 'delegate' is null.
-// - The 'data' field of the 'TfLiteOpaqueDelegateBuilder' used to construct the
-//   'delegate' was null.
-// - Or in case of any other error.
-// - The 'delegate' has been constructed via a 'TfLiteOpaqueDelegateBuilder',
-//   but the 'data' field of the 'TfLiteOpaqueDelegateBuilder' is null.
-//
-//  The data_ field of 'delegate' will be returned if the
-//  'opaque_delegate_builder' field is null.
+// See c_api_opaque.h.
+// This declaration in common.h is only for backwards compatibility.
+// NOTE: This function is part of the TensorFlow Lite Extension APIs, see above.
 void* TfLiteOpaqueDelegateGetData(const TfLiteOpaqueDelegate* delegate);
 
+/// Returns a tensor data allocation strategy.
+TfLiteAllocationStrategy TfLiteTensorGetAllocationStrategy(
+    const TfLiteTensor* t);
+
+/// Returns how stable a tensor data buffer address is across runs.
+TfLiteRunStability TfLiteTensorGetBufferAddressStability(const TfLiteTensor* t);
+
+/// Returns how stable a tensor's data values are across runs.
+TfLiteRunStability TfLiteTensorGetDataStability(const TfLiteTensor* t);
+
+/// Returns the operation step when the data of a tensor is populated.
+///
+/// Some operations can precompute their results before the evaluation step.
+/// This makes the data available earlier for subsequent operations.
+TfLiteRunStep TfLiteTensorGetDataKnownStep(const TfLiteTensor* t);
+
+/// Returns the operation step when the shape of a tensor is computed.
+///
+/// Some operations can precompute the shape of their results before the
+/// evaluation step. This makes the shape available earlier for subsequent
+/// operations.
+TfLiteRunStep TfLiteTensorGetShapeKnownStep(const TfLiteTensor* t);
+
+/** @} */
+// Ends `\addtogroup`, it's important for the doc generator that this doesn't
+// include the CC code below.
+
 #ifdef __cplusplus
 }  // extern "C"
 
diff --git a/tensorflow/lite/core/macros.h b/tensorflow/lite/core/macros.h
index d329ded..86de4da 100644
--- a/tensorflow/lite/core/macros.h
+++ b/tensorflow/lite/core/macros.h
@@ -65,14 +65,4 @@
 #define TFLITE_HAS_ATTRIBUTE_WEAK 0
 #endif
 
-#ifndef TF_LITE_STATIC_MEMORY
-// maximum size of a valid flatbuffer
-inline constexpr unsigned int flatbuffer_size_max = 2147483648;
-// If none zero then the buffer is stored outside of the flatbuffers, string
-inline constexpr char tflite_metadata_buffer_location[] = "buffer_location";
-// field for minimum runtime version, string
-inline constexpr char tflite_metadata_min_runtime_version[] =
-    "min_runtime_version";
-#endif
-
 #endif  // TENSORFLOW_LITE_CORE_MACROS_H_
diff --git a/tensorflow/lite/experimental/microfrontend/lib/filterbank_io.c b/tensorflow/lite/experimental/microfrontend/lib/filterbank_io.c
index 6ce4c7c..ea45d1b 100644
--- a/tensorflow/lite/experimental/microfrontend/lib/filterbank_io.c
+++ b/tensorflow/lite/experimental/microfrontend/lib/filterbank_io.c
@@ -17,7 +17,7 @@
 static void PrintArray(FILE* fp, const char* name, const int16_t* values,
                        size_t size) {
   fprintf(fp, "static int16_t filterbank_%s[] = {", name);
-  int i;
+  size_t i;
   for (i = 0; i < size; ++i) {
     fprintf(fp, "%d", values[i]);
     if (i < size - 1) {
diff --git a/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.c b/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.c
index f18ebf5..430fc6e 100644
--- a/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.c
+++ b/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.c
@@ -28,7 +28,7 @@
   config->output_scale_shift = 7;
 }
 
-static float FreqToMel(float freq) { return 1127.0 * log1p(freq / 700.0); }
+static float FreqToMel(float freq) { return 1127.0f * log1pf(freq / 700.0f); }
 
 static void CalculateCenterFrequencies(const int num_channels,
                                        const float lower_frequency_limit,
@@ -49,8 +49,8 @@
 
 static void QuantizeFilterbankWeights(const float float_weight, int16_t* weight,
                                       int16_t* unweight) {
-  *weight = floor(float_weight * (1 << kFilterbankBits) + 0.5);
-  *unweight = floor((1.0 - float_weight) * (1 << kFilterbankBits) + 0.5);
+  *weight = floorf(float_weight * (1 << kFilterbankBits) + 0.5f);
+  *unweight = floorf((1.0f - float_weight) * (1 << kFilterbankBits) + 0.5f);
 }
 
 int FilterbankPopulateState(const struct FilterbankConfig* config,
@@ -95,8 +95,8 @@
                              config->upper_band_limit, center_mel_freqs);
 
   // Always exclude DC.
-  const float hz_per_sbin = 0.5 * sample_rate / ((float)spectrum_size - 1);
-  state->start_index = 1.5 + config->lower_band_limit / hz_per_sbin;
+  const float hz_per_sbin = 0.5f * sample_rate / ((float)spectrum_size - 1);
+  state->start_index = 1.5f + config->lower_band_limit / hz_per_sbin;
   state->end_index = 0;  // Initialized to zero here, but actually set below.
 
   // For each channel, we need to figure out what frequencies belong to it, and
diff --git a/tensorflow/lite/experimental/microfrontend/lib/frontend_main.c b/tensorflow/lite/experimental/microfrontend/lib/frontend_main.c
index 861778c..60868f8 100644
--- a/tensorflow/lite/experimental/microfrontend/lib/frontend_main.c
+++ b/tensorflow/lite/experimental/microfrontend/lib/frontend_main.c
@@ -56,7 +56,7 @@
     audio_file_size -= num_samples_read;
 
     if (output.values != NULL) {
-      int i;
+      size_t i;
       for (i = 0; i < output.size; ++i) {
         printf("%d ", output.values[i]);
       }
diff --git a/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_io.c b/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_io.c
index 19c32b3..8a5d7a4 100644
--- a/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_io.c
+++ b/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_io.c
@@ -16,7 +16,7 @@
 
 void NoiseReductionWriteMemmapPreamble(
     FILE* fp, const struct NoiseReductionState* state) {
-  fprintf(fp, "static uint32_t noise_reduction_estimate[%zu];\n",
+  fprintf(fp, "static uint32_t noise_reduction_estimate[%d];\n",
           state->num_channels);
   fprintf(fp, "\n");
 }
diff --git a/tensorflow/lite/experimental/microfrontend/lib/window_io.c b/tensorflow/lite/experimental/microfrontend/lib/window_io.c
index d12cac2..5a7b047 100644
--- a/tensorflow/lite/experimental/microfrontend/lib/window_io.c
+++ b/tensorflow/lite/experimental/microfrontend/lib/window_io.c
@@ -16,7 +16,7 @@
 
 void WindowWriteMemmapPreamble(FILE* fp, const struct WindowState* state) {
   fprintf(fp, "static int16_t window_coefficients[] = {\n");
-  int i;
+  size_t i;
   for (i = 0; i < state->size; ++i) {
     fprintf(fp, "%d", state->coefficients[i]);
     if (i < state->size - 1) {
diff --git a/tensorflow/lite/experimental/microfrontend/lib/window_util.c b/tensorflow/lite/experimental/microfrontend/lib/window_util.c
index eee6e7b..6fdffdc 100644
--- a/tensorflow/lite/experimental/microfrontend/lib/window_util.c
+++ b/tensorflow/lite/experimental/microfrontend/lib/window_util.c
@@ -41,13 +41,13 @@
   }
 
   // Populate the window values.
-  const float arg = M_PI * 2.0 / ((float)state->size);
-  int i;
+  const float arg = (float)M_PI * 2.0f / ((float)state->size);
+  size_t i;
   for (i = 0; i < state->size; ++i) {
-    float float_value = 0.5 - (0.5 * cos(arg * (i + 0.5)));
+    float float_value = 0.5f - (0.5f * cosf(arg * (i + 0.5f)));
     // Scale it to fixed point and round it.
     state->coefficients[i] =
-        floor(float_value * (1 << kFrontendWindowBits) + 0.5);
+        floorf(float_value * (1 << kFrontendWindowBits) + 0.5f);
   }
 
   state->input_used = 0;
diff --git a/tensorflow/lite/kernels/internal/common.cc b/tensorflow/lite/kernels/internal/common.cc
index 1654ab8..fabb020 100644
--- a/tensorflow/lite/kernels/internal/common.cc
+++ b/tensorflow/lite/kernels/internal/common.cc
@@ -17,6 +17,53 @@
 
 namespace tflite {
 
+// Single-rounding MultiplyByQuantizedMultiplier
+#if TFLITE_SINGLE_ROUNDING
+int32_t MultiplyByQuantizedMultiplier(int32_t x, int32_t quantized_multiplier,
+                                      int shift) {
+  TFLITE_DCHECK(quantized_multiplier >= 0);
+  TFLITE_DCHECK(shift >= -31 && shift <= 30);
+
+  const int64_t total_shift = 31 - shift;
+  const int64_t round = static_cast<int64_t>(1) << (total_shift - 1);
+  int64_t result = x * static_cast<int64_t>(quantized_multiplier) + round;
+  result = result >> total_shift;
+
+  TFLITE_DCHECK(result >= std::numeric_limits<int32_t>::min() &&
+                result <= std::numeric_limits<int32_t>::max());
+  return static_cast<int32_t>(result);
+}
+
+int32_t MultiplyByQuantizedMultiplier(int64_t x, int32_t quantized_multiplier,
+                                      int shift) {
+  // Inputs:
+  // - quantized_multiplier has fixed point at bit 31
+  // - shift is -31 to +7 (negative for right shift)
+  //
+  // Assumptions: The following input ranges are assumed
+  // - quantize_scale>=0  (the usual range is (1<<30) to (1<<31)-1)
+  // - scaling is chosen so final scaled result fits in int32_t
+  // - input x is in the range -(1<<47) <= x < (1<<47)
+  TFLITE_DCHECK(quantized_multiplier >= 0);
+  TFLITE_DCHECK(shift >= -31 && shift < 8);
+  TFLITE_DCHECK(x >= -(static_cast<int64_t>(1) << 47) &&
+                x < (static_cast<int64_t>(1) << 47));
+
+  const int32_t reduced_multiplier =
+      (quantized_multiplier < 0x7FFF0000)
+          ? ((quantized_multiplier + (1 << 15)) >> 16)
+          : 0x7FFF;
+  const int64_t total_shift = 15 - shift;
+  const int64_t round = static_cast<int64_t>(1) << (total_shift - 1);
+  int64_t result = x * static_cast<int64_t>(reduced_multiplier) + round;
+  result = result >> total_shift;
+
+  TFLITE_DCHECK(result >= std::numeric_limits<int32_t>::min() &&
+                result <= std::numeric_limits<int32_t>::max());
+  return static_cast<int32_t>(result);
+}
+// Double-rounding MultiplyByQuantizedMultiplier
+#else
 int32_t MultiplyByQuantizedMultiplier(int32_t x, int32_t quantized_multiplier,
                                       int shift) {
   using gemmlowp::RoundingDivideByPOT;
@@ -51,5 +98,6 @@
   int32_t result = x >> total_shift;
   return result;
 }
+#endif  // TFLITE_SINGLE_ROUNDING
 
 }  // namespace tflite
diff --git a/tensorflow/lite/kernels/internal/common.h b/tensorflow/lite/kernels/internal/common.h
index 05184df..9761a8c 100644
--- a/tensorflow/lite/kernels/internal/common.h
+++ b/tensorflow/lite/kernels/internal/common.h
@@ -16,6 +16,10 @@
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_COMMON_H_
 
 #include <algorithm>
+#include <cstddef>
+#include <cstdint>
+
+#include "tensorflow/lite/kernels/internal/runtime_shape.h"
 #ifndef ALLOW_SLOW_GENERIC_DEPTHWISECONV_FALLBACK
 #ifdef GEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK
 #define ALLOW_SLOW_GENERIC_DEPTHWISECONV_FALLBACK
@@ -35,6 +39,117 @@
 
 constexpr int kReverseShift = -1;
 
+// Reduces and compresses dimensions so that broadcast handling becomes more
+// efficient. Returns true if the output shape is broadcastable; it doesn't
+// contain any degenerate dimension, i.e. shape dimension = 0. False otherwise.
+template <int MAX_DIM = 6>
+bool ReduceDimensionsForBroadcast(const RuntimeShape& input1_shape,
+                                  const RuntimeShape& input2_shape,
+                                  size_t* compressed_input1_stride,
+                                  size_t* compressed_input2_stride,
+                                  size_t* compressed_output_shape) {
+  size_t num_compressed_dims = 0;
+  size_t compressed_input1_shape[MAX_DIM];
+  size_t compressed_input2_shape[MAX_DIM];
+  std::fill(compressed_input1_shape, compressed_input1_shape + MAX_DIM, 1);
+  std::fill(compressed_input2_shape, compressed_input2_shape + MAX_DIM, 1);
+  std::fill(compressed_output_shape, compressed_output_shape + MAX_DIM, 1);
+  bool broadcast_input1 = false;
+  bool broadcast_input2 = false;
+  bool first_nonunit = true;
+  const size_t num_input1_dims = input1_shape.DimensionsCount();
+  const size_t num_input2_dims = input2_shape.DimensionsCount();
+  const int32_t* input1_dims = input1_shape.DimsData();
+  const int32_t* input2_dims = input2_shape.DimsData();
+  const size_t num_common_dims = std::min(num_input1_dims, num_input2_dims);
+  for (size_t i = 1; i <= num_common_dims; i++) {
+    const size_t input1_dim = input1_dims[num_input1_dims - i];
+    const size_t input2_dim = input2_dims[num_input2_dims - i];
+    if (input1_dim == 0 || input2_dim == 0) {
+      return false;
+    }
+    if (input1_dim == 1 && input2_dim == 1) {
+      continue;
+    }
+    assert(!broadcast_input1 || !broadcast_input2);
+
+    if (input1_dim == 1) {
+      if (!broadcast_input1) {
+        broadcast_input1 = true;
+        broadcast_input2 = false;
+        num_compressed_dims++;
+      }
+      compressed_input2_shape[num_compressed_dims - 1] *= input2_dim;
+      compressed_output_shape[num_compressed_dims - 1] *= input2_dim;
+    } else if (input2_dim == 1) {
+      if (!broadcast_input2) {
+        broadcast_input1 = false;
+        broadcast_input2 = true;
+        num_compressed_dims++;
+      }
+      compressed_input1_shape[num_compressed_dims - 1] *= input1_dim;
+      compressed_output_shape[num_compressed_dims - 1] *= input1_dim;
+    } else {
+      TFLITE_DCHECK(input1_dim == input2_dim);
+      if (broadcast_input1 || broadcast_input2 || first_nonunit) {
+        broadcast_input1 = false;
+        broadcast_input2 = false;
+        num_compressed_dims++;
+      }
+      compressed_input1_shape[num_compressed_dims - 1] *= input1_dim;
+      compressed_input2_shape[num_compressed_dims - 1] *= input1_dim;
+      compressed_output_shape[num_compressed_dims - 1] *= input1_dim;
+    }
+    first_nonunit = false;
+  }
+  if (num_input1_dims > num_input2_dims) {
+    if (!broadcast_input2) {
+      num_compressed_dims++;
+    }
+    for (size_t i = 0; i < num_input1_dims - num_input2_dims; i++) {
+      const size_t input1_dim = input1_dims[i];
+      if (input1_dim == 0) {
+        return false;
+      }
+      compressed_input1_shape[num_compressed_dims - 1] *= input1_dim;
+      compressed_output_shape[num_compressed_dims - 1] *= input1_dim;
+    }
+  } else if (num_input2_dims > num_input1_dims) {
+    if (!broadcast_input1) {
+      num_compressed_dims++;
+    }
+    for (size_t i = 0; i < num_input2_dims - num_input1_dims; i++) {
+      const size_t input2_dim = input2_dims[i];
+      if (input2_dim == 0) {
+        return false;
+      }
+      compressed_input2_shape[num_compressed_dims - 1] *= input2_dim;
+      compressed_output_shape[num_compressed_dims - 1] *= input2_dim;
+    }
+  }
+  num_compressed_dims = (num_compressed_dims > 1) ? num_compressed_dims : 1;
+
+  int input1_stride = 1;
+  int input2_stride = 1;
+  for (int i = 0; i < MAX_DIM; ++i) {
+    compressed_input1_stride[i] = input1_stride;
+    input1_stride *= compressed_input1_shape[i];
+    compressed_input2_stride[i] = input2_stride;
+    input2_stride *= compressed_input2_shape[i];
+  }
+  for (int i = 0; i < MAX_DIM; ++i) {
+    if (compressed_input1_shape[i] != compressed_input2_shape[i]) {
+      if (compressed_input1_shape[i] == 1) {
+        compressed_input1_stride[i] = 0;
+      } else {
+        TFLITE_DCHECK_EQ(compressed_input2_shape[i], 1);
+        compressed_input2_stride[i] = 0;
+      }
+    }
+  }
+  return true;
+}
+
 inline void GetActivationMinMax(FusedActivationFunctionType ac,
                                 float* output_activation_min,
                                 float* output_activation_max) {
@@ -142,24 +257,14 @@
 #endif
 }
 
+TFLITE_NOINLINE int32_t MultiplyByQuantizedMultiplier(
+    int32_t x, int32_t quantized_multiplier, int shift);
+
+TFLITE_NOINLINE int32_t MultiplyByQuantizedMultiplier(
+    int64_t x, int32_t quantized_multiplier, int shift);
+
 // Single-rounding MultiplyByQuantizedMultiplier
 #if TFLITE_SINGLE_ROUNDING
-inline int32_t MultiplyByQuantizedMultiplier(int32_t x,
-                                             int32_t quantized_multiplier,
-                                             int shift) {
-  TFLITE_DCHECK(quantized_multiplier >= 0);
-  TFLITE_DCHECK(shift >= -31 && shift <= 30);
-
-  const int64_t total_shift = 31 - shift;
-  const int64_t round = static_cast<int64_t>(1) << (total_shift - 1);
-  int64_t result = x * static_cast<int64_t>(quantized_multiplier) + round;
-  result = result >> total_shift;
-
-  TFLITE_DCHECK(result >= std::numeric_limits<int32_t>::min() &&
-                result <= std::numeric_limits<int32_t>::max());
-  return static_cast<int32_t>(result);
-}
-
 inline int32_t MultiplyByQuantizedMultiplierSmallerThanOneExp(
     int32_t x, int32_t quantized_multiplier, int shift) {
   TFLITE_DCHECK_LE(shift, 0);
@@ -172,36 +277,6 @@
   return MultiplyByQuantizedMultiplier(x, quantized_multiplier, shift);
 }
 
-inline int32_t MultiplyByQuantizedMultiplier(int64_t x,
-                                             int32_t quantized_multiplier,
-                                             int shift) {
-  // Inputs:
-  // - quantized_multiplier has fixed point at bit 31
-  // - shift is -31 to +7 (negative for right shift)
-  //
-  // Assumptions: The following input ranges are assumed
-  // - quantize_scale>=0  (the usual range is (1<<30) to (1>>31)-1)
-  // - scaling is chosen so final scaled result fits in int32_t
-  // - input x is in the range -(1<<47) <= x < (1<<47)
-  TFLITE_DCHECK(quantized_multiplier >= 0);
-  TFLITE_DCHECK(shift >= -31 && shift < 8);
-  TFLITE_DCHECK(x >= -(static_cast<int64_t>(1) << 47) &&
-                x < (static_cast<int64_t>(1) << 47));
-
-  const int32_t reduced_multiplier =
-      (quantized_multiplier < 0x7FFF0000)
-          ? ((quantized_multiplier + (1 << 15)) >> 16)
-          : 0x7FFF;
-  const int64_t total_shift = 15 - shift;
-  const int64_t round = static_cast<int64_t>(1) << (total_shift - 1);
-  int64_t result = x * static_cast<int64_t>(reduced_multiplier) + round;
-  result = result >> total_shift;
-
-  TFLITE_DCHECK(result >= std::numeric_limits<int32_t>::min() &&
-                result <= std::numeric_limits<int32_t>::max());
-  return static_cast<int32_t>(result);
-}
-
 #ifdef USE_NEON
 inline int32x4x4_t MultiplyByQuantizedMultiplier4Rows(
     int32x4x4_t input_val, int32_t quantized_multiplier, int shift) {
@@ -251,12 +326,6 @@
                                            quantized_multiplier);
 }
 
-TFLITE_NOINLINE int32_t MultiplyByQuantizedMultiplier(
-    int32_t x, int32_t quantized_multiplier, int shift);
-
-TFLITE_NOINLINE int32_t MultiplyByQuantizedMultiplier(
-    int64_t x, int32_t quantized_multiplier, int shift);
-
 #ifdef USE_NEON
 // Round uses ARM's rounding shift right.
 inline int32x4x4_t MultiplyByQuantizedMultiplier4Rows(
diff --git a/tensorflow/lite/kernels/internal/cppmath.h b/tensorflow/lite/kernels/internal/cppmath.h
index c97cc31..67ab461 100644
--- a/tensorflow/lite/kernels/internal/cppmath.h
+++ b/tensorflow/lite/kernels/internal/cppmath.h
@@ -32,8 +32,8 @@
     return TF_LITE_GLOBAL_STD_PREFIX::std_name(x);    \
   }
 
-DECLARE_STD_GLOBAL_SWITCH1(TfLiteRound, round);
-DECLARE_STD_GLOBAL_SWITCH1(TfLiteExpm1, expm1);
+DECLARE_STD_GLOBAL_SWITCH1(TfLiteRound, round)
+DECLARE_STD_GLOBAL_SWITCH1(TfLiteExpm1, expm1)
 
 }  // namespace tflite
 
diff --git a/tensorflow/lite/kernels/internal/portable_tensor.h b/tensorflow/lite/kernels/internal/portable_tensor.h
index 1eee621..a9f9551 100644
--- a/tensorflow/lite/kernels/internal/portable_tensor.h
+++ b/tensorflow/lite/kernels/internal/portable_tensor.h
@@ -15,6 +15,7 @@
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_PORTABLE_TENSOR_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_PORTABLE_TENSOR_H_
 
+#include <cstddef>
 #include <vector>
 
 #include "tensorflow/lite/core/c/common.h"
@@ -50,6 +51,26 @@
       all_shape_ptr_.push_back(&all_shape_[i]);
     }
   }
+
+  explicit VectorOfTensors(const std::vector<TfLiteTensor*>& tensors) {
+    int num_tensors = tensors.size();
+
+    all_data_.reserve(num_tensors);
+    all_shape_.reserve(num_tensors);
+    all_shape_ptr_.reserve(num_tensors);
+
+    for (auto* t : tensors) {
+      all_data_.push_back(GetTensorData<T>(t));
+      all_shape_.push_back(GetTensorShape(t));
+    }
+
+    // Taking the pointer from inside a std::vector is only OK if the vector is
+    // never modified, so we populate all_shape in the previous loop and then we
+    // are free to grab pointers here.
+    for (int i = 0; i < num_tensors; ++i) {
+      all_shape_ptr_.push_back(&all_shape_[i]);
+    }
+  }
   // Return a pointer to the data pointers of all tensors in the list. For
   // example:
   //   float* const* f = v.data();
@@ -62,6 +83,8 @@
   //   dims[1] are the dimensions of the second tensor in the list.
   const RuntimeShape* const* shapes() const { return all_shape_ptr_.data(); }
 
+  size_t size() const { return all_data_.size(); }
+
  private:
   std::vector<T*> all_data_;
   std::vector<RuntimeShape> all_shape_;
diff --git a/tensorflow/lite/kernels/internal/portable_tensor_utils.cc b/tensorflow/lite/kernels/internal/portable_tensor_utils.cc
index 024043d..577fc6b 100644
--- a/tensorflow/lite/kernels/internal/portable_tensor_utils.cc
+++ b/tensorflow/lite/kernels/internal/portable_tensor_utils.cc
@@ -70,6 +70,12 @@
 
 void UnpackDenseInt4IntoInt8(const int8_t* src_buffer, int num_elements,
                              int8_t* dst_buffer) {
+  // num_elements is the number of int4 elements, regardless of whether the
+  // buffer is viewed as packed or unpacked. For example, 3 elements means:
+  //   1) Packed: 3 int4's = 12 bits -> 16 bits (padded) = 2 bytes,
+  //      stored in src_buffer[0] and src_buffer[1] (i = 0..1)
+  //   2) Unpacked: 3 int8's = 3 bytes,
+  //      stored in dst_buffer[0], dst_buffer[1] and dst_buffer[2] (j = 0..2)
   for (int i = 0; i < num_elements / 2; i++) {
     int8_t byte = src_buffer[i];
     // Shift left first so that sign is properly extended when shifted right
diff --git a/tensorflow/lite/kernels/internal/portable_tensor_utils.h b/tensorflow/lite/kernels/internal/portable_tensor_utils.h
index c28892c..ed59fd0 100644
--- a/tensorflow/lite/kernels/internal/portable_tensor_utils.h
+++ b/tensorflow/lite/kernels/internal/portable_tensor_utils.h
@@ -241,7 +241,8 @@
     const int32_t* __restrict__ indices, int m_rows, int m_cols,
     const int8_t* __restrict__ vector, const int32_t* __restrict__ bias_vector,
     int n_batch, const int32_t input_offset, const int32_t output_multiplier,
-    const int32_t output_shift, const int32_t output_offset,
+    int32_t output_shift, const int32_t* per_channel_scale,
+    const int32_t* per_channel_shift, int32_t output_offset,
     const int32_t output_activation_min, const int32_t output_activation_max,
     int8_t* __restrict__ result);
 
@@ -259,7 +260,7 @@
     const int8_t* __restrict__ matrix, const uint8_t* __restrict__ ledger,
     const int m_rows, const int m_cols, const int8_t* __restrict__ vectors,
     const float* __restrict__ scaling_factors, int n_batch,
-    float* __restrict__ result);
+    float* __restrict__ result, const float* per_channel_scale = nullptr);
 
 // Same as the above 8, 8, 8 integer matmul except for the presence of zero
 // point and non-accumulative.
@@ -316,7 +317,7 @@
 void ApplySigmoid(const int16_t* input, int32_t n_batch, int32_t n_input,
                   int16_t* output);
 
-// Same as above but the internal calcualtion is float.
+// Same as above but the internal calculation is float.
 void ApplySigmoidFloat(const int16_t* input, int32_t n_batch, int32_t n_input,
                        int16_t* output);
 
@@ -332,7 +333,7 @@
 void ApplyTanh(int32_t intger_bits, const int16_t* input, int32_t n_batch,
                int32_t n_input, int16_t* output);
 
-// Apply Tanh to a quantized vector. Tbe internal calculation is in float.
+// Apply Tanh to a quantized vector. The internal calculation is in float.
 //    - Input has 2^(integer_bits) as scale.
 //    - Output has Q0.15 as scale.
 void ApplyTanhFloat(const int16_t* input, int32_t n_batch, int32_t n_input,
diff --git a/tensorflow/lite/kernels/internal/quantization_util.h b/tensorflow/lite/kernels/internal/quantization_util.h
index 0ee914b..eb4e840 100644
--- a/tensorflow/lite/kernels/internal/quantization_util.h
+++ b/tensorflow/lite/kernels/internal/quantization_util.h
@@ -20,7 +20,6 @@
 #include <limits>
 
 #include "tensorflow/lite/kernels/internal/compatibility.h"
-#include "tensorflow/lite/kernels/internal/cppmath.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
 namespace tflite {
@@ -103,6 +102,7 @@
   return ChooseQuantizationParams<T>(rmin, rmax, false);
 }
 
+// LINT.IfChange
 // Converts a floating-point number to an integer. For all inputs x where
 // static_cast<IntOut>(x) is legal according to the C++ standard, the result
 // is identical to that cast (i.e. the result is x with its fractional part
@@ -167,6 +167,7 @@
   return x < 0 ? std::numeric_limits<IntOut>::min()
                : std::numeric_limits<IntOut>::max();
 }
+// LINT.ThenChange(//tensorflow/compiler/mlir/lite/kernels/internal/quantization_util.h)
 
 // Decompose a double multiplier into a Q0.31 int32 representation of its
 // significand, and shift representation of NEGATIVE its exponent ---
diff --git a/tensorflow/lite/kernels/internal/reference/add.h b/tensorflow/lite/kernels/internal/reference/add.h
index b89a57b..5b520bd 100644
--- a/tensorflow/lite/kernels/internal/reference/add.h
+++ b/tensorflow/lite/kernels/internal/reference/add.h
@@ -16,10 +16,13 @@
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_H_
 
 #include <algorithm>
+#include <cstddef>
+#include <cstdint>
 #include <type_traits>
 
 #include "fixedpoint/fixedpoint.h"
 #include "tensorflow/lite/kernels/internal/common.h"
+#include "tensorflow/lite/kernels/internal/compatibility.h"
 
 namespace tflite {
 
@@ -194,21 +197,135 @@
   }
 }
 
+template <typename T>
+inline void AddBroadcast(const T* input_data, const T* broadcast_data,
+                         T* output_data, size_t size, T activation_min,
+                         T activation_max) {
+  for (size_t c = 0; c < size; ++c) {
+    output_data[c] = ActivationFunctionWithMinMax<T>(
+        input_data[c] + broadcast_data[0], activation_min, activation_max);
+  }
+}
+
+template <>
+inline void AddBroadcast<int32_t>(const int32_t* input_data,
+                                  const int32_t* broadcast_data,
+                                  int32_t* output_data, size_t size,
+                                  int32_t activation_min,
+                                  int32_t activation_max) {
+  size_t c = 0;
+#ifdef USE_NEON
+  const int32x4_t vmax = vdupq_n_s32(activation_max);
+  const int32x4_t vmin = vdupq_n_s32(activation_min);
+  const int32x4_t vb = vdupq_n_s32(broadcast_data[0]);
+  for (; c + 4 <= size; c += 4) {
+    const int32x4_t va = vld1q_s32(&input_data[c]);
+    int32x4_t vres = vaddq_s32(va, vb);
+    vres = vmaxq_s32(vmin, vres);
+    vres = vminq_s32(vmax, vres);
+    vst1q_s32(&output_data[c], vres);
+  }
+#endif
+  for (; c < size; ++c) {
+    output_data[c] = ActivationFunctionWithMinMax<int32_t>(
+        input_data[c] + broadcast_data[0], activation_min, activation_max);
+  }
+}
+
+template <typename T>
+void AddElementwise(const T* input1_data, const T* input2_data, T* output_data,
+                    size_t size, T activation_min, T activation_max) {
+  for (size_t c = 0; c < size; ++c) {
+    output_data[c] = ActivationFunctionWithMinMax<T>(
+        input1_data[c] + input2_data[c], activation_min, activation_max);
+  }
+}
+
+template <>
+inline void AddElementwise<int32_t>(const int32_t* input1_data,
+                                    const int32_t* input2_data,
+                                    int32_t* output_data, size_t size,
+                                    int32_t activation_min,
+                                    int32_t activation_max) {
+  size_t c = 0;
+#ifdef USE_NEON
+  const int32x4_t vmax = vdupq_n_s32(activation_max);
+  const int32x4_t vmin = vdupq_n_s32(activation_min);
+  for (; c + 4 <= size; c += 4) {
+    const int32x4_t va = vld1q_s32(&input1_data[c]);
+    const int32x4_t vb = vld1q_s32(&input2_data[c]);
+    int32x4_t vres = vaddq_s32(va, vb);
+    vres = vmaxq_s32(vmin, vres);
+    vres = vminq_s32(vmax, vres);
+    vst1q_s32(&output_data[c], vres);
+  }
+#endif
+  for (; c < size; ++c) {
+    output_data[c] = ActivationFunctionWithMinMax<int32_t>(
+        input1_data[c] + input2_data[c], activation_min, activation_max);
+  }
+}
+
+template <typename T>
+inline void BroadcastAddRecursiveDimensions(
+    int dimension, size_t* input1_offset_p, size_t* input2_offset_p,
+    size_t* output_offset, size_t* compressed_input1_stride,
+    size_t* compressed_input2_stride, size_t* compressed_output_shape,
+    T activation_min, T activation_max, const T* input1_data,
+    const T* input2_data, T* output_data) {
+  if (dimension > 0) {
+    for (size_t c = 0; c < compressed_output_shape[dimension]; ++c) {
+      size_t input1_offset_c = *input1_offset_p;
+      size_t input2_offset_c = *input2_offset_p;
+      BroadcastAddRecursiveDimensions(
+          dimension - 1, &input1_offset_c, &input2_offset_c, output_offset,
+          compressed_input1_stride, compressed_input2_stride,
+          compressed_output_shape, activation_min, activation_max, input1_data,
+          input2_data, output_data);
+      *input1_offset_p += compressed_input1_stride[dimension];
+      *input2_offset_p += compressed_input2_stride[dimension];
+    }
+  } else {
+    TFLITE_DCHECK(dimension == 0);
+    bool input1_is_broadcast = compressed_input1_stride[dimension] == 0;
+    bool input2_is_broadcast = compressed_input2_stride[dimension] == 0;
+    TFLITE_DCHECK(!(input1_is_broadcast && input2_is_broadcast));
+    const T* input1_data_ptr = input1_data + *input1_offset_p;
+    const T* input2_data_ptr = input2_data + *input2_offset_p;
+    T* output_data_ptr = output_data + *output_offset;
+    if (input1_is_broadcast) {
+      // input1 is broadcast.
+      AddBroadcast<T>(input2_data_ptr, input1_data_ptr, output_data_ptr,
+                      compressed_output_shape[dimension], activation_min,
+                      activation_max);
+      *input2_offset_p += compressed_output_shape[dimension];
+    } else if (input2_is_broadcast) {
+      // input2 is broadcast.
+      AddBroadcast<T>(input1_data_ptr, input2_data_ptr, output_data_ptr,
+                      compressed_output_shape[dimension], activation_min,
+                      activation_max);
+      *input1_offset_p += compressed_output_shape[dimension];
+    } else {
+      // Add element-wise.
+      AddElementwise<T>(input1_data_ptr, input2_data_ptr, output_data_ptr,
+                        compressed_output_shape[dimension], activation_min,
+                        activation_max);
+      *input1_offset_p += compressed_output_shape[dimension];
+      *input2_offset_p += compressed_output_shape[dimension];
+    }
+    *output_offset += compressed_output_shape[dimension];
+  }
+}
+
 template <typename T,
-          // For unquantized add for small integers, explictly set to true.
+          // For unquantized add for small integers, explicitly set to true.
           bool dummy = false>
 inline typename std::enable_if<!is_small_integer<T>::value || dummy, void>::type
 BroadcastAdd6DSlow(const ArithmeticParams& params,
                    const RuntimeShape& input1_shape, const T* input1_data,
                    const RuntimeShape& input2_shape, const T* input2_data,
                    const RuntimeShape& output_shape, T* output_data) {
-  NdArrayDesc<6> desc1;
-  NdArrayDesc<6> desc2;
-  NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
-                                      &desc2);
-  const RuntimeShape extended_output_shape =
-      RuntimeShape::ExtendedShape(6, output_shape);
-
+  constexpr int kMaxBroadcastDim = 6;
   T activation_min, activation_max;
   GetActivationParams(params, &activation_min, &activation_max);
 
@@ -223,64 +340,74 @@
   // We name our variables by their Tensorflow convention, but generate C code
   // nesting loops such that the innermost loop has the smallest stride for the
   // best cache behavior.
-  size_t input1_offset_a = 0;
-  size_t input2_offset_a = 0;
-  size_t output_offset_a = 0;
-  for (int a = 0; a < extended_output_shape.Dims(0); ++a) {
-    size_t input1_offset_d = input1_offset_a;
-    size_t input2_offset_d = input2_offset_a;
-    size_t output_offset_d = output_offset_a;
-    for (int d = 0; d < extended_output_shape.Dims(1); ++d) {
-      size_t input1_offset_b = input1_offset_d;
-      size_t input2_offset_b = input2_offset_d;
-      size_t output_offset_b = output_offset_d;
-      for (int b = 0; b < extended_output_shape.Dims(2); ++b) {
-        size_t input1_offset_y = input1_offset_b;
-        size_t input2_offset_y = input2_offset_b;
-        size_t output_offset_y = output_offset_b;
-        for (int y = 0; y < extended_output_shape.Dims(3); ++y) {
-          size_t input1_offset_x = input1_offset_y;
-          size_t input2_offset_x = input2_offset_y;
-          size_t output_offset_x = output_offset_y;
-          for (int x = 0; x < extended_output_shape.Dims(4); ++x) {
-            size_t input1_offset_c = input1_offset_x;
-            size_t input2_offset_c = input2_offset_x;
-            size_t output_offset_c = output_offset_x;
-            for (int c = 0; c < extended_output_shape.Dims(5); ++c) {
-              output_data[output_offset_c] = ActivationFunctionWithMinMax<T>(
-                  input1_data[input1_offset_c] + input2_data[input2_offset_c],
-                  activation_min, activation_max);
-              input1_offset_c += desc1.strides[5];
-              input2_offset_c += desc2.strides[5];
-              ++output_offset_c;
-            }
-            input1_offset_x += desc1.strides[4];
-            input2_offset_x += desc2.strides[4];
-            output_offset_x += extended_output_shape.Dims(5);
-          }
-          input1_offset_y += desc1.strides[3];
-          input2_offset_y += desc2.strides[3];
-          output_offset_y +=
-              extended_output_shape.Dims(4) * extended_output_shape.Dims(5);
-        }
-        input1_offset_b += desc1.strides[2];
-        input2_offset_b += desc2.strides[2];
-        output_offset_b += extended_output_shape.Dims(3) *
-                           extended_output_shape.Dims(4) *
-                           extended_output_shape.Dims(5);
-      }
-      input1_offset_d += desc1.strides[1];
-      input2_offset_d += desc2.strides[1];
-      output_offset_d +=
-          extended_output_shape.Dims(2) * extended_output_shape.Dims(3) *
-          extended_output_shape.Dims(4) * extended_output_shape.Dims(5);
+  size_t compressed_input1_stride[kMaxBroadcastDim];
+  size_t compressed_input2_stride[kMaxBroadcastDim];
+  size_t compressed_output_shape[kMaxBroadcastDim];
+  bool broadcastable_shape = ReduceDimensionsForBroadcast<kMaxBroadcastDim>(
+      input1_shape, input2_shape, compressed_input1_stride,
+      compressed_input2_stride, compressed_output_shape);
+  // Skip broadcasting for degenerate shapes.
+  if (!broadcastable_shape) {
+    return;
+  }
+
+  size_t input1_offset = 0;
+  size_t input2_offset = 0;
+  size_t output_offset = 0;
+  BroadcastAddRecursiveDimensions<T>(
+      kMaxBroadcastDim - 1, &input1_offset, &input2_offset, &output_offset,
+      compressed_input1_stride, compressed_input2_stride,
+      compressed_output_shape, activation_min, activation_max, input1_data,
+      input2_data, output_data);
+}
+
+// This function is used for 8-bit as well as for 16-bit, but the accumulator
+// is 32-bit for both cases. The overflow does not happen due to the
+// choice of the shift (20 or 15, accordingly - see add.cc for more comments).
+template <typename T>
+inline void BroadcastAddRecursiveDimensions(
+    const ArithmeticParams& params, int dimension, size_t* input1_offset_p,
+    size_t* input2_offset_p, size_t* output_offset,
+    size_t* compressed_input1_stride, size_t* compressed_input2_stride,
+    size_t* compressed_output_shape, const T* input1_data, const T* input2_data,
+    T* output_data) {
+  for (size_t c = 0; c < compressed_output_shape[dimension]; ++c) {
+    if (dimension > 0) {
+      size_t input1_offset_c = *input1_offset_p;
+      size_t input2_offset_c = *input2_offset_p;
+      BroadcastAddRecursiveDimensions(
+          params, dimension - 1, &input1_offset_c, &input2_offset_c,
+          output_offset, compressed_input1_stride, compressed_input2_stride,
+          compressed_output_shape, input1_data, input2_data, output_data);
+    } else {
+      TFLITE_DCHECK(dimension == 0);
+      const int32_t input1_val =
+          params.input1_offset + input1_data[*input1_offset_p];
+      const int32_t input2_val =
+          params.input2_offset + input2_data[*input2_offset_p];
+      const int32_t shifted_input1_val = input1_val * (1 << params.left_shift);
+      const int32_t shifted_input2_val = input2_val * (1 << params.left_shift);
+      const int32_t scaled_input1_val =
+          MultiplyByQuantizedMultiplierSmallerThanOneExp(
+              shifted_input1_val, params.input1_multiplier,
+              params.input1_shift);
+      const int32_t scaled_input2_val =
+          MultiplyByQuantizedMultiplierSmallerThanOneExp(
+              shifted_input2_val, params.input2_multiplier,
+              params.input2_shift);
+      const int32_t raw_sum = scaled_input1_val + scaled_input2_val;
+      const int32_t raw_output =
+          MultiplyByQuantizedMultiplierSmallerThanOneExp(
+              raw_sum, params.output_multiplier, params.output_shift) +
+          params.output_offset;
+      const int32_t clamped_output =
+          std::min(params.quantized_activation_max,
+                   std::max(params.quantized_activation_min, raw_output));
+      output_data[*output_offset] = static_cast<T>(clamped_output);
+      ++(*output_offset);
     }
-    input1_offset_a += desc1.strides[0];
-    input2_offset_a += desc2.strides[0];
-    output_offset_a +=
-        extended_output_shape.Dims(1) * extended_output_shape.Dims(2) *
-        extended_output_shape.Dims(3) * extended_output_shape.Dims(4) *
-        extended_output_shape.Dims(5);
+    *input1_offset_p += compressed_input1_stride[dimension];
+    *input2_offset_p += compressed_input2_stride[dimension];
   }
 }
 
@@ -293,12 +420,7 @@
                    const RuntimeShape& input1_shape, const T* input1_data,
                    const RuntimeShape& input2_shape, const T* input2_data,
                    const RuntimeShape& output_shape, T* output_data) {
-  NdArrayDesc<6> desc1;
-  NdArrayDesc<6> desc2;
-  NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
-                                      &desc2);
-  const RuntimeShape extended_output_shape =
-      RuntimeShape::ExtendedShape(6, output_shape);
+  constexpr int kMaxBroadcastDim = 6;
 
   // In Tensorflow, the dimensions are canonically named (batch_number, row,
   // col, channel), with extents (batches, height, width, depth), with the
@@ -311,87 +433,24 @@
   // We name our variables by their Tensorflow convention, but generate C code
   // nesting loops such that the innermost loop has the smallest stride for the
   // best cache behavior.
-  size_t input1_offset_a = 0;
-  size_t input2_offset_a = 0;
-  size_t output_offset_a = 0;
-  for (int a = 0; a < extended_output_shape.Dims(0); ++a) {
-    size_t input1_offset_d = input1_offset_a;
-    size_t input2_offset_d = input2_offset_a;
-    size_t output_offset_d = output_offset_a;
-    for (int d = 0; d < extended_output_shape.Dims(1); ++d) {
-      size_t input1_offset_b = input1_offset_d;
-      size_t input2_offset_b = input2_offset_d;
-      size_t output_offset_b = output_offset_d;
-      for (int b = 0; b < extended_output_shape.Dims(2); ++b) {
-        size_t input1_offset_y = input1_offset_b;
-        size_t input2_offset_y = input2_offset_b;
-        size_t output_offset_y = output_offset_b;
-        for (int y = 0; y < extended_output_shape.Dims(3); ++y) {
-          size_t input1_offset_x = input1_offset_y;
-          size_t input2_offset_x = input2_offset_y;
-          size_t output_offset_x = output_offset_y;
-          for (int x = 0; x < extended_output_shape.Dims(4); ++x) {
-            size_t input1_offset_c = input1_offset_x;
-            size_t input2_offset_c = input2_offset_x;
-            size_t output_offset_c = output_offset_x;
-            for (int c = 0; c < extended_output_shape.Dims(5); ++c) {
-              const int32_t input1_val =
-                  params.input1_offset + input1_data[input1_offset_c];
-              const int32_t input2_val =
-                  params.input2_offset + input2_data[input2_offset_c];
-              const int32_t shifted_input1_val =
-                  input1_val * (1 << params.left_shift);
-              const int32_t shifted_input2_val =
-                  input2_val * (1 << params.left_shift);
-              const int32_t scaled_input1_val =
-                  MultiplyByQuantizedMultiplierSmallerThanOneExp(
-                      shifted_input1_val, params.input1_multiplier,
-                      params.input1_shift);
-              const int32_t scaled_input2_val =
-                  MultiplyByQuantizedMultiplierSmallerThanOneExp(
-                      shifted_input2_val, params.input2_multiplier,
-                      params.input2_shift);
-              const int32_t raw_sum = scaled_input1_val + scaled_input2_val;
-              const int32_t raw_output =
-                  MultiplyByQuantizedMultiplierSmallerThanOneExp(
-                      raw_sum, params.output_multiplier, params.output_shift) +
-                  params.output_offset;
-              const int32_t clamped_output = std::min(
-                  params.quantized_activation_max,
-                  std::max(params.quantized_activation_min, raw_output));
-              output_data[output_offset_c] = static_cast<T>(clamped_output);
-              input1_offset_c += desc1.strides[5];
-              input2_offset_c += desc2.strides[5];
-              ++output_offset_c;
-            }
-            input1_offset_x += desc1.strides[4];
-            input2_offset_x += desc2.strides[4];
-            output_offset_x += extended_output_shape.Dims(5);
-          }
-          input1_offset_y += desc1.strides[3];
-          input2_offset_y += desc2.strides[3];
-          output_offset_y +=
-              extended_output_shape.Dims(4) * extended_output_shape.Dims(5);
-        }
-        input1_offset_b += desc1.strides[2];
-        input2_offset_b += desc2.strides[2];
-        output_offset_b += extended_output_shape.Dims(3) *
-                           extended_output_shape.Dims(4) *
-                           extended_output_shape.Dims(5);
-      }
-      input1_offset_d += desc1.strides[1];
-      input2_offset_d += desc2.strides[1];
-      output_offset_d +=
-          extended_output_shape.Dims(2) * extended_output_shape.Dims(3) *
-          extended_output_shape.Dims(4) * extended_output_shape.Dims(5);
-    }
-    input1_offset_a += desc1.strides[0];
-    input2_offset_a += desc2.strides[0];
-    output_offset_a +=
-        extended_output_shape.Dims(1) * extended_output_shape.Dims(2) *
-        extended_output_shape.Dims(3) * extended_output_shape.Dims(4) *
-        extended_output_shape.Dims(5);
+  size_t compressed_input1_stride[kMaxBroadcastDim];
+  size_t compressed_input2_stride[kMaxBroadcastDim];
+  size_t compressed_output_shape[kMaxBroadcastDim];
+  bool broadcastable_shape = ReduceDimensionsForBroadcast<kMaxBroadcastDim>(
+      input1_shape, input2_shape, compressed_input1_stride,
+      compressed_input2_stride, compressed_output_shape);
+  // Skip broadcasting for degenerate shapes.
+  if (!broadcastable_shape) {
+    return;
   }
+
+  size_t input1_offset = 0;
+  size_t input2_offset = 0;
+  size_t output_offset = 0;
+  BroadcastAddRecursiveDimensions(
+      params, kMaxBroadcastDim - 1, &input1_offset, &input2_offset,
+      &output_offset, compressed_input1_stride, compressed_input2_stride,
+      compressed_output_shape, input1_data, input2_data, output_data);
 }
 
 template <typename T>
diff --git a/tensorflow/lite/kernels/internal/reference/comparisons.h b/tensorflow/lite/kernels/internal/reference/comparisons.h
index 3558319..366b378 100644
--- a/tensorflow/lite/kernels/internal/reference/comparisons.h
+++ b/tensorflow/lite/kernels/internal/reference/comparisons.h
@@ -257,12 +257,12 @@
         op_params, input1_shape, input1_data, input2_shape, input2_data,       \
         output_shape, output_data);                                            \
   }
-TFLITE_COMPARISON_OP(Equal);
-TFLITE_COMPARISON_OP(NotEqual);
-TFLITE_COMPARISON_OP(Greater);
-TFLITE_COMPARISON_OP(GreaterEqual);
-TFLITE_COMPARISON_OP(Less);
-TFLITE_COMPARISON_OP(LessEqual);
+TFLITE_COMPARISON_OP(Equal)
+TFLITE_COMPARISON_OP(NotEqual)
+TFLITE_COMPARISON_OP(Greater)
+TFLITE_COMPARISON_OP(GreaterEqual)
+TFLITE_COMPARISON_OP(Less)
+TFLITE_COMPARISON_OP(LessEqual)
 #undef TFLITE_COMPARISON_OP
 
 }  // namespace reference_ops
diff --git a/tensorflow/lite/kernels/internal/reference/integer_ops/add.h b/tensorflow/lite/kernels/internal/reference/integer_ops/add.h
index 579964d..c2a0e0f 100644
--- a/tensorflow/lite/kernels/internal/reference/integer_ops/add.h
+++ b/tensorflow/lite/kernels/internal/reference/integer_ops/add.h
@@ -16,6 +16,7 @@
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_ADD_H_
 
 #include <algorithm>
+#include <cstddef>
 #include <limits>
 
 #include "tensorflow/lite/kernels/internal/common.h"
@@ -35,7 +36,30 @@
   TFLITE_DCHECK_LE(-params.input2_offset, std::numeric_limits<int8_t>::max());
 }
 
-// TODO(b/270589088): move to a more appropriate file (b/270589088#comment2)
+// TODO: b/270589088 - move to a more appropriate file (b/270589088#comment2)
+template <typename T>
+void BroadcastInput1(int size, const ArithmeticParams& params,
+                     const T* input1_data, const T* input2_data, T* output_data,
+                     void (*check_arithmetic_params)(const ArithmeticParams&),
+                     T (*binary_func)(T, T, const ArithmeticParams&)) {
+  CheckArithmeticParams(params);
+  for (int i = 0; i < size; ++i) {
+    output_data[i] = binary_func(input1_data[0], input2_data[i], params);
+  }
+}
+
+template <typename T>
+void BroadcastInput2(int size, const ArithmeticParams& params,
+                     const T* input1_data, const T* input2_data, T* output_data,
+                     void (*check_arithmetic_params)(const ArithmeticParams&),
+                     T (*binary_func)(T, T, const ArithmeticParams&)) {
+  CheckArithmeticParams(params);
+  for (int i = 0; i < size; ++i) {
+    output_data[i] = binary_func(input1_data[i], input2_data[0], params);
+  }
+}
+
+// TODO: b/270589088 - move to a more appropriate file (b/270589088#comment2)
 template <typename T>
 void ElementWise(int size, const ArithmeticParams& params, const T* input1_data,
                  const T* input2_data, T* output_data,
@@ -46,7 +70,60 @@
     output_data[i] = binary_func(input1_data[i], input2_data[i], params);
   }
 }
-// TODO(b/270589088): move to a more appropriate file. (b/270589088#comment2)
+
+template <typename T>
+inline void BroadcastAddRecursiveDimensions(
+    const ArithmeticParams& params, int dimension, size_t* input1_offset_p,
+    size_t* input2_offset_p, size_t* output_offset,
+    size_t* compressed_input1_stride, size_t* compressed_input2_stride,
+    size_t* compressed_output_shape, const T* input1_data, const T* input2_data,
+    T* output_data, void (*check_arithmetic_params)(const ArithmeticParams&),
+    T (*binary_func)(T, T, const ArithmeticParams&)) {
+  if (dimension > 0) {
+    for (size_t c = 0; c < compressed_output_shape[dimension]; ++c) {
+      size_t input1_offset_c = *input1_offset_p;
+      size_t input2_offset_c = *input2_offset_p;
+      BroadcastAddRecursiveDimensions(
+          params, dimension - 1, &input1_offset_c, &input2_offset_c,
+          output_offset, compressed_input1_stride, compressed_input2_stride,
+          compressed_output_shape, input1_data, input2_data, output_data,
+          check_arithmetic_params, binary_func);
+      *input1_offset_p += compressed_input1_stride[dimension];
+      *input2_offset_p += compressed_input2_stride[dimension];
+    }
+  } else {
+    TFLITE_DCHECK(dimension == 0);
+    bool input1_is_broadcast = compressed_input1_stride[dimension] == 0;
+    bool input2_is_broadcast = compressed_input2_stride[dimension] == 0;
+    TFLITE_DCHECK(!(input1_is_broadcast && input2_is_broadcast));
+    const T* input1_data_ptr = input1_data + *input1_offset_p;
+    const T* input2_data_ptr = input2_data + *input2_offset_p;
+    T* output_data_ptr = output_data + *output_offset;
+    if (input1_is_broadcast) {
+      // input1 is broadcast.
+      BroadcastInput1<T>(compressed_output_shape[dimension], params,
+                         input1_data_ptr, input2_data_ptr, output_data_ptr,
+                         check_arithmetic_params, binary_func);
+      *input2_offset_p += compressed_output_shape[dimension];
+    } else if (input2_is_broadcast) {
+      // input2 is broadcast.
+      BroadcastInput2<T>(compressed_output_shape[dimension], params,
+                         input1_data_ptr, input2_data_ptr, output_data_ptr,
+                         check_arithmetic_params, binary_func);
+      *input1_offset_p += compressed_output_shape[dimension];
+    } else {
+      // Add element-wise.
+      ElementWise<T>(compressed_output_shape[dimension], params,
+                     input1_data_ptr, input2_data_ptr, output_data_ptr,
+                     check_arithmetic_params, binary_func);
+      *input1_offset_p += compressed_output_shape[dimension];
+      *input2_offset_p += compressed_output_shape[dimension];
+    }
+    *output_offset += compressed_output_shape[dimension];
+  }
+}
+
+// TODO: b/270589088 - move to a more appropriate file. (b/270589088#comment2)
 template <typename T>
 void BroadcastBinaryFunction6DSlow(
     const ArithmeticParams& params, const RuntimeShape& input1_shape,
@@ -54,12 +131,7 @@
     const T* input2_data, const RuntimeShape& output_shape, T* output_data,
     void (*check_arithmetic_params)(const ArithmeticParams&),
     T (*binary_func)(T, T, const ArithmeticParams&)) {
-  NdArrayDesc<6> desc1;
-  NdArrayDesc<6> desc2;
-  NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
-                                      &desc2);
-  const RuntimeShape extended_output_shape =
-      RuntimeShape::ExtendedShape(6, output_shape);
+  constexpr int kMaxBroadcastDim = 6;
 
   // In Tensorflow, the dimensions are canonically named (batch_number, row,
   // col, channel), with extents (batches, height, width, depth), with the
@@ -72,65 +144,25 @@
   // We name our variables by their Tensorflow convention, but generate C code
   // nesting loops such that the innermost loop has the smallest stride for the
   // best cache behavior.
-  size_t input1_offset_a = 0;
-  size_t input2_offset_a = 0;
-  size_t output_offset_a = 0;
-  for (int a = 0; a < extended_output_shape.Dims(0); ++a) {
-    size_t input1_offset_d = input1_offset_a;
-    size_t input2_offset_d = input2_offset_a;
-    size_t output_offset_d = output_offset_a;
-    for (int d = 0; d < extended_output_shape.Dims(1); ++d) {
-      size_t input1_offset_b = input1_offset_d;
-      size_t input2_offset_b = input2_offset_d;
-      size_t output_offset_b = output_offset_d;
-      for (int b = 0; b < extended_output_shape.Dims(2); ++b) {
-        size_t input1_offset_y = input1_offset_b;
-        size_t input2_offset_y = input2_offset_b;
-        size_t output_offset_y = output_offset_b;
-        for (int y = 0; y < extended_output_shape.Dims(3); ++y) {
-          size_t input1_offset_x = input1_offset_y;
-          size_t input2_offset_x = input2_offset_y;
-          size_t output_offset_x = output_offset_y;
-          for (int x = 0; x < extended_output_shape.Dims(4); ++x) {
-            size_t input1_offset_c = input1_offset_x;
-            size_t input2_offset_c = input2_offset_x;
-            size_t output_offset_c = output_offset_x;
-            for (int c = 0; c < extended_output_shape.Dims(5); ++c) {
-              output_data[output_offset_c] =
-                  binary_func(input1_data[input1_offset_c],
-                              input2_data[input2_offset_c], params);
-              input1_offset_c += desc1.strides[5];
-              input2_offset_c += desc2.strides[5];
-              ++output_offset_c;
-            }
-            input1_offset_x += desc1.strides[4];
-            input2_offset_x += desc2.strides[4];
-            output_offset_x += extended_output_shape.Dims(5);
-          }
-          input1_offset_y += desc1.strides[3];
-          input2_offset_y += desc2.strides[3];
-          output_offset_y +=
-              extended_output_shape.Dims(4) * extended_output_shape.Dims(5);
-        }
-        input1_offset_b += desc1.strides[2];
-        input2_offset_b += desc2.strides[2];
-        output_offset_b += extended_output_shape.Dims(3) *
-                           extended_output_shape.Dims(4) *
-                           extended_output_shape.Dims(5);
-      }
-      input1_offset_d += desc1.strides[1];
-      input2_offset_d += desc2.strides[1];
-      output_offset_d +=
-          extended_output_shape.Dims(2) * extended_output_shape.Dims(3) *
-          extended_output_shape.Dims(4) * extended_output_shape.Dims(5);
-    }
-    input1_offset_a += desc1.strides[0];
-    input2_offset_a += desc2.strides[0];
-    output_offset_a +=
-        extended_output_shape.Dims(1) * extended_output_shape.Dims(2) *
-        extended_output_shape.Dims(3) * extended_output_shape.Dims(4) *
-        extended_output_shape.Dims(5);
+  size_t compressed_input1_stride[kMaxBroadcastDim];
+  size_t compressed_input2_stride[kMaxBroadcastDim];
+  size_t compressed_output_shape[kMaxBroadcastDim];
+  bool broadcastable_shape = ReduceDimensionsForBroadcast<kMaxBroadcastDim>(
+      input1_shape, input2_shape, compressed_input1_stride,
+      compressed_input2_stride, compressed_output_shape);
+  // Skip broadcasting for degenerate shapes.
+  if (!broadcastable_shape) {
+    return;
   }
+
+  size_t input1_offset = 0;
+  size_t input2_offset = 0;
+  size_t output_offset = 0;
+  BroadcastAddRecursiveDimensions(
+      params, kMaxBroadcastDim - 1, &input1_offset, &input2_offset,
+      &output_offset, compressed_input1_stride, compressed_input2_stride,
+      compressed_output_shape, input1_data, input2_data, output_data,
+      check_arithmetic_params, binary_func);
 }
 
 template <typename T>
diff --git a/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h b/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h
index 0506618..a57056d 100644
--- a/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h
+++ b/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h
@@ -24,6 +24,9 @@
 namespace tflite {
 namespace reference_integer_ops {
 
+// Maximum dimension supported by the broadcast mul operation.
+constexpr int kMaxMulBroadcastDim = 6;
+
 template <typename InputType, typename OutputType>
 void MulElementwise(int size, const ArithmeticParams& params,
                     const InputType* input1_data, const InputType* input2_data,
@@ -88,46 +91,104 @@
 }
 
 template <typename T>
-inline void BroadcastMul4DSlow(
+inline void BroadcastMul6DSlow(
     const ArithmeticParams& params, const RuntimeShape& input1_shape,
     const T* input1_data, const RuntimeShape& input2_shape,
     const T* input2_data, const RuntimeShape& output_shape, T* output_data) {
-  ruy::profiler::ScopeLabel label("BroadcastMul4DSlow");
+  ruy::profiler::ScopeLabel label("BroadcastMul6DSlow");
 
-  NdArrayDesc<4> desc1;
-  NdArrayDesc<4> desc2;
+  NdArrayDesc<kMaxMulBroadcastDim> desc1;
+  NdArrayDesc<kMaxMulBroadcastDim> desc2;
   // The input shapes are extended as part of NdArrayDesc initialization.
   NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
                                       &desc2);
   const RuntimeShape extended_output_shape =
-      RuntimeShape::ExtendedShape(4, output_shape);
+      RuntimeShape::ExtendedShape(kMaxMulBroadcastDim, output_shape);
+  // Cache output shape dimensions.
+  int32_t extended_output_shape_dims[kMaxMulBroadcastDim];
+  std::memcpy(extended_output_shape_dims, extended_output_shape.DimsData(),
+              sizeof(extended_output_shape_dims));
 
-  for (int b = 0; b < extended_output_shape.Dims(0); ++b) {
-    for (int y = 0; y < extended_output_shape.Dims(1); ++y) {
-      for (int x = 0; x < extended_output_shape.Dims(2); ++x) {
-        for (int c = 0; c < extended_output_shape.Dims(3); ++c) {
-          const int32_t input1_val =
-              params.input1_offset +
-              input1_data[SubscriptToIndex(desc1, b, y, x, c)];
-          const int32_t input2_val =
-              params.input2_offset +
-              input2_data[SubscriptToIndex(desc2, b, y, x, c)];
-          const int32_t unclamped_result =
-              params.output_offset +
-              MultiplyByQuantizedMultiplier(input1_val * input2_val,
-                                            params.output_multiplier,
-                                            params.output_shift);
-          const int32_t clamped_output = std::min(
-              params.quantized_activation_max,
-              std::max(params.quantized_activation_min, unclamped_result));
-          output_data[Offset(extended_output_shape, b, y, x, c)] =
-              static_cast<T>(clamped_output);
+  size_t input1_offset_a = 0;
+  size_t input2_offset_a = 0;
+  size_t output_offset_a = 0;
+  for (int a = 0; a < extended_output_shape_dims[0]; ++a) {
+    size_t input1_offset_d = input1_offset_a;
+    size_t input2_offset_d = input2_offset_a;
+    size_t output_offset_d = output_offset_a;
+    for (int d = 0; d < extended_output_shape_dims[1]; ++d) {
+      size_t input1_offset_b = input1_offset_d;
+      size_t input2_offset_b = input2_offset_d;
+      size_t output_offset_b = output_offset_d;
+      for (int b = 0; b < extended_output_shape_dims[2]; ++b) {
+        size_t input1_offset_y = input1_offset_b;
+        size_t input2_offset_y = input2_offset_b;
+        size_t output_offset_y = output_offset_b;
+        for (int y = 0; y < extended_output_shape_dims[3]; ++y) {
+          size_t input1_offset_x = input1_offset_y;
+          size_t input2_offset_x = input2_offset_y;
+          size_t output_offset_x = output_offset_y;
+          for (int x = 0; x < extended_output_shape_dims[4]; ++x) {
+            size_t input1_offset_c = input1_offset_x;
+            size_t input2_offset_c = input2_offset_x;
+            size_t output_offset_c = output_offset_x;
+            for (int c = 0; c < extended_output_shape_dims[5]; ++c) {
+              const int32_t input1_val =
+                  params.input1_offset + input1_data[input1_offset_c];
+              const int32_t input2_val =
+                  params.input2_offset + input2_data[input2_offset_c];
+              const int32_t unclamped_result =
+                  params.output_offset +
+                  MultiplyByQuantizedMultiplier(input1_val * input2_val,
+                                                params.output_multiplier,
+                                                params.output_shift);
+              const int32_t clamped_output = std::min(
+                  params.quantized_activation_max,
+                  std::max(params.quantized_activation_min, unclamped_result));
+              output_data[output_offset_c] = static_cast<T>(clamped_output);
+              input1_offset_c += desc1.strides[5];
+              input2_offset_c += desc2.strides[5];
+              ++output_offset_c;
+            }
+            input1_offset_x += desc1.strides[4];
+            input2_offset_x += desc2.strides[4];
+            output_offset_x += extended_output_shape_dims[5];
+          }
+          input1_offset_y += desc1.strides[3];
+          input2_offset_y += desc2.strides[3];
+          output_offset_y +=
+              extended_output_shape_dims[4] * extended_output_shape_dims[5];
         }
+        input1_offset_b += desc1.strides[2];
+        input2_offset_b += desc2.strides[2];
+        output_offset_b += extended_output_shape_dims[3] *
+                           extended_output_shape_dims[4] *
+                           extended_output_shape_dims[5];
       }
+      input1_offset_d += desc1.strides[1];
+      input2_offset_d += desc2.strides[1];
+      output_offset_d +=
+          extended_output_shape_dims[2] * extended_output_shape_dims[3] *
+          extended_output_shape_dims[4] * extended_output_shape_dims[5];
     }
+    input1_offset_a += desc1.strides[0];
+    input2_offset_a += desc2.strides[0];
+    output_offset_a +=
+        extended_output_shape_dims[1] * extended_output_shape_dims[2] *
+        extended_output_shape_dims[3] * extended_output_shape_dims[4] *
+        extended_output_shape_dims[5];
   }
 }
 
+template <typename T>
+inline void BroadcastMul4DSlow(
+    const ArithmeticParams& params, const RuntimeShape& input1_shape,
+    const T* input1_data, const RuntimeShape& input2_shape,
+    const T* input2_data, const RuntimeShape& output_shape, T* output_data) {
+  BroadcastMul6DSlow(params, input1_shape, input1_data, input2_shape,
+                     input2_data, output_shape, output_data);
+}
+
 }  // namespace reference_integer_ops
 }  // namespace tflite
 #endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MUL_H_
diff --git a/tensorflow/lite/kernels/internal/reference/mul.h b/tensorflow/lite/kernels/internal/reference/mul.h
index 2767fef..fca74a3 100644
--- a/tensorflow/lite/kernels/internal/reference/mul.h
+++ b/tensorflow/lite/kernels/internal/reference/mul.h
@@ -24,6 +24,9 @@
 
 namespace reference_ops {
 
+// Maximum dimension supported by the broadcast mul operation.
+constexpr int kMaxMulBroadcastDim = 6;
+
 // Element-wise mul that can often be used for inner loop of broadcast Mul as
 // well as the non-broadcast Mul.
 inline void MulElementwise(int size, const ArithmeticParams& params,
@@ -88,128 +91,174 @@
   MulElementwise(flat_size, params, input1_data, input2_data, output_data);
 }
 
-inline void BroadcastMul4DSlow(const ArithmeticParams& params,
+template <typename T, typename F>
+void BroadcastMulRecursiveDimensions(
+    const ArithmeticParams& params, int dimension, const T* input1_data,
+    const T* input2_data, T* output_data, size_t* input1_offset_p,
+    size_t* input2_offset_p, size_t* output_offset,
+    const NdArrayDesc<kMaxMulBroadcastDim>& desc1,
+    const NdArrayDesc<kMaxMulBroadcastDim>& desc2,
+    const int32_t extended_output_shape_dims[kMaxMulBroadcastDim],
+    F binary_func) {
+  if (dimension == kMaxMulBroadcastDim - 1) {
+    for (int c = 0; c < extended_output_shape_dims[dimension]; ++c) {
+      const T input1_val = input1_data[*input1_offset_p];
+      const T input2_val = input2_data[*input2_offset_p];
+      output_data[*output_offset] = binary_func(params, input1_val, input2_val);
+      *input1_offset_p += desc1.strides[dimension];
+      *input2_offset_p += desc2.strides[dimension];
+      ++(*output_offset);
+    }
+  } else {
+    for (int a = 0; a < extended_output_shape_dims[dimension]; ++a) {
+      size_t input1_offset_c = *input1_offset_p;
+      size_t input2_offset_c = *input2_offset_p;
+      BroadcastMulRecursiveDimensions(
+          params, dimension + 1, input1_data, input2_data, output_data,
+          &input1_offset_c, &input2_offset_c, output_offset, desc1, desc2,
+          extended_output_shape_dims, binary_func);
+      *input1_offset_p += desc1.strides[dimension];
+      *input2_offset_p += desc2.strides[dimension];
+    }
+  }
+}
+
+inline void BroadcastMul6DSlow(const ArithmeticParams& params,
                                const RuntimeShape& input1_shape,
                                const uint8_t* input1_data,
                                const RuntimeShape& input2_shape,
                                const uint8_t* input2_data,
                                const RuntimeShape& output_shape,
                                uint8_t* output_data) {
-  NdArrayDesc<4> desc1;
-  NdArrayDesc<4> desc2;
+  NdArrayDesc<kMaxMulBroadcastDim> desc1;
+  NdArrayDesc<kMaxMulBroadcastDim> desc2;
   NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
                                       &desc2);
   const RuntimeShape extended_output_shape =
-      RuntimeShape::ExtendedShape(4, output_shape);
+      RuntimeShape::ExtendedShape(kMaxMulBroadcastDim, output_shape);
+  // Cache output shape dimensions.
+  int32_t extended_output_shape_dims[kMaxMulBroadcastDim];
+  std::memcpy(extended_output_shape_dims, extended_output_shape.DimsData(),
+              sizeof(extended_output_shape_dims));
 
-  for (int b = 0; b < extended_output_shape.Dims(0); ++b) {
-    for (int y = 0; y < extended_output_shape.Dims(1); ++y) {
-      for (int x = 0; x < extended_output_shape.Dims(2); ++x) {
-        for (int c = 0; c < extended_output_shape.Dims(3); ++c) {
-          const int32_t input1_val =
-              params.input1_offset +
-              input1_data[SubscriptToIndex(desc1, b, y, x, c)];
-          const int32_t input2_val =
-              params.input2_offset +
-              input2_data[SubscriptToIndex(desc2, b, y, x, c)];
-          const int32_t unclamped_result =
-              params.output_offset +
-              MultiplyByQuantizedMultiplier(input1_val * input2_val,
-                                            params.output_multiplier,
-                                            params.output_shift);
-          const int32_t clamped_output = std::min(
-              params.quantized_activation_max,
-              std::max(params.quantized_activation_min, unclamped_result));
-          output_data[Offset(extended_output_shape, b, y, x, c)] =
-              static_cast<uint8_t>(clamped_output);
-        }
-      }
-    }
-  }
+  size_t input1_offset = 0;
+  size_t input2_offset = 0;
+  size_t output_offset = 0;
+  BroadcastMulRecursiveDimensions(
+      params, 0, input1_data, input2_data, output_data, &input1_offset,
+      &input2_offset, &output_offset, desc1, desc2, extended_output_shape_dims,
+      [](const ArithmeticParams& params, const uint8_t input1_val,
+         const uint8_t input2_val) {
+        const int32_t offsetted_input1_val = params.input1_offset + input1_val;
+        const int32_t offsetted_input2_val = params.input2_offset + input2_val;
+        const int32_t unclamped_result =
+            params.output_offset +
+            MultiplyByQuantizedMultiplier(
+                offsetted_input1_val * offsetted_input2_val,
+                params.output_multiplier, params.output_shift);
+        const int32_t clamped_output = std::min(
+            params.quantized_activation_max,
+            std::max(params.quantized_activation_min, unclamped_result));
+        return static_cast<uint8_t>(clamped_output);
+      });
 }
 
 template <typename T,
-          // For unquantized mul on small integers, explictly set to true.
+          // For unquantized mul on small integers, explicitly set to true.
           bool enable_for_short_integers = false>
 inline typename std::enable_if<
     !is_small_integer<T>::value || enable_for_short_integers, void>::type
-BroadcastMul4DSlow(const ArithmeticParams& params,
+BroadcastMul6DSlow(const ArithmeticParams& params,
                    const RuntimeShape& unextended_input1_shape,
                    const T* input1_data,
                    const RuntimeShape& unextended_input2_shape,
                    const T* input2_data,
                    const RuntimeShape& unextended_output_shape,
                    T* output_data) {
-  T output_activation_min;
-  T output_activation_max;
-  GetActivationParams(params, &output_activation_min, &output_activation_max);
-
-  TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4);
-  TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 4);
-  TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4);
-  const RuntimeShape output_shape =
-      RuntimeShape::ExtendedShape(4, unextended_output_shape);
-
-  NdArrayDesc<4> desc1;
-  NdArrayDesc<4> desc2;
+  TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 6);
+  TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 6);
+  TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 6);
+  NdArrayDesc<kMaxMulBroadcastDim> desc1;
+  NdArrayDesc<kMaxMulBroadcastDim> desc2;
   NdArrayDescsForElementwiseBroadcast(unextended_input1_shape,
                                       unextended_input2_shape, &desc1, &desc2);
+  const RuntimeShape extended_output_shape =
+      RuntimeShape::ExtendedShape(kMaxMulBroadcastDim, unextended_output_shape);
+  // Cache output shape dimensions.
+  int32_t extended_output_shape_dims[kMaxMulBroadcastDim];
+  std::memcpy(extended_output_shape_dims, extended_output_shape.DimsData(),
+              sizeof(extended_output_shape_dims));
 
   // In Tensorflow, the dimensions are canonically named (batch_number, row,
   // col, channel), with extents (batches, height, width, depth), with the
-  // trailing dimension changing most rapidly (channels has the smallest stride,
-  // typically 1 element).
+  // trailing dimension changing most rapidly (channels has the smallest
+  // stride, typically 1 element).
   //
   // In generated C code, we store arrays with the dimensions reversed. The
   // first dimension has smallest stride.
   //
   // We name our variables by their Tensorflow convention, but generate C code
-  // nesting loops such that the innermost loop has the smallest stride for the
-  // best cache behavior.
-  for (int b = 0; b < output_shape.Dims(0); ++b) {
-    for (int y = 0; y < output_shape.Dims(1); ++y) {
-      for (int x = 0; x < output_shape.Dims(2); ++x) {
-        for (int c = 0; c < output_shape.Dims(3); ++c) {
-          output_data[Offset(output_shape, b, y, x, c)] =
-              ActivationFunctionWithMinMax<T>(
-                  input1_data[SubscriptToIndex(desc1, b, y, x, c)] *
-                      input2_data[SubscriptToIndex(desc2, b, y, x, c)],
-                  output_activation_min, output_activation_max);
-        }
-      }
-    }
-  }
+  // nesting loops such that the innermost loop has the smallest stride for
+  // the best cache behavior.
+  size_t input1_offset = 0;
+  size_t input2_offset = 0;
+  size_t output_offset = 0;
+  BroadcastMulRecursiveDimensions(
+      params, 0, input1_data, input2_data, output_data, &input1_offset,
+      &input2_offset, &output_offset, desc1, desc2, extended_output_shape_dims,
+      [](const ArithmeticParams& params, const T input1_val,
+         const T input2_val) {
+        T output_activation_min;
+        T output_activation_max;
+        GetActivationParams(params, &output_activation_min,
+                            &output_activation_max);
+        return ActivationFunctionWithMinMax<T>(input1_val * input2_val,
+                                               output_activation_min,
+                                               output_activation_max);
+      });
 }
 
-inline void BroadcastMul4DSlow(const ArithmeticParams& params,
+inline void BroadcastMul6DSlow(const ArithmeticParams& params,
                                const RuntimeShape& unextended_input1_shape,
                                const std::complex<float>* input1_data,
                                const RuntimeShape& unextended_input2_shape,
                                const std::complex<float>* input2_data,
                                const RuntimeShape& unextended_output_shape,
                                std::complex<float>* output_data) {
-  TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4);
-  TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 4);
-  TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4);
-  const RuntimeShape output_shape =
-      RuntimeShape::ExtendedShape(4, unextended_output_shape);
+  TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 6);
+  TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 6);
+  TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 6);
 
-  NdArrayDesc<4> desc1;
-  NdArrayDesc<4> desc2;
+  NdArrayDesc<kMaxMulBroadcastDim> desc1;
+  NdArrayDesc<kMaxMulBroadcastDim> desc2;
   NdArrayDescsForElementwiseBroadcast(unextended_input1_shape,
                                       unextended_input2_shape, &desc1, &desc2);
+  const RuntimeShape extended_output_shape =
+      RuntimeShape::ExtendedShape(kMaxMulBroadcastDim, unextended_output_shape);
+  // Cache output shape dimensions.
+  int32_t extended_output_shape_dims[kMaxMulBroadcastDim];
+  std::memcpy(extended_output_shape_dims, extended_output_shape.DimsData(),
+              sizeof(extended_output_shape_dims));
 
-  for (int b = 0; b < output_shape.Dims(0); ++b) {
-    for (int y = 0; y < output_shape.Dims(1); ++y) {
-      for (int x = 0; x < output_shape.Dims(2); ++x) {
-        for (int c = 0; c < output_shape.Dims(3); ++c) {
-          output_data[Offset(output_shape, b, y, x, c)] =
-              input1_data[SubscriptToIndex(desc1, b, y, x, c)] *
-              input2_data[SubscriptToIndex(desc2, b, y, x, c)];
-        }
-      }
-    }
-  }
+  size_t input1_offset = 0;
+  size_t input2_offset = 0;
+  size_t output_offset = 0;
+  BroadcastMulRecursiveDimensions(
+      params, 0, input1_data, input2_data, output_data, &input1_offset,
+      &input2_offset, &output_offset, desc1, desc2, extended_output_shape_dims,
+      [](const ArithmeticParams& params, const std::complex<float> input1_val,
+         const std::complex<float> input2_val) {
+        return input1_val * input2_val;
+      });
+}
+
+template <typename T>
+inline void BroadcastMul4DSlow(
+    const ArithmeticParams& params, const RuntimeShape& input1_shape,
+    const T* input1_data, const RuntimeShape& input2_shape,
+    const T* input2_data, const RuntimeShape& output_shape, T* output_data) {
+  return BroadcastMul6DSlow(params, input1_shape, input1_data, input2_shape,
+                            input2_data, output_shape, output_data);
 }
 
 }  // namespace reference_ops
diff --git a/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.cc b/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.cc
index d386203..d529463 100644
--- a/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.cc
+++ b/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.cc
@@ -37,6 +37,7 @@
 const int32_t kInt16Min = std::numeric_limits<int16_t>::min();
 }  // namespace
 
+// LINT.IfChange(portable_symmetric_quantize_floats)
 void PortableSymmetricQuantizeFloats(const float* values, const int size,
                                      int8_t* quantized_values, float* min_value,
                                      float* max_value, float* scaling_factor) {
@@ -68,6 +69,7 @@
         std::min(kScale, std::max(-kScale, quantized_value)));
   }
 }
+// LINT.ThenChange(//tensorflow/compiler/mlir/lite/quantization/lite/toco_legacy/portable_tensor_utils.cc:portable_symmetric_quantize_floats)
 
 void PortableAsymmetricQuantizeFloats(const float* values, const int size,
                                       int8_t* quantized_values,
@@ -157,7 +159,7 @@
       *result += dotprod * batch_scaling_factor;
       ++result;
     }  // for row
-  }    // for batch
+  }  // for batch
 }
 
 void PortableMatrixBatchVectorMultiplyAccumulate(
@@ -200,7 +202,7 @@
       *result += dotprod * scale;
       ++result;
     }  // for row
-  }    // for batch
+  }  // for batch
 }
 
 void PortableSparseMatrixBatchVectorMultiplyAccumulate1x4(
@@ -232,7 +234,8 @@
     const int32_t* __restrict__ indices, int m_rows, int m_cols,
     const int8_t* __restrict__ vector, const int32_t* __restrict__ bias_vector,
     int n_batch, const int32_t input_offset, const int32_t output_multiplier,
-    const int32_t output_shift, const int32_t output_offset,
+    const int32_t output_shift, const int32_t* per_channel_scale,
+    const int32_t* per_channel_shift, const int32_t output_offset,
     const int32_t output_activation_min, const int32_t output_activation_max,
     int8_t* __restrict__ result) {
   const int kBlockSize = 16;
@@ -252,8 +255,10 @@
         }
       }
       const int32_t bias_value = bias_vector != nullptr ? bias_vector[row] : 0;
-      dot_prod = MultiplyByQuantizedMultiplier(dot_prod + bias_value,
-                                               output_multiplier, output_shift);
+      dot_prod = MultiplyByQuantizedMultiplier(
+          dot_prod + bias_value,
+          per_channel_scale ? per_channel_scale[row] : output_multiplier,
+          per_channel_shift ? per_channel_shift[row] : output_shift);
       dot_prod += output_offset;
       result[batch * m_rows + row] =
           static_cast<int8_t>(ActivationFunctionWithMinMax(
@@ -294,7 +299,8 @@
 void PortableSparseMatrixBatchVectorMultiplyAccumulate(
     const int8_t* __restrict__ matrix, const uint8_t* ledger, const int m_rows,
     const int m_cols, const int8_t* __restrict__ vectors,
-    const float* scaling_factors, int n_batch, float* __restrict__ result) {
+    const float* scaling_factors, int n_batch, float* __restrict__ result,
+    const float* per_channel_scale) {
   static const int kBlockSize = 16;
   TFLITE_DCHECK_EQ(  // NOLINT
       m_cols % kBlockSize, 0);
@@ -318,10 +324,14 @@
         for (int c = 0; c < kBlockSize; c++) {
           dotprod += (*row_ptr++) * (*vector_block_ptr++);
         }  // for block
-      }    // for num_nonzero_blocks
-      result[batch * m_rows + row] += dotprod * batch_scaling_factor;
+      }  // for num_nonzero_blocks
+      float scaling_factor = batch_scaling_factor;
+      if (per_channel_scale) {
+        scaling_factor *= per_channel_scale[row];
+      }
+      result[batch * m_rows + row] += dotprod * scaling_factor;
     }  // for row
-  }    // for batch
+  }  // for batch
 }
 
 template <typename T>
diff --git a/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h b/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h
index 0416db0..7c623f7 100644
--- a/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h
+++ b/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h
@@ -116,23 +116,26 @@
     const int32_t* __restrict__ indices, int m_rows, int m_cols,
     const int8_t* __restrict__ vector, const int32_t* __restrict__ bias_vector,
     int n_batch, const int32_t input_offset, const int32_t output_multiplier,
-    const int32_t output_shift, const int32_t output_offset,
+    const int32_t output_shift, const int32_t* per_channel_scale,
+    const int32_t* per_channel_shift, const int32_t output_offset,
     const int32_t output_activation_min, const int32_t output_activation_max,
 
     int8_t* __restrict__ result) {
   PortableSparseMatrixBatchVectorMultiplyAccumulate1x16(
       matrix, segments, indices, m_rows, m_cols, vector, bias_vector, n_batch,
-      input_offset, output_multiplier, output_shift, output_offset,
-      output_activation_min, output_activation_max, result);
+      input_offset, output_multiplier, output_shift, per_channel_scale,
+      per_channel_shift, output_offset, output_activation_min,
+      output_activation_max, result);
 }
 
 void SparseMatrixBatchVectorMultiplyAccumulate(
     const int8_t* __restrict__ matrix, const uint8_t* ledger, const int m_rows,
     const int m_cols, const int8_t* __restrict__ vectors,
-    const float* scaling_factors, int n_batch, float* __restrict__ result) {
+    const float* scaling_factors, int n_batch, float* __restrict__ result,
+    const float* per_channel_scale) {
   PortableSparseMatrixBatchVectorMultiplyAccumulate(
-      matrix, ledger, m_rows, m_cols, vectors, scaling_factors, n_batch,
-      result);
+      matrix, ledger, m_rows, m_cols, vectors, scaling_factors, n_batch, result,
+      per_channel_scale);
 }
 
 void MatrixBatchVectorMultiplyAccumulate(
diff --git a/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h b/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h
index 6c404d5..5e228bb 100644
--- a/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h
+++ b/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h
@@ -40,6 +40,7 @@
   return true;
 }
 
+// LINT.IfChange(portable_symmetric_quantize_floats)
 void PortableSymmetricQuantizeFloats(const float* values, const int size,
                                      int8_t* quantized_values, float* min_value,
                                      float* max_value, float* scaling_factor);
@@ -47,6 +48,7 @@
 void PortableSymmetricQuantizeFloats(const float* values, const int size,
                                      int8_t* quantized_values, float min_value,
                                      float max_value, float* scaling_factor);
+// LINT.ThenChange(//tensorflow/compiler/mlir/lite/quantization/lite/toco_legacy/portable_tensor_utils.h:portable_symmetric_quantize_floats)
 
 void PortableAsymmetricQuantizeFloats(const float* values, const int size,
                                       int8_t* quantized_values,
@@ -92,14 +94,16 @@
     const int32_t* __restrict__ indices, int m_rows, int m_cols,
     const int8_t* __restrict__ vector, const int32_t* __restrict__ bias_vector,
     int n_batch, const int32_t input_offset, const int32_t output_multiplier,
-    const int32_t output_shift, const int32_t output_offset,
+    int32_t output_shift, const int32_t* per_channel_scale,
+    const int32_t* per_channel_shift, int32_t output_offset,
     const int32_t output_activation_min, const int32_t output_activation_max,
     int8_t* __restrict__ result);
 
 void PortableSparseMatrixBatchVectorMultiplyAccumulate(
     const int8_t* __restrict__ matrix, const uint8_t* ledger, const int m_rows,
     const int m_cols, const int8_t* __restrict__ vectors,
-    const float* scaling_factors, int n_batch, float* __restrict__ result);
+    const float* scaling_factors, int n_batch, float* __restrict__ result,
+    const float* per_channel_scale);
 
 // Dot product of two vectors.
 float PortableVectorVectorDotProduct(const float* vector1, const float* vector2,
diff --git a/tensorflow/lite/kernels/internal/reference/softmax.h b/tensorflow/lite/kernels/internal/reference/softmax.h
index c09a7ea..2930217 100644
--- a/tensorflow/lite/kernels/internal/reference/softmax.h
+++ b/tensorflow/lite/kernels/internal/reference/softmax.h
@@ -115,6 +115,9 @@
     FixedPoint0 shifted_scale = FixedPoint0::FromRaw(GetReciprocal(
         sum_of_exps.raw(), kAccumulationIntegerBits, &num_bits_over_unit));
 
+    const int exponent = num_bits_over_unit + 31 - (sizeof(OutputT) * 8);
+    TFLITE_CHECK(0 <= exponent && exponent <= 31);
+
     for (int c = 0; c < depth; ++c) {
       int32_t input_diff =
           static_cast<int32_t>(input_data[i * depth + c]) - max_in_row;
@@ -127,8 +130,7 @@
 
         FixedPoint0 exp_in_0 = exp_on_negative_values(scaled_diff_f8);
         int32_t unsat_output = gemmlowp::RoundingDivideByPOT(
-            (shifted_scale * exp_in_0).raw(),
-            num_bits_over_unit + 31 - (sizeof(OutputT) * 8));
+            (shifted_scale * exp_in_0).raw(), exponent);
 
         const int32_t shifted_output =
             unsat_output +
diff --git a/tensorflow/lite/kernels/internal/reference/sub.h b/tensorflow/lite/kernels/internal/reference/sub.h
index d0ebc95..1a74aeb 100644
--- a/tensorflow/lite/kernels/internal/reference/sub.h
+++ b/tensorflow/lite/kernels/internal/reference/sub.h
@@ -18,6 +18,7 @@
 #include <stdint.h>
 
 #include <algorithm>
+#include <cstddef>
 #include <limits>
 
 #include "ruy/profiler/instrumentation.h"  // from @ruy
@@ -29,199 +30,245 @@
 
 namespace reference_ops {
 
-inline void SubNonBroadcast(const ArithmeticParams& params,
-                            const RuntimeShape& input1_shape,
-                            const float* input1_data,
-                            const RuntimeShape& input2_shape,
-                            const float* input2_data,
-                            const RuntimeShape& output_shape,
-                            float* output_data) {
-  const int flat_size =
-      MatchingElementsSize(input1_shape, input2_shape, output_shape);
-  for (int i = 0; i < flat_size; ++i) {
-    output_data[i] = ActivationFunctionWithMinMax(
-        input1_data[i] - input2_data[i], params.float_activation_min,
-        params.float_activation_max);
+template <class T>
+struct SubImpl {
+  template <class F>
+  static void BroadcastInput1(const ArithmeticParams& params,
+                              const T* input1_data, const T* input2_data,
+                              T* output_data, size_t size, F binary_func) {
+    for (size_t c = 0; c < size; ++c) {
+      output_data[c] = binary_func(input1_data[0], input2_data[c], params);
+    }
+  }
+
+  template <class F>
+  static void BroadcastInput2(const ArithmeticParams& params,
+                              const T* input1_data, const T* input2_data,
+                              T* output_data, size_t size, F binary_func) {
+    for (size_t c = 0; c < size; ++c) {
+      output_data[c] = binary_func(input1_data[c], input2_data[0], params);
+    }
+  }
+
+  template <class F>
+  static void ElementWise(const ArithmeticParams& params, const T* input1_data,
+                          const T* input2_data, T* output_data, size_t size,
+                          F binary_func) {
+    for (size_t c = 0; c < size; ++c) {
+      output_data[c] = binary_func(input1_data[c], input2_data[c], params);
+    }
+  }
+};
+
+template <>
+struct SubImpl<int32_t> {
+  template <class F>
+  static void BroadcastInput1(const ArithmeticParams& params,
+                              const int32_t* input1_data,
+                              const int32_t* input2_data, int32_t* output_data,
+                              size_t size, F binary_func) {
+    size_t c = 0;
+    int32_t activation_min, activation_max;
+    GetActivationParams(params, &activation_min, &activation_max);
+#ifdef USE_NEON
+    const int32x4_t vmax = vdupq_n_s32(activation_max);
+    const int32x4_t vmin = vdupq_n_s32(activation_min);
+    const int32x4_t va = vdupq_n_s32(input1_data[0]);
+    for (; c + 4 <= size; c += 4) {
+      const int32x4_t vb = vld1q_s32(&input2_data[c]);
+      int32x4_t vres = vsubq_s32(va, vb);
+      vres = vmaxq_s32(vmin, vres);
+      vres = vminq_s32(vmax, vres);
+      vst1q_s32(&output_data[c], vres);
+    }
+#endif
+    for (; c < size; ++c) {
+      output_data[c] = binary_func(input1_data[0], input2_data[c], params);
+    }
+  }
+
+  template <class F>
+  static void BroadcastInput2(const ArithmeticParams& params,
+                              const int32_t* input1_data,
+                              const int32_t* input2_data, int32_t* output_data,
+                              size_t size, F binary_func) {
+    size_t c = 0;
+    int32_t activation_min, activation_max;
+    GetActivationParams(params, &activation_min, &activation_max);
+#ifdef USE_NEON
+    const int32x4_t vmax = vdupq_n_s32(activation_max);
+    const int32x4_t vmin = vdupq_n_s32(activation_min);
+    const int32x4_t vb = vdupq_n_s32(input2_data[0]);
+    for (; c + 4 <= size; c += 4) {
+      const int32x4_t va = vld1q_s32(&input1_data[c]);
+      int32x4_t vres = vsubq_s32(va, vb);
+      vres = vmaxq_s32(vmin, vres);
+      vres = vminq_s32(vmax, vres);
+      vst1q_s32(&output_data[c], vres);
+    }
+#endif
+    for (; c < size; ++c) {
+      output_data[c] = binary_func(input1_data[c], input2_data[0], params);
+    }
+  }
+
+  template <class F>
+  static void ElementWise(const ArithmeticParams& params,
+                          const int32_t* input1_data,
+                          const int32_t* input2_data, int32_t* output_data,
+                          size_t size, F binary_func) {
+    size_t c = 0;
+    int32_t activation_min, activation_max;
+    GetActivationParams(params, &activation_min, &activation_max);
+#ifdef USE_NEON
+    int32x4_t vmax = vdupq_n_s32(activation_max);
+    int32x4_t vmin = vdupq_n_s32(activation_min);
+    for (; c + 4 <= size; c += 4) {
+      const int32x4_t va = vld1q_s32(&input1_data[c]);
+      const int32x4_t vb = vld1q_s32(&input2_data[c]);
+      int32x4_t vres = vsubq_s32(va, vb);
+      vres = vmaxq_s32(vmin, vres);
+      vres = vminq_s32(vmax, vres);
+      vst1q_s32(&output_data[c], vres);
+    }
+#endif
+    for (; c < size; ++c) {
+      output_data[c] = binary_func(input1_data[c], input2_data[c], params);
+    }
+  }
+};
+
+template <typename T, typename F>
+inline void BroadcastSubRecursiveDimensions(
+    int dimension, const ArithmeticParams& params, const T* input1_data,
+    const T* input2_data, T* output_data, size_t* input1_offset_p,
+    size_t* input2_offset_p, size_t* output_offset,
+    size_t* compressed_input1_stride, size_t* compressed_input2_stride,
+    size_t* compressed_output_shape, F binary_func) {
+  if (dimension > 0) {
+    for (size_t c = 0; c < compressed_output_shape[dimension]; ++c) {
+      size_t input1_offset_c = *input1_offset_p;
+      size_t input2_offset_c = *input2_offset_p;
+      BroadcastSubRecursiveDimensions(
+          dimension - 1, params, input1_data, input2_data, output_data,
+          &input1_offset_c, &input2_offset_c, output_offset,
+          compressed_input1_stride, compressed_input2_stride,
+          compressed_output_shape, binary_func);
+      *input1_offset_p += compressed_input1_stride[dimension];
+      *input2_offset_p += compressed_input2_stride[dimension];
+    }
+  } else {
+    TFLITE_DCHECK(dimension == 0);
+    bool input1_is_broadcast = compressed_input1_stride[dimension] == 0;
+    bool input2_is_broadcast = compressed_input2_stride[dimension] == 0;
+    TFLITE_DCHECK(!(input1_is_broadcast && input2_is_broadcast));
+    const T* input1_data_ptr = input1_data + *input1_offset_p;
+    const T* input2_data_ptr = input2_data + *input2_offset_p;
+    T* output_data_ptr = output_data + *output_offset;
+    if (input1_is_broadcast) {
+      // input1 is broadcast.
+      SubImpl<T>::BroadcastInput1(
+          params, input1_data_ptr, input2_data_ptr, output_data_ptr,
+          compressed_output_shape[dimension], binary_func);
+      *input2_offset_p += compressed_output_shape[dimension];
+    } else if (input2_is_broadcast) {
+      // input2 is broadcast.
+      SubImpl<T>::BroadcastInput2(
+          params, input1_data_ptr, input2_data_ptr, output_data_ptr,
+          compressed_output_shape[dimension], binary_func);
+      *input1_offset_p += compressed_output_shape[dimension];
+    } else {
+      // Add element-wise.
+      SubImpl<T>::ElementWise(params, input1_data_ptr, input2_data_ptr,
+                              output_data_ptr,
+                              compressed_output_shape[dimension], binary_func);
+      *input1_offset_p += compressed_output_shape[dimension];
+      *input2_offset_p += compressed_output_shape[dimension];
+    }
+    *output_offset += compressed_output_shape[dimension];
   }
 }
 
-inline void SubNonBroadcast(const ArithmeticParams& params,
-                            const RuntimeShape& input1_shape,
-                            const int32_t* input1_data,
-                            const RuntimeShape& input2_shape,
-                            const int32_t* input2_data,
-                            const RuntimeShape& output_shape,
-                            int32_t* output_data) {
-  const int flat_size =
-      MatchingElementsSize(input1_shape, input2_shape, output_shape);
-  for (int i = 0; i < flat_size; ++i) {
-    output_data[i] = ActivationFunctionWithMinMax(
-        input1_data[i] - input2_data[i], params.quantized_activation_min,
-        params.quantized_activation_max);
+// TODO: b/296510380 - we may be able to factor this out into common.h for
+// all binary arithmetic ops (add, sub, mul).
+template <typename T, typename F>
+inline void BroadcastSubCommon(const ArithmeticParams& params,
+                               const RuntimeShape& input1_shape,
+                               const T* input1_data,
+                               const RuntimeShape& input2_shape,
+                               const T* input2_data,
+                               const RuntimeShape& output_shape, T* output_data,
+                               F binary_func) {
+  constexpr int kMaxBroadcastDim = 6;
+  TFLITE_DCHECK_LE(input1_shape.DimensionsCount(), kMaxBroadcastDim);
+  TFLITE_DCHECK_LE(input2_shape.DimensionsCount(), kMaxBroadcastDim);
+  TFLITE_DCHECK_LE(output_shape.DimensionsCount(), kMaxBroadcastDim);
+
+  // In Tensorflow, the dimensions are canonically named (batch_number, row,
+  // col, channel), with extents (batches, height, width, depth), with the
+  // trailing dimension changing most rapidly (channels has the smallest stride,
+  // typically 1 element).
+  //
+  // In generated C code, we store arrays with the dimensions reversed. The
+  // first dimension has smallest stride.
+  //
+  // We name our variables by their Tensorflow convention, but generate C code
+  // nesting loops such that the innermost loop has the smallest stride for the
+  // best cache behavior.
+
+  // The implementation first collapses the shapes into at most
+  // kMaxBroadcastDim "compressed" dimensions: adjacent dimensions that share
+  // the same broadcasting behavior are merged by
+  // ReduceDimensionsForBroadcast, and a stride of 0 marks a dimension along
+  // which the corresponding input is broadcast.
+  //
+  // BroadcastSubRecursiveDimensions then iterates the compressed dimensions
+  // from outermost to innermost, dispatching to the element-wise or
+  // single-input-broadcast kernels of SubImpl<T> at the innermost level, so
+  // the innermost loop always advances with the smallest stride for the best
+  // cache behavior.
+
+  size_t compressed_input1_stride[kMaxBroadcastDim];
+  size_t compressed_input2_stride[kMaxBroadcastDim];
+  size_t compressed_output_shape[kMaxBroadcastDim];
+  bool broadcastable_shape = ReduceDimensionsForBroadcast<kMaxBroadcastDim>(
+      input1_shape, input2_shape, compressed_input1_stride,
+      compressed_input2_stride, compressed_output_shape);
+  // Skip broadcasting for degenerate shapes.
+  if (!broadcastable_shape) {
+    return;
   }
+
+  size_t input1_offset = 0;
+  size_t input2_offset = 0;
+  size_t output_offset = 0;
+  BroadcastSubRecursiveDimensions(
+      kMaxBroadcastDim - 1, params, input1_data, input2_data, output_data,
+      &input1_offset, &input2_offset, &output_offset, compressed_input1_stride,
+      compressed_input2_stride, compressed_output_shape, binary_func);
 }
 
 // TODO(b/151345304): We can implement BroadcastSub on buffers of arbitrary
 // dimensionality if the runtime code does a single loop over one dimension
 // that handles broadcasting as the base case. The code generator would then
 // generate max(D1, D2) nested for loops.
-template <int N = 5>
-inline void BroadcastSubSlow(const ArithmeticParams& params,
-                             const RuntimeShape& input1_shape,
-                             const float* input1_data,
-                             const RuntimeShape& input2_shape,
-                             const float* input2_data,
-                             const RuntimeShape& output_shape,
-                             float* output_data) {
-  ruy::profiler::ScopeLabel label("BroadcastSubSlow/float");
-  TFLITE_DCHECK_LE(input1_shape.DimensionsCount(), N);
-  TFLITE_DCHECK_LE(input2_shape.DimensionsCount(), N);
-  TFLITE_DCHECK_LE(output_shape.DimensionsCount(), N);
-  NdArrayDesc<N> desc1;
-  NdArrayDesc<N> desc2;
-  NdArrayDesc<N> output_desc;
-  NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
-                                      &desc2);
-  CopyDimsToDesc(RuntimeShape::ExtendedShape(N, output_shape), &output_desc);
-
-  // In Tensorflow, the dimensions are canonically named (batch_number, row,
-  // col, channel), with extents (batches, height, width, depth), with the
-  // trailing dimension changing most rapidly (channels has the smallest stride,
-  // typically 1 element).
-  //
-  // In generated C code, we store arrays with the dimensions reversed. The
-  // first dimension has smallest stride.
-  //
-  // We name our variables by their Tensorflow convention, but generate C code
-  // nesting loops such that the innermost loop has the smallest stride for the
-  // best cache behavior.
-  auto sub_func = [&](int indexes[N]) {
-    output_data[SubscriptToIndex(output_desc, indexes)] =
-        ActivationFunctionWithMinMax(
-            input1_data[SubscriptToIndex(desc1, indexes)] -
-                input2_data[SubscriptToIndex(desc2, indexes)],
-            params.float_activation_min, params.float_activation_max);
-  };
-  NDOpsHelper<N>(output_desc, sub_func);
-}
-
-template <int N = 5>
-inline void BroadcastSubSlow(const ArithmeticParams& params,
-                             const RuntimeShape& input1_shape,
-                             const int32_t* input1_data,
-                             const RuntimeShape& input2_shape,
-                             const int32_t* input2_data,
-                             const RuntimeShape& output_shape,
-                             int32_t* output_data) {
-  ruy::profiler::ScopeLabel label("BroadcastSubSlow/int32_t");
-  TFLITE_DCHECK_LE(input1_shape.DimensionsCount(), N);
-  TFLITE_DCHECK_LE(input2_shape.DimensionsCount(), N);
-  TFLITE_DCHECK_LE(output_shape.DimensionsCount(), N);
-  NdArrayDesc<N> desc1;
-  NdArrayDesc<N> desc2;
-  NdArrayDesc<N> output_desc;
-  NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
-                                      &desc2);
-  CopyDimsToDesc(RuntimeShape::ExtendedShape(N, output_shape), &output_desc);
-
-  // In Tensorflow, the dimensions are canonically named (batch_number, row,
-  // col, channel), with extents (batches, height, width, depth), with the
-  // trailing dimension changing most rapidly (channels has the smallest stride,
-  // typically 1 element).
-  //
-  // In generated C code, we store arrays with the dimensions reversed. The
-  // first dimension has smallest stride.
-  //
-  // We name our variables by their Tensorflow convention, but generate C code
-  // nesting loops such that the innermost loop has the smallest stride for the
-  // best cache behavior.
-  auto sub_func = [&](int indexes[N]) {
-    output_data[SubscriptToIndex(output_desc, indexes)] =
-        ActivationFunctionWithMinMax(
-            input1_data[SubscriptToIndex(desc1, indexes)] -
-                input2_data[SubscriptToIndex(desc2, indexes)],
-            params.quantized_activation_min, params.quantized_activation_max);
-  };
-  NDOpsHelper<N>(output_desc, sub_func);
-}
-
-template <int N = 5>
-void BroadcastSubSlow(const ArithmeticParams& params,
-                      const RuntimeShape& input1_shape,
-                      const int64_t* input1_data,
-                      const RuntimeShape& input2_shape,
-                      const int64_t* input2_data,
-                      const RuntimeShape& output_shape, int64_t* output_data) {
-  ruy::profiler::ScopeLabel label("BroadcastSubSlow/int64_t");
-  TFLITE_DCHECK_LE(input1_shape.DimensionsCount(), N);
-  TFLITE_DCHECK_LE(input2_shape.DimensionsCount(), N);
-  TFLITE_DCHECK_LE(output_shape.DimensionsCount(), N);
-  NdArrayDesc<N> desc1;
-  NdArrayDesc<N> desc2;
-  NdArrayDesc<N> output_desc;
-  NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
-                                      &desc2);
-  CopyDimsToDesc(RuntimeShape::ExtendedShape(N, output_shape), &output_desc);
-
-  // In Tensorflow, the dimensions are canonically named (batch_number, row,
-  // col, channel), with extents (batches, height, width, depth), with the
-  // trailing dimension changing most rapidly (channels has the smallest stride,
-  // typically 1 element).
-  //
-  // In generated C code, we store arrays with the dimensions reversed. The
-  // first dimension has smallest stride.
-  //
-  // We name our variables by their Tensorflow convention, but generate C code
-  // nesting loops such that the innermost loop has the smallest stride for the
-  // best cache behavior.
-  auto sub_func = [&](int indexes[N]) {
-    output_data[SubscriptToIndex(output_desc, indexes)] =
-        ActivationFunctionWithMinMax(
-            input1_data[SubscriptToIndex(desc1, indexes)] -
-                input2_data[SubscriptToIndex(desc2, indexes)],
-            params.int64_activation_min, params.int64_activation_max);
-  };
-  NDOpsHelper<N>(output_desc, sub_func);
-}
-
-template <typename T, int N = 5>
+template <typename T>
 void BroadcastSubSlow(const ArithmeticParams& params,
                       const RuntimeShape& input1_shape, const T* input1_data,
                       const RuntimeShape& input2_shape, const T* input2_data,
                       const RuntimeShape& output_shape, T* output_data) {
-  ruy::profiler::ScopeLabel label("BroadcastSubSlow/templated");
-  TFLITE_DCHECK_LE(input1_shape.DimensionsCount(), N);
-  TFLITE_DCHECK_LE(input2_shape.DimensionsCount(), N);
-  TFLITE_DCHECK_LE(output_shape.DimensionsCount(), N);
-  NdArrayDesc<N> desc1;
-  NdArrayDesc<N> desc2;
-  NdArrayDesc<N> output_desc;
-  NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
-                                      &desc2);
-  CopyDimsToDesc(RuntimeShape::ExtendedShape(N, output_shape), &output_desc);
-
-  // In Tensorflow, the dimensions are canonically named (batch_number, row,
-  // col, channel), with extents (batches, height, width, depth), with the
-  // trailing dimension changing most rapidly (channels has the smallest stride,
-  // typically 1 element).
-  //
-  // In generated C code, we store arrays with the dimensions reversed. The
-  // first dimension has smallest stride.
-  //
-  // We name our variables by their Tensorflow convention, but generate C code
-  // nesting loops such that the innermost loop has the smallest stride for the
-  // best cache behavior.
-  auto sub_func = [&](int indexes[N]) {
-    output_data[SubscriptToIndex(output_desc, indexes)] =
-        ActivationFunctionWithMinMax(
-            input1_data[SubscriptToIndex(desc1, indexes)] -
-                input2_data[SubscriptToIndex(desc2, indexes)],
-            params.quantized_activation_min, params.quantized_activation_max);
-  };
-  NDOpsHelper<N>(output_desc, sub_func);
+  ruy::profiler::ScopeLabel label("BroadcastSubSlow/T");
+  BroadcastSubCommon<T>(
+      params, input1_shape, input1_data, input2_shape, input2_data,
+      output_shape, output_data,
+      [](T input1_val, T input2_val, const ArithmeticParams& params) {
+        T activation_min, activation_max;
+        GetActivationParams(params, &activation_min, &activation_max);
+        return ActivationFunctionWithMinMax(input1_val - input2_val,
+                                            activation_min, activation_max);
+      });
 }
 
-template <int N = 5>
 inline void BroadcastSub16POTSlow(const ArithmeticParams& params,
                                   const RuntimeShape& input1_shape,
                                   const int16_t* input1_data,
@@ -230,42 +277,24 @@
                                   const RuntimeShape& output_shape,
                                   int16_t* output_data) {
   ruy::profiler::ScopeLabel label("BroadcastSub16POTSlow/int16_t");
-  NdArrayDesc<N> desc1;
-  NdArrayDesc<N> desc2;
-  NdArrayDesc<N> output_desc;
-  NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
-                                      &desc2);
-  CopyDimsToDesc(RuntimeShape::ExtendedShape(N, output_shape), &output_desc);
-
-  // In Tensorflow, the dimensions are canonically named (batch_number, row,
-  // col, channel), with extents (batches, height, width, depth), with the
-  // trailing dimension changing most rapidly (channels has the smallest stride,
-  // typically 1 element).
-  //
-  // In generated C code, we store arrays with the dimensions reversed. The
-  // first dimension has smallest stride.
-  //
-  // We name our variables by their Tensorflow convention, but generate C code
-  // nesting loops such that the innermost loop has the smallest stride for the
-  // best cache behavior.
-  auto sub_func = [&](int indexes[N]) {
-    const int32_t input1_val = input1_data[SubscriptToIndex(desc1, indexes)];
-    const int32_t input2_val = input2_data[SubscriptToIndex(desc2, indexes)];
-    const int32_t scaled_input1_val =
-        gemmlowp::RoundingDivideByPOT(input1_val, -params.input1_shift);
-    const int32_t scaled_input2_val =
-        gemmlowp::RoundingDivideByPOT(input2_val, -params.input2_shift);
-    const int32_t raw_output = scaled_input1_val - scaled_input2_val;
-    const int32_t clamped_output =
-        std::min(params.quantized_activation_max,
-                 std::max(params.quantized_activation_min, raw_output));
-    output_data[SubscriptToIndex(output_desc, indexes)] =
-        static_cast<int16_t>(clamped_output);
-  };
-  NDOpsHelper<N>(output_desc, sub_func);
+  BroadcastSubCommon<int16_t>(
+      params, input1_shape, input1_data, input2_shape, input2_data,
+      output_shape, output_data,
+      [](int16_t input1_val, int16_t input2_val,
+         const ArithmeticParams& params) {
+        const int32_t scaled_input1_val =
+            gemmlowp::RoundingDivideByPOT(input1_val, -params.input1_shift);
+        const int32_t scaled_input2_val =
+            gemmlowp::RoundingDivideByPOT(input2_val, -params.input2_shift);
+        const int32_t raw_output = scaled_input1_val - scaled_input2_val;
+        const int32_t clamped_output =
+            std::min(params.quantized_activation_max,
+                     std::max(params.quantized_activation_min, raw_output));
+        return static_cast<int16_t>(clamped_output);
+      });
 }
 
-template <typename T, int N = 5>
+template <typename T>
 void BroadcastQuantSubSlow(const ArithmeticParams& params,
                            const RuntimeShape& input1_shape,
                            const T* input1_data,
@@ -273,52 +302,32 @@
                            const T* input2_data,
                            const RuntimeShape& output_shape, T* output_data) {
   ruy::profiler::ScopeLabel label("BroadcastQuantSubSlow/T");
-  TFLITE_DCHECK_LE(input1_shape.DimensionsCount(), N);
-  TFLITE_DCHECK_LE(input2_shape.DimensionsCount(), N);
-  TFLITE_DCHECK_LE(output_shape.DimensionsCount(), N);
-  NdArrayDesc<N> desc1;
-  NdArrayDesc<N> desc2;
-  NdArrayDesc<N> output_desc;
-  NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
-                                      &desc2);
-  CopyDimsToDesc(RuntimeShape::ExtendedShape(N, output_shape), &output_desc);
-
-  // In Tensorflow, the dimensions are canonically named (batch_number, row,
-  // col, channel), with extents (batches, height, width, depth), with the
-  // trailing dimension changing most rapidly (channels has the smallest stride,
-  // typically 1 element).
-  //
-  // In generated C code, we store arrays with the dimensions reversed. The
-  // first dimension has smallest stride.
-  //
-  // We name our variables by their Tensorflow convention, but generate C code
-  // nesting loops such that the innermost loop has the smallest stride for the
-  // best cache behavior.
-  auto sub_func = [&](int indexes[N]) {
-    const int32_t input1_val =
-        params.input1_offset + input1_data[SubscriptToIndex(desc1, indexes)];
-    const int32_t input2_val =
-        params.input2_offset + input2_data[SubscriptToIndex(desc2, indexes)];
-    const int32_t shifted_input1_val = input1_val * (1 << params.left_shift);
-    const int32_t shifted_input2_val = input2_val * (1 << params.left_shift);
-    const int32_t scaled_input1_val =
-        MultiplyByQuantizedMultiplierSmallerThanOneExp(
-            shifted_input1_val, params.input1_multiplier, params.input1_shift);
-    const int32_t scaled_input2_val =
-        MultiplyByQuantizedMultiplierSmallerThanOneExp(
-            shifted_input2_val, params.input2_multiplier, params.input2_shift);
-    const int32_t raw_sub = scaled_input1_val - scaled_input2_val;
-    const int32_t raw_output =
-        MultiplyByQuantizedMultiplierSmallerThanOneExp(
-            raw_sub, params.output_multiplier, params.output_shift) +
-        params.output_offset;
-    const int32_t clamped_output =
-        std::min(params.quantized_activation_max,
-                 std::max(params.quantized_activation_min, raw_output));
-    output_data[SubscriptToIndex(output_desc, indexes)] =
-        static_cast<T>(clamped_output);
-  };
-  NDOpsHelper<N>(output_desc, sub_func);
+  BroadcastSubCommon<T>(
+      params, input1_shape, input1_data, input2_shape, input2_data,
+      output_shape, output_data,
+      [](T input1_val, T input2_val, const ArithmeticParams& params) {
+        const int32_t shifted_input1_val =
+            (params.input1_offset + input1_val) * (1 << params.left_shift);
+        const int32_t shifted_input2_val =
+            (params.input2_offset + input2_val) * (1 << params.left_shift);
+        const int32_t scaled_input1_val =
+            MultiplyByQuantizedMultiplierSmallerThanOneExp(
+                shifted_input1_val, params.input1_multiplier,
+                params.input1_shift);
+        const int32_t scaled_input2_val =
+            MultiplyByQuantizedMultiplierSmallerThanOneExp(
+                shifted_input2_val, params.input2_multiplier,
+                params.input2_shift);
+        const int32_t raw_sub = scaled_input1_val - scaled_input2_val;
+        const int32_t raw_output =
+            MultiplyByQuantizedMultiplierSmallerThanOneExp(
+                raw_sub, params.output_multiplier, params.output_shift) +
+            params.output_offset;
+        const int32_t clamped_output =
+            std::min(params.quantized_activation_max,
+                     std::max(params.quantized_activation_min, raw_output));
+        return static_cast<T>(clamped_output);
+      });
 }
 
 // Element-wise add that can often be used for inner loop of broadcast add as
@@ -405,35 +414,12 @@
          const T* input1_data, const RuntimeShape& input2_shape,
          const T* input2_data, const RuntimeShape& output_shape,
          T* output_data) {
-  NdArrayDesc<4> desc1;
-  NdArrayDesc<4> desc2;
-  NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
-                                      &desc2);
-  const RuntimeShape extended_output_shape =
-      RuntimeShape::ExtendedShape(4, output_shape);
-
-  // In Tensorflow, the dimensions are canonically named (batch_number, row,
-  // col, channel), with extents (batches, height, width, depth), with the
-  // trailing dimension changing most rapidly (channels has the smallest stride,
-  // typically 1 element).
-  //
-  // In generated C code, we store arrays with the dimensions reversed. The
-  // first dimension has smallest stride.
-  //
-  // We name our variables by their Tensorflow convention, but generate C code
-  // nesting loops such that the innermost loop has the smallest stride for the
-  // best cache behavior.
-  for (int b = 0; b < extended_output_shape.Dims(0); ++b) {
-    for (int y = 0; y < extended_output_shape.Dims(1); ++y) {
-      for (int x = 0; x < extended_output_shape.Dims(2); ++x) {
-        for (int c = 0; c < extended_output_shape.Dims(3); ++c) {
-          output_data[Offset(extended_output_shape, b, y, x, c)] =
-              input1_data[SubscriptToIndex(desc1, b, y, x, c)] -
-              input2_data[SubscriptToIndex(desc2, b, y, x, c)];
-        }
-      }
-    }
-  }
+  BroadcastSubCommon<T>(
+      params, input1_shape, input1_data, input2_shape, input2_data,
+      output_shape, output_data,
+      [](T input1_val, T input2_val, const ArithmeticParams& params) {
+        return input1_val - input2_val;
+      });
 }
 
 inline void SetActivationMinMax(const ArithmeticParams& params,
diff --git a/tensorflow/lite/kernels/internal/reference/transpose_conv.h b/tensorflow/lite/kernels/internal/reference/transpose_conv.h
index 8a51e0f..744ed0f 100644
--- a/tensorflow/lite/kernels/internal/reference/transpose_conv.h
+++ b/tensorflow/lite/kernels/internal/reference/transpose_conv.h
@@ -219,6 +219,103 @@
   }
 }
 
+inline void HybridTransposeConv(
+    const ConvParams& params, float* scaling_factors_ptr,
+    const RuntimeShape& input_shape, const int8_t* input_data,
+    const RuntimeShape& filter_shape, const int8_t* filter_data,
+    const RuntimeShape& bias_shape, const float* bias_data,
+    const RuntimeShape& output_shape, float* output_data,
+    const float* per_channel_scale, int32_t* input_offset) {
+  const int stride_width = params.stride_width;
+  const int stride_height = params.stride_height;
+  const int pad_width = params.padding_values.width;
+  const int pad_height = params.padding_values.height;
+  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
+  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
+  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
+
+  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
+  const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3);
+  const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3);
+  const int input_height = input_shape.Dims(1);
+  const int input_width = input_shape.Dims(2);
+  const int filter_height = filter_shape.Dims(1);
+  const int filter_width = filter_shape.Dims(2);
+  const int output_height = output_shape.Dims(1);
+  const int output_width = output_shape.Dims(2);
+  const float output_activation_min = params.float_activation_min;
+  const float output_activation_max = params.float_activation_max;
+  if (bias_data) {
+    TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);
+  }
+
+  // Although transpose convolution simplifies to convolution with transposed
+  // weights for strides of 1, non-unitary striding complicates matters. To
+  // keep this reference implementation as clear as possible, we use a
+  // "scatter" access pattern, where we loop through all the input elements,
+  // computing their influence on the output, rather than looping through the
+  // output elements in the typical "gather" access pattern of a conv. We
+  // therefore must initialize the output array to zero.
+  const int num_elements = output_shape.FlatSize();
+  for (int i = 0; i < num_elements; i++) {
+    output_data[i] = 0.0f;
+  }
+
+  // Loop through input elements one at a time.
+  for (int batch = 0; batch < batches; ++batch) {
+    const float scaling_factor = scaling_factors_ptr[batch];
+    for (int in_y = 0; in_y < input_height; ++in_y) {
+      for (int in_x = 0; in_x < input_width; ++in_x) {
+        for (int in_channel = 0; in_channel < input_depth; ++in_channel) {
+          // Loop through the output elements it will influence
+          const int out_x_origin = (in_x * stride_width) - pad_width;
+          const int out_y_origin = (in_y * stride_height) - pad_height;
+          for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
+            for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
+              for (int out_channel = 0; out_channel < output_depth;
+                   ++out_channel) {
+                // Compute output element location
+                const int out_x = out_x_origin + filter_x;
+                const int out_y = out_y_origin + filter_y;
+                // We cannot accumulate out of bounds
+                if ((out_x >= 0) && (out_x < output_width) && (out_y >= 0) &&
+                    (out_y < output_height)) {
+                  int32_t input_value = input_data[Offset(
+                      input_shape, batch, in_y, in_x, in_channel)];
+                  int32_t filter_value =
+                      filter_data[Offset(filter_shape, out_channel, filter_y,
+                                         filter_x, in_channel)];
+                  int32_t acc =
+                      (input_value - input_offset[batch]) * filter_value;
+                  output_data[Offset(output_shape, batch, out_y, out_x,
+                                     out_channel)] +=
+                      acc * per_channel_scale[out_channel] * scaling_factor;
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+
+  for (int batch = 0; batch < batches; ++batch) {
+    for (int out_y = 0; out_y < output_height; ++out_y) {
+      for (int out_x = 0; out_x < output_width; ++out_x) {
+        for (int out_channel = 0; out_channel < output_depth; ++out_channel) {
+          float acc = output_data[Offset(output_shape, batch, out_y, out_x,
+                                         out_channel)];
+          if (bias_data) acc += bias_data[out_channel];
+
+          output_data[Offset(output_shape, batch, out_y, out_x, out_channel)] =
+              ActivationFunctionWithMinMax(acc, output_activation_min,
+                                           output_activation_max);
+        }
+      }
+    }
+  }
+}
+
 }  // namespace reference_ops
 }  // namespace tflite
 
diff --git a/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_model_settings.cc b/tensorflow/lite/kernels/internal/runtime_shape.cc
similarity index 67%
copy from tensorflow/lite/micro/examples/micro_speech/simple_features/simple_model_settings.cc
copy to tensorflow/lite/kernels/internal/runtime_shape.cc
index e2cf661..dd12278 100644
--- a/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_model_settings.cc
+++ b/tensorflow/lite/kernels/internal/runtime_shape.cc
@@ -1,4 +1,4 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -13,11 +13,11 @@
 limitations under the License.
 ==============================================================================*/
 
-#include "tensorflow/lite/micro/examples/micro_speech/simple_features/simple_model_settings.h"
+#include "tensorflow/lite/kernels/internal/runtime_shape.h"
 
-const char* kCategoryLabels[kCategoryCount] = {
-    "silence",
-    "unknown",
-    "yes",
-    "no",
-};
+namespace tflite {
+
+// Defining a constexpr static class member is necessary in C++11
+constexpr int tflite::RuntimeShape::kMaxSmallSize;
+
+}  // namespace tflite
diff --git a/tensorflow/lite/kernels/internal/runtime_shape.h b/tensorflow/lite/kernels/internal/runtime_shape.h
index 0e4df2c..bc786bd 100644
--- a/tensorflow/lite/kernels/internal/runtime_shape.h
+++ b/tensorflow/lite/kernels/internal/runtime_shape.h
@@ -15,6 +15,8 @@
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_RUNTIME_SHAPE_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_RUNTIME_SHAPE_H_
 
+#include <cstring>
+
 #include "tensorflow/lite/kernels/internal/compatibility.h"
 
 namespace tflite {
diff --git a/tensorflow/lite/kernels/internal/types.h b/tensorflow/lite/kernels/internal/types.h
index b775ca8..f2cc160 100644
--- a/tensorflow/lite/kernels/internal/types.h
+++ b/tensorflow/lite/kernels/internal/types.h
@@ -157,7 +157,8 @@
 };
 
 // Gets next index to iterate through a multidimensional array.
-inline bool NextIndex(const int num_dims, const int* dims, int* current) {
+template <typename IndexType = int>
+inline bool NextIndex(const int num_dims, const int* dims, IndexType* current) {
   if (num_dims == 0) {
     return false;
   }
@@ -165,7 +166,7 @@
   TFLITE_DCHECK(current != nullptr);
   int carry = 1;
   for (int idx = num_dims - 1; idx >= 0; --idx) {
-    int current_val = current[idx] + carry;
+    IndexType current_val = current[idx] + carry;
     TFLITE_DCHECK_GE(dims[idx], current_val);
     if (dims[idx] == current_val) {
       current[idx] = 0;
diff --git a/tensorflow/lite/kernels/kernel_util.cc b/tensorflow/lite/kernels/kernel_util.cc
index 58fd99f..39f7bc7 100644
--- a/tensorflow/lite/kernels/kernel_util.cc
+++ b/tensorflow/lite/kernels/kernel_util.cc
@@ -572,12 +572,11 @@
 bool IsMobilePlatform() {
 #if defined(ANDROID) || defined(__ANDROID__)
   return true;
-#elif defined(__APPLE__)
-#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
+#elif defined(__APPLE__) && (TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE)
   return true;
-#endif
-#endif
+#else
   return false;
+#endif
 }
 
 bool HasUnspecifiedDimension(const TfLiteTensor* tensor) {
diff --git a/tensorflow/lite/micro/BUILD b/tensorflow/lite/micro/BUILD
index 13cb8de..58ea22d 100644
--- a/tensorflow/lite/micro/BUILD
+++ b/tensorflow/lite/micro/BUILD
@@ -39,8 +39,8 @@
     deps = [
         ":memory_helpers",
         ":micro_allocator",
-        ":micro_context",
-        ":micro_graph",
+        ":micro_interpreter_context",
+        ":micro_interpreter_graph",
         ":micro_profiler_interface",
         ":op_resolvers",
         "//tensorflow/lite:type_to_tflitetype",
@@ -63,9 +63,27 @@
     ],
     copts = micro_copts(),
     deps = [
+        ":micro_common",
+        ":micro_graph",
+        ":micro_log",
+        "//tensorflow/lite/c:common",
+    ],
+)
+
+cc_library(
+    name = "micro_interpreter_context",
+    srcs = [
+        "micro_interpreter_context.cc",
+    ],
+    hdrs = [
+        "micro_interpreter_context.h",
+    ],
+    copts = micro_copts(),
+    deps = [
         ":memory_helpers",
         ":micro_allocator",
-        ":micro_graph",
+        ":micro_context",
+        ":micro_interpreter_graph",
         ":micro_log",
         ":micro_profiler_interface",
         "//tensorflow/lite/c:common",
@@ -94,22 +112,36 @@
     copts = micro_copts(),
     deps = [
         ":memory_helpers",
-        ":micro_allocator",
+        ":micro_arena_constants",
         ":micro_context",
         ":micro_log",
         ":mock_micro_graph",
         "//tensorflow/lite/c:common",
+        "//tensorflow/lite/micro/arena_allocator:simple_memory_allocator",
     ],
 )
 
 cc_library(
     name = "micro_graph",
-    srcs = ["micro_graph.cc"],
     hdrs = ["micro_graph.h"],
+    copts = micro_copts(),
+    deps = [
+        ":micro_common",
+        ":micro_resource_variable",
+        "//tensorflow/lite/kernels/internal:compatibility",
+    ],
+)
+
+cc_library(
+    name = "micro_interpreter_graph",
+    srcs = ["micro_interpreter_graph.cc"],
+    hdrs = ["micro_interpreter_graph.h"],
+    copts = micro_copts(),
     deps = [
         ":memory_helpers",
         ":micro_allocator",
         ":micro_common",
+        ":micro_graph",
         ":micro_log",
         ":micro_profiler",
         ":micro_resource_variable",
@@ -124,6 +156,7 @@
     name = "mock_micro_graph",
     srcs = ["mock_micro_graph.cc"],
     hdrs = ["mock_micro_graph.h"],
+    copts = micro_copts(),
     deps = [
         ":micro_allocator",
         ":micro_graph",
@@ -158,6 +191,7 @@
         "//tensorflow/lite/micro/arena_allocator:persistent_arena_buffer_allocator",
         "//tensorflow/lite/micro/arena_allocator:simple_memory_allocator",
         "//tensorflow/lite/micro/memory_planner:greedy_memory_planner",
+        "//tensorflow/lite/micro/memory_planner:linear_memory_planner",
         "//tensorflow/lite/micro/memory_planner:micro_memory_planner",
         "//tensorflow/lite/micro/tflite_bridge:flatbuffer_conversions_bridge",
         "//tensorflow/lite/schema:schema_fbs",
@@ -179,6 +213,7 @@
     name = "flatbuffer_utils",
     srcs = ["flatbuffer_utils.cc"],
     hdrs = ["flatbuffer_utils.h"],
+    copts = micro_copts(),
     deps = [
         "//tensorflow/lite/c:common",
         "//tensorflow/lite/schema:schema_fbs",
@@ -190,6 +225,7 @@
     name = "memory_helpers",
     srcs = ["memory_helpers.cc"],
     hdrs = ["memory_helpers.h"],
+    copts = micro_copts(),
     deps = [
         "//tensorflow/lite/c:common",
         "//tensorflow/lite/kernels/internal:reference",
@@ -369,6 +405,21 @@
 )
 
 cc_library(
+    name = "span",
+    hdrs = ["span.h"],
+    copts = micro_copts(),
+)
+
+cc_library(
+    name = "static_vector",
+    hdrs = ["static_vector.h"],
+    copts = micro_copts(),
+    deps = [
+        "//tensorflow/lite/kernels:op_macros",
+    ],
+)
+
+cc_library(
     name = "system_setup",
     srcs = [
         "system_setup.cc",
@@ -387,6 +438,7 @@
     deps = [
         ":micro_log",
         ":system_setup",
+        "//tensorflow/lite/micro/testing:micro_test",
     ],
 )
 
@@ -403,13 +455,14 @@
 )
 
 cc_test(
-    name = "micro_context_test",
+    name = "micro_interpreter_context_test",
     srcs = [
-        "micro_context_test.cc",
+        "micro_interpreter_context_test.cc",
     ],
     deps = [
         ":micro_allocator",
-        ":micro_context",
+        ":micro_interpreter_context",
+        ":micro_interpreter_graph",
         ":test_helpers",
         "//tensorflow/lite/micro/testing:micro_test",
     ],
@@ -423,6 +476,7 @@
     deps = [
         ":fake_micro_context",
         ":micro_allocator",
+        ":mock_micro_graph",
         ":test_helpers",
         "//tensorflow/lite/micro/testing:micro_test",
     ],
@@ -518,6 +572,18 @@
 )
 
 cc_test(
+    name = "span_test",
+    size = "small",
+    srcs = [
+        "span_test.cc",
+    ],
+    deps = [
+        ":span",
+        "//tensorflow/lite/micro/testing:micro_test",
+    ],
+)
+
+cc_test(
     name = "testing_helpers_test",
     srcs = [
         "testing_helpers_test.cc",
@@ -574,6 +640,18 @@
     ],
 )
 
+cc_test(
+    name = "static_vector_test",
+    size = "small",
+    srcs = [
+        "static_vector_test.cc",
+    ],
+    deps = [
+        ":static_vector",
+        "//tensorflow/lite/micro/testing:micro_test",
+    ],
+)
+
 bzl_library(
     name = "build_def_bzl",
     srcs = ["build_def.bzl"],
diff --git a/tensorflow/lite/micro/arc_emsdp/debug_log.cc b/tensorflow/lite/micro/arc_emsdp/debug_log.cc
index 55cf420..a61e3c2 100644
--- a/tensorflow/lite/micro/arc_emsdp/debug_log.cc
+++ b/tensorflow/lite/micro/arc_emsdp/debug_log.cc
@@ -122,4 +122,12 @@
   vsnprintf_(log_buffer, kMaxLogLen, format, args);
   LogDebugString(log_buffer);
 #endif
-}
\ No newline at end of file
+}
+
+#ifndef TF_LITE_STRIP_ERROR_STRINGS
+// Only called from MicroVsnprintf (micro_log.h)
+extern "C" int DebugVsnprintf(char* buffer, size_t buf_size, const char* format,
+                              va_list vlist) {
+  return vsnprintf_(buffer, buf_size, format, vlist);
+}
+#endif
diff --git a/tensorflow/lite/micro/benchmarks/BUILD b/tensorflow/lite/micro/benchmarks/BUILD
index 808f6ac..d0f67b6 100644
--- a/tensorflow/lite/micro/benchmarks/BUILD
+++ b/tensorflow/lite/micro/benchmarks/BUILD
@@ -65,9 +65,7 @@
     hdrs = [
         "//tensorflow/lite/micro/models:generated_keyword_scrambled_8bit_model_hdr",
     ],
-    visibility = [
-        ":micro_top_level",
-    ],
+    visibility = ["//visibility:private"],
 )
 
 cc_binary(
diff --git a/tensorflow/lite/micro/benchmarks/README.md b/tensorflow/lite/micro/benchmarks/README.md
index 1031a58..1ac5d25 100644
--- a/tensorflow/lite/micro/benchmarks/README.md
+++ b/tensorflow/lite/micro/benchmarks/README.md
@@ -70,29 +70,20 @@
 For more info about the Corstone-300 software see:
 [tensorflow/lite/micro/cortex_m_corstone_300/README.md](../cortex_m_corstone_300/README.md).
 
-Disclaimer: Executing the benchmark test on the Corstone-300 software will
-provide a general metric of instructions executed. The estimates are not cycle
-accurate, however it aligns to instruction per cycle, and is a consistent
-environment. This means it can detect if code changes changed performance.
+Disclaimer: The FVP cannot be used to measure CPU performance.
+The results are not reliable, not even for relative measurements.
+The FVP may, however, be used for performance measurements when running on the NPU; in that case only the NPU PMU numbers can be used. The NPU model is cycle accurate to within approximately +-10%.
 
-The person detection benchmark can also run with Ethos-U enabled, as the
-downloaded model will be optimized for Ethos-U. For more info see:
+As an example, the downloaded person detection model will be optimized for Ethos-U. For more info see:
 [tensorflow/lite/micro/kernels/ethos_u/README.md](../kernels/ethos_u/README.md).
+And since it only makes sense to measure performance on the NPU, only the person detection benchmark should be run and only with Ethos-U enabled.
+See also network tester example, where person detect model is used in the same way when Ethos-U is enabled:
+[tensorflow/lite/micro/examples/network_tester/README.md](../examples/network_tester/README.md).
 
-To run the keyword benchmark on FVP:
-
-```
-make -j -f tensorflow/lite/micro/tools/make/Makefile TARGET=cortex_m_corstone_300 TARGET_ARCH=cortex-m55 run_keyword_benchmark
-```
-
-To run the person detection benchmark on FVP:
-
-```
-make -j -f tensorflow/lite/micro/tools/make/Makefile TARGET=cortex_m_corstone_300 TARGET_ARCH=cortex-m55 run_person_detection_benchmark
-```
+The person detection model is not an optimal model for Ethos-U since it is quite small. Also note that only the NPU PMU cycles are logged even though the CPU is setting up the Ethos-U driver in each iteration.
 
 To run the person detection benchmark on FVP with Ethos-U:
 
 ```
-make -j -f tensorflow/lite/micro/tools/make/Makefile CO_PROCESSOR=ethos_u TARGET=cortex_m_corstone_300 TARGET_ARCH=cortex-m55 run_person_detection_benchmark
+make -f tensorflow/lite/micro/tools/make/Makefile CO_PROCESSOR=ethos_u TARGET=cortex_m_corstone_300 TARGET_ARCH=cortex-m55 run_person_detection_benchmark
 ```
diff --git a/tensorflow/lite/micro/benchmarks/person_detection_benchmark.cc b/tensorflow/lite/micro/benchmarks/person_detection_benchmark.cc
index e21789b..29d30ee 100644
--- a/tensorflow/lite/micro/benchmarks/person_detection_benchmark.cc
+++ b/tensorflow/lite/micro/benchmarks/person_detection_benchmark.cc
@@ -1,4 +1,4 @@
-/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -37,7 +37,11 @@
 
 namespace tflite {
 
+#ifdef ETHOS_U
+using PersonDetectionOpResolver = MicroMutableOpResolver<1>;
+#else
 using PersonDetectionOpResolver = MicroMutableOpResolver<6>;
+#endif
 using PersonDetectionBenchmarkRunner = MicroBenchmarkRunner<int8_t>;
 
 // Create an area of memory to use for input, output, and intermediate arrays.
@@ -57,12 +61,16 @@
   // PersonDetectionBenchmarkRunner object.
   PersonDetectionOpResolver* op_resolver =
       new (op_resolver_buffer) PersonDetectionOpResolver();
+#ifdef ETHOS_U
+  op_resolver->AddEthosU();
+#else
   op_resolver->AddFullyConnected(tflite::Register_FULLY_CONNECTED_INT8());
   op_resolver->AddConv2D(tflite::Register_CONV_2D_INT8REF());
   op_resolver->AddDepthwiseConv2D();
   op_resolver->AddSoftmax();
   op_resolver->AddAveragePool2D(tflite::Register_AVERAGE_POOL_2D_INT8());
   op_resolver->AddReshape();
+#endif
   return new (benchmark_runner_buffer)
       PersonDetectionBenchmarkRunner(g_person_detect_model_data, op_resolver,
                                      tensor_arena, kTensorArenaSize, profiler);
diff --git a/tensorflow/lite/micro/bluepill/debug_log.cc b/tensorflow/lite/micro/bluepill/debug_log.cc
index 6d0d53b..720410b 100644
--- a/tensorflow/lite/micro/bluepill/debug_log.cc
+++ b/tensorflow/lite/micro/bluepill/debug_log.cc
@@ -45,3 +45,11 @@
   SysWriteDebugConsole(log_buffer);
 #endif  // TF_LITE_STRIP_ERROR_STRINGS
 }
+
+#ifndef TF_LITE_STRIP_ERROR_STRINGS
+// Only called from MicroVsnprintf (micro_log.h)
+extern "C" int DebugVsnprintf(char* buffer, size_t buf_size, const char* format,
+                              va_list vlist) {
+  return vsnprintf_(buffer, buf_size, format, vlist);
+}
+#endif
diff --git a/tensorflow/lite/micro/build_def.bzl b/tensorflow/lite/micro/build_def.bzl
index b9ecf77..82768f1 100644
--- a/tensorflow/lite/micro/build_def.bzl
+++ b/tensorflow/lite/micro/build_def.bzl
@@ -1,6 +1,7 @@
 def micro_copts():
     return [
         "-Wall",
+        "-Wno-unused-parameter",
         "-Wnon-virtual-dtor",
         "-DFLATBUFFERS_LOCALE_INDEPENDENT=0",
     ]
diff --git a/tensorflow/lite/micro/chre/debug_log.cc b/tensorflow/lite/micro/chre/debug_log.cc
index b599ebe..99f7c67 100644
--- a/tensorflow/lite/micro/chre/debug_log.cc
+++ b/tensorflow/lite/micro/chre/debug_log.cc
@@ -29,4 +29,12 @@
   vsnprintf_(log_buffer, kMaxLogLen, format, args);
   chreLog(CHRE_LOG_DEBUG, "[TFL_MICRO] %s", log_buffer);
 #endif
-}
\ No newline at end of file
+}
+
+#ifndef TF_LITE_STRIP_ERROR_STRINGS
+// Only called from MicroVsnprintf (micro_log.h)
+extern "C" int DebugVsnprintf(char* buffer, size_t buf_size, const char* format,
+                              va_list vlist) {
+  return vsnprintf_(buffer, buf_size, format, vlist);
+}
+#endif
diff --git a/tensorflow/lite/micro/compression.h b/tensorflow/lite/micro/compression.h
new file mode 100644
index 0000000..43965c2
--- /dev/null
+++ b/tensorflow/lite/micro/compression.h
@@ -0,0 +1,70 @@
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_LITE_MICRO_MICRO_COMPRESSION_H_
+#define TENSORFLOW_LITE_MICRO_MICRO_COMPRESSION_H_
+
+#ifdef USE_TFLM_COMPRESSION
+
+#include "tensorflow/lite/c/common.h"
+
+namespace tflite {
+
+//
+// Compressed tensors
+//
+
+static constexpr const char* kCompressionMetadataString = "TFLM_COMPRESSION";
+
+enum class CompressionScheme : uint8_t {
+  kBinQuant,
+};
+
+// TODO(ddavis-2015): pack struct
+struct LookupTableData {
+  static constexpr size_t kMaxBitWidth = 7;
+  static constexpr size_t kMaxValueTableChannelStride = 128;
+
+  const void* value_table;             // Pointer into FlatBuffer Values.
+  uint8_t value_table_channel_stride;  // elements per channel
+  uint8_t compressed_bit_width : 3;    // 1 to 7 bits
+  bool is_per_channel_quantized : 1;   // tensor is per-channel quantized
+  bool use_alternate_axis : 1;         // shape default channel:
+                                       // 0 = first, 1 = last
+  uint8_t reserved : 3;
+};
+
+union CompressionData {
+  LookupTableData* lut_data;
+};
+
+// TODO(ddavis-2015): pack struct
+struct CompressionTensorData {
+  CompressionScheme scheme;
+  CompressionData data;
+};
+
+// TODO(ddavis-2015): pack struct
+struct CompressedTensorList {
+  // Sparsely populated array with the same number of elements as there are
+  // tensors in the Subgraph. An alternative would include a tensor index in
+  // the struct for each and walk the list on look up. This could be slow.
+  CompressionTensorData** tensors;
+};
+
+}  // namespace tflite
+
+#endif  // USE_TFLM_COMPRESSION
+#endif  // TENSORFLOW_LITE_MICRO_MICRO_COMPRESSION_H_
diff --git a/tensorflow/lite/micro/cortex_m_corstone_300/README.md b/tensorflow/lite/micro/cortex_m_corstone_300/README.md
index 94935ac..663f636 100644
--- a/tensorflow/lite/micro/cortex_m_corstone_300/README.md
+++ b/tensorflow/lite/micro/cortex_m_corstone_300/README.md
@@ -37,12 +37,12 @@
 Some examples:
 
 ```
-make -j -f tensorflow/lite/micro/tools/make/Makefile CO_PROCESSOR=ethos_u TARGET=cortex_m_corstone_300 TARGET_ARCH=cortex-m55 test_network_tester_test
-make -j -f tensorflow/lite/micro/tools/make/Makefile OPTIMIZED_KERNEL_DIR=cmsis_nn TARGET=cortex_m_corstone_300 TARGET_ARCH=cortex-m55 test_network_tester_test
-make -j -f tensorflow/lite/micro/tools/make/Makefile CO_PROCESSOR=ethos_u OPTIMIZED_KERNEL_DIR=cmsis_nn TARGET=cortex_m_corstone_300 TARGET_ARCH=cortex-m55 test_network_tester_test
-make -j -f tensorflow/lite/micro/tools/make/Makefile TARGET=cortex_m_corstone_300 TARGET_ARCH=cortex-m55 test_network_tester_test
-make -j -f tensorflow/lite/micro/tools/make/Makefile TARGET=cortex_m_corstone_300 TARGET_ARCH=cortex-m55 test_kernel_fully_connected_test
-make -j -f tensorflow/lite/micro/tools/make/Makefile OPTIMIZED_KERNEL_DIR=cmsis_nn TARGET=cortex_m_corstone_300 TARGET_ARCH=cortex-m7+fp test_kernel_fully_connected_test
-make -j -f tensorflow/lite/micro/tools/make/Makefile TARGET=cortex_m_corstone_300 TARGET_ARCH=cortex-m3 test_kernel_fully_connected_test
-make -j -f tensorflow/lite/micro/tools/make/Makefile TARGET=cortex_m_corstone_300 TARGET_ARCH=cortex-m55 BUILD_TYPE=release_with_logs TOOLCHAIN=armclang test_network_tester_test
+make -f tensorflow/lite/micro/tools/make/Makefile CO_PROCESSOR=ethos_u TARGET=cortex_m_corstone_300 TARGET_ARCH=cortex-m55 test_network_tester_test
+make -f tensorflow/lite/micro/tools/make/Makefile OPTIMIZED_KERNEL_DIR=cmsis_nn TARGET=cortex_m_corstone_300 TARGET_ARCH=cortex-m55 test_network_tester_test
+make -f tensorflow/lite/micro/tools/make/Makefile CO_PROCESSOR=ethos_u OPTIMIZED_KERNEL_DIR=cmsis_nn TARGET=cortex_m_corstone_300 TARGET_ARCH=cortex-m55 test_network_tester_test
+make -f tensorflow/lite/micro/tools/make/Makefile TARGET=cortex_m_corstone_300 TARGET_ARCH=cortex-m55 test_network_tester_test
+make -f tensorflow/lite/micro/tools/make/Makefile TARGET=cortex_m_corstone_300 TARGET_ARCH=cortex-m55 test_kernel_fully_connected_test
+make -f tensorflow/lite/micro/tools/make/Makefile OPTIMIZED_KERNEL_DIR=cmsis_nn TARGET=cortex_m_corstone_300 TARGET_ARCH=cortex-m7+fp test_kernel_fully_connected_test
+make -f tensorflow/lite/micro/tools/make/Makefile TARGET=cortex_m_corstone_300 TARGET_ARCH=cortex-m3 test_kernel_fully_connected_test
+make -f tensorflow/lite/micro/tools/make/Makefile TARGET=cortex_m_corstone_300 TARGET_ARCH=cortex-m55 BUILD_TYPE=release_with_logs TOOLCHAIN=armclang test_network_tester_test
 ```
diff --git a/tensorflow/lite/micro/cortex_m_corstone_300/system_setup.cc b/tensorflow/lite/micro/cortex_m_corstone_300/system_setup.cc
index 95a11b2..6473340 100644
--- a/tensorflow/lite/micro/cortex_m_corstone_300/system_setup.cc
+++ b/tensorflow/lite/micro/cortex_m_corstone_300/system_setup.cc
@@ -1,4 +1,4 @@
-/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -14,33 +14,90 @@
 ==============================================================================*/
 
 #ifdef ETHOS_U
+#include <inttypes.h>
+
+#include <algorithm>
+
 #include "ethosu_driver.h"
+#include "pmu_ethosu.h"
 #endif
 
 // This is set in micro/tools/make/targets/cortex_m_corstone_300_makefile.inc.
-// It is needed for the calls to NVIC_SetVector()/NVIC_EnableIR() and for the
-// DWT and PMU counters.
+// It is needed for the calls to NVIC_SetVector()/NVIC_EnableIRQ().
 #include CMSIS_DEVICE_ARM_CORTEX_M_XX_HEADER_FILE
 
 #include "tensorflow/lite/micro/micro_log.h"
 #include "tensorflow/lite/micro/micro_time.h"
 #include "tensorflow/lite/micro/system_setup.h"
 
+#ifdef ETHOS_U
+
+bool npuPmuCycleCounterIsSet;
+uint64_t npuPmuCycleCounter;
+
+extern "C" {
+void ethosu_inference_begin(struct ethosu_driver* drv, void* userArg) {
+  // Enable PMU
+  ETHOSU_PMU_Enable(drv);
+
+  // Enable cycle counter
+  ETHOSU_PMU_PMCCNTR_CFG_Set_Stop_Event(drv, ETHOSU_PMU_NPU_IDLE);
+  ETHOSU_PMU_PMCCNTR_CFG_Set_Start_Event(drv, ETHOSU_PMU_NPU_ACTIVE);
+  ETHOSU_PMU_CNTR_Enable(drv, ETHOSU_PMU_CCNT_Msk);
+  ETHOSU_PMU_CYCCNT_Reset(drv);
+
+  // Reset all counters
+  ETHOSU_PMU_EVCNTR_ALL_Reset(drv);
+}
+
+void ethosu_inference_end(struct ethosu_driver* drv, void* userArg) {
+  // Save cycle counter
+  npuPmuCycleCounter += ETHOSU_PMU_Get_CCNTR(drv);
+  npuPmuCycleCounterIsSet = true;
+
+  // Disable PMU
+  ETHOSU_PMU_Disable(drv);
+}
+}
+#endif
+
 namespace tflite {
 
 namespace {
+#ifdef ETHOS_U
+constexpr uint32_t kClocksPerSecond = 200e6;
+#else
 constexpr uint32_t kClocksPerSecond = 25e6;
+#endif
 }  // namespace
 
 uint32_t ticks_per_second() { return kClocksPerSecond; }
 
 uint32_t GetCurrentTimeTicks() {
-#if (!defined(TF_LITE_STRIP_ERROR_STRINGS) && !defined(ARMCM0))
+#if (!defined(TF_LITE_STRIP_ERROR_STRINGS))
+#ifdef ETHOS_U
+  uint32_t ticks = static_cast<uint32_t>(npuPmuCycleCounter);
+
+  // Note cycle counter will be reset here for next iteration
+  if (npuPmuCycleCounterIsSet) {
+    npuPmuCycleCounter = 0;
+    npuPmuCycleCounterIsSet = false;
+  }
+
+  return ticks;
+#else
+
+#if defined(ARMCM0)
+  return 0;
+#else
 #ifdef ARMCM55
   return ARM_PMU_Get_CCNTR();
 #else
   return DWT->CYCCNT;
 #endif
+#endif
+
+#endif
 #else
   return 0;
 #endif
@@ -88,14 +145,17 @@
 #ifdef ETHOS_U
   constexpr int ethosu_base_address = 0x48102000;
   constexpr int ethosu_irq = 56;
+  constexpr int ethosu_irq_priority = 5;
 
   // Initialize Ethos-U NPU driver.
   if (ethosu_init(&ethosu0_driver, reinterpret_cast<void*>(ethosu_base_address),
                   ethosu0_scratch, ETHOSU_FAST_MEMORY_SIZE, 1, 1)) {
     MicroPrintf("Failed to initialize Ethos-U driver");
+    return;
   }
   NVIC_SetVector(static_cast<IRQn_Type>(ethosu_irq),
                  (uint32_t)&ethosuIrqHandler0);
+  NVIC_SetPriority(static_cast<IRQn_Type>(ethosu_irq), ethosu_irq_priority);
   NVIC_EnableIRQ(static_cast<IRQn_Type>(ethosu_irq));
 #endif
 }
diff --git a/tensorflow/lite/micro/cortex_m_generic/debug_log.cc b/tensorflow/lite/micro/cortex_m_generic/debug_log.cc
index 2c237fe..b7182a5 100644
--- a/tensorflow/lite/micro/cortex_m_generic/debug_log.cc
+++ b/tensorflow/lite/micro/cortex_m_generic/debug_log.cc
@@ -16,16 +16,16 @@
 // Implementation for the DebugLog() function that prints to the debug logger on
 // an generic Cortex-M device.
 
+#include "tensorflow/lite/micro/debug_log.h"
+
 #ifdef __cplusplus
 extern "C" {
 #endif  // __cplusplus
 
-#include "tensorflow/lite/micro/debug_log.h"
-
 #include "tensorflow/lite/micro/cortex_m_generic/debug_log_callback.h"
 
 #ifndef TF_LITE_STRIP_ERROR_STRINGS
-#include "eyalroz_printf/src/printf/printf.h"
+#include <stdio.h>
 #endif
 
 static DebugLogCallback debug_log_callback = nullptr;
@@ -49,11 +49,19 @@
   constexpr int kMaxLogLen = 256;
   char log_buffer[kMaxLogLen];
 
-  vsnprintf_(log_buffer, kMaxLogLen, format, args);
+  vsnprintf(log_buffer, kMaxLogLen, format, args);
   InvokeDebugLogCallback(log_buffer);
 #endif
 }
 
+#ifndef TF_LITE_STRIP_ERROR_STRINGS
+// Only called from MicroVsnprintf (micro_log.h)
+int DebugVsnprintf(char* buffer, size_t buf_size, const char* format,
+                   va_list vlist) {
+  return vsnprintf(buffer, buf_size, format, vlist);
+}
+#endif
+
 #ifdef __cplusplus
 }  // extern "C"
 #endif  // __cplusplus
diff --git a/tensorflow/lite/micro/debug_log.cc b/tensorflow/lite/micro/debug_log.cc
index a8ef36b..9823196 100644
--- a/tensorflow/lite/micro/debug_log.cc
+++ b/tensorflow/lite/micro/debug_log.cc
@@ -44,3 +44,11 @@
   vfprintf(stderr, format, args);
 #endif
 }
+
+#ifndef TF_LITE_STRIP_ERROR_STRINGS
+// Only called from MicroVsnprintf (micro_log.h)
+extern "C" int DebugVsnprintf(char* buffer, size_t buf_size, const char* format,
+                              va_list vlist) {
+  return vsnprintf(buffer, buf_size, format, vlist);
+}
+#endif
diff --git a/tensorflow/lite/micro/debug_log.h b/tensorflow/lite/micro/debug_log.h
index c0cf699..6e2e69e 100644
--- a/tensorflow/lite/micro/debug_log.h
+++ b/tensorflow/lite/micro/debug_log.h
@@ -17,19 +17,23 @@
 
 #ifdef __cplusplus
 #include <cstdarg>
+#include <cstddef>
 #else
 #include <stdarg.h>
+#include <stddef.h>
 #endif  // __cplusplus
 
 #ifdef __cplusplus
 extern "C" {
 #endif  // __cplusplus
 
-// This function should be implemented by each target platform, and provide a
+// These functions should be implemented by each target platform, and provide a
 // way for strings to be output to some text stream. For more information, see
-// the tensorflow/lite/micro/debug_log.cc file.  This function should support
+// the tensorflow/lite/micro/debug_log.cc file.  These functions should support
 // standard C/C++ stdio style formatting operations.
 void DebugLog(const char* format, va_list args);
+int DebugVsnprintf(char* buffer, size_t buf_size, const char* format,
+                   va_list vlist);
 
 #ifdef __cplusplus
 }  // extern "C"
diff --git a/tensorflow/lite/micro/docs/new_platform_support.md b/tensorflow/lite/micro/docs/new_platform_support.md
index 692d98e..8752c8b 100644
--- a/tensorflow/lite/micro/docs/new_platform_support.md
+++ b/tensorflow/lite/micro/docs/new_platform_support.md
@@ -25,7 +25,7 @@
 Prior to integrating TFLM with a specific hardware involves tasks that is
 outside the scope of the TFLM project, including:
 
- * Toolchain setup - TFLM requires support for C++11
+ * Toolchain setup - TFLM requires support for C++17
  * Set up and installation of board-specific SDKs and IDEs
  * Compiler flags and Linker setup
  * Integrating peripherals such as cameras, microphones and accelerometers to
@@ -77,7 +77,7 @@
 that when linking TFLM into a binary, the implementations of the functions in
 [debug\_log.h](https://github.com/tensorflow/tflite-micro/blob/main/tensorflow/lite/micro/debug_log.h),
 [micro\_time.h](https://github.com/tensorflow/tflite-micro/blob/main/tensorflow/lite/micro/micro_time.h)
-and [system\_setup.h](https://github.com/tensorflow/tflite-micro/blob/main/tensorflow/lite/micro/debug_log.h)
+and [system\_setup.h](https://github.com/tensorflow/tflite-micro/blob/main/tensorflow/lite/micro/system_setup.h)
 can be found.
 
 For example, the implementations of these functions for:
diff --git a/tensorflow/lite/micro/docs/optimized_kernel_implementations.md b/tensorflow/lite/micro/docs/optimized_kernel_implementations.md
index 4a5c81a..8eefb55 100644
--- a/tensorflow/lite/micro/docs/optimized_kernel_implementations.md
+++ b/tensorflow/lite/micro/docs/optimized_kernel_implementations.md
@@ -169,6 +169,12 @@
     *   Build a static libtensorflow-microlite.a using the TFLM makefile with:
         `make -f tensorflow/lite/micro/tools/make/Makefile TARGET=<target>
         OPTIMIZED_KERNEL_DIR=<optimize_dir> microlite`
+    *   Optionally build for size or speed. Translated to a valid make command, it will be either of these two:
+        `make -f tensorflow/lite/micro/tools/make/Makefile TARGET=<target>
+        OPTIMIZED_KERNEL_DIR=<optimize_dir> OPTIMIZE_KERNELS_FOR=KERNELS_OPTIMIZED_FOR_SIZE microlite`
+        `make -f tensorflow/lite/micro/tools/make/Makefile TARGET=<target>
+        OPTIMIZED_KERNEL_DIR=<optimize_dir> OPTIMIZE_KERNELS_FOR=KERNELS_OPTIMIZED_FOR_SPEED microlite`
+        Check relevant README for given optimization library if this is applicable.
     *   Use the static library and any TFLM headers as part of the overall
         application (with its own build system).
 
diff --git a/tensorflow/lite/micro/examples/dtln/Makefile.inc b/tensorflow/lite/micro/examples/dtln/Makefile.inc
new file mode 100644
index 0000000..e88cf09
--- /dev/null
+++ b/tensorflow/lite/micro/examples/dtln/Makefile.inc
@@ -0,0 +1,30 @@
+
+DTLN_TEST_SRCS := \
+$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/dtln/dtln_test.cc \
+$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/dtln/dtln_inout_data.cc
+
+DTLN_TEST_HDRS := \
+$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/dtln/dtln_inout_data.h
+
+DTLN_GENERATOR_INPUTS := \
+$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/dtln/dtln_noise_suppression.tflite
+
+DTLN_GENERATED_SRCS := \
+$(GENERATED_SRCS_DIR)$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/dtln/dtln_noise_suppression_model_data.cc
+
+DTLN_GENERATED_HDRS := \
+$(GENERATED_SRCS_DIR)$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/dtln/dtln_noise_suppression_model_data.h
+
+# Find any platform-specific rules for this example.
+include $(wildcard $(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/dtln/*/Makefile.inc)
+
+# TODO(b/161489252): Disabling warnings for this example until we have a better
+# way to build third_party code with a reduced list of CFLAGS.
+CCFLAGS := $(filter-out $(CC_WARNINGS),$(CCFLAGS))
+
+# Tests loading and running a dtln model.
+# TODO(b/319712246): Re-enable tests once the dtln_test works on HiFi Mini & VP6
+ifneq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), hifimini vision_p6))
+  $(eval $(call microlite_test,dtln_test,\
+  $(DTLN_TEST_SRCS),$(DTLN_TEST_HDRS),$(DTLN_GENERATOR_INPUTS)))
+endif
diff --git a/tensorflow/lite/micro/examples/dtln/README.md b/tensorflow/lite/micro/examples/dtln/README.md
new file mode 100644
index 0000000..eb96b0c
--- /dev/null
+++ b/tensorflow/lite/micro/examples/dtln/README.md
@@ -0,0 +1,23 @@
+# DTLN example
+The DTLN example is a demonstration of DTLN network running on HiFi DSP for Noise suppression in speech.
+It uses feature_data as input and provides noise suppressed speech as output.
+It is based on the paper (https://github.com/breizhn/DTLN).
+While the paper presents 2 parts, one for noise suppression and the other for speech enhancement,
+the example presented here follows the noise suppression part only.
+The model was re-trained by Cadence using the DNS challenge data (https://github.com/microsoft/DNS-Challenge) 
+and the noise suppression part was 8-bit quantized. 
+This example is not to be used to evaluate the network quality or quality of noise suppression, but only as a demonstration as stated above.
+
+## Run the tests on a development machine
+
+```
+make -f tensorflow/lite/micro/tools/make/Makefile third_party_downloads
+make -f tensorflow/lite/micro/tools/make/Makefile test_dtln_test
+```
+
+You should see a series of files get compiled, followed by some logging output
+from a test, which should conclude with `~~~ALL TESTS PASSED~~~`. If you see
+this, it means that a small program has been built and run that loads a trained
+TensorFlow model, runs it with feature data, and gets the expected
+outputs. This particular test runs with feature data as input,
+and validates the output against a golden reference output.
diff --git a/tensorflow/lite/micro/examples/dtln/dtln_inout_data.cc b/tensorflow/lite/micro/examples/dtln/dtln_inout_data.cc
new file mode 100644
index 0000000..a4652a7
--- /dev/null
+++ b/tensorflow/lite/micro/examples/dtln/dtln_inout_data.cc
@@ -0,0 +1,60 @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/lite/c/common.h"
+
+int8_t feature_data[] = {
+    -127, -126, -115, -82,  -90,  -113, -15,  13,   -87,  -105, -77,  -106,
+    -113, -81,  -90,  -123, -113, -112, -124, -120, -114, -123, -123, -112,
+    -115, -125, -120, -120, -126, -126, -126, -127, -128, -127, -128, -128,
+    -127, -127, -128, -126, -125, -126, -127, -127, -127, -128, -126, -127,
+    -128, -127, -127, -128, -127, -126, -127, -128, -127, -127, -127, -124,
+    -124, -127, -126, -126, -127, -126, -124, -125, -128, -126, -125, -127,
+    -126, -126, -127, -127, -126, -126, -127, -126, -126, -127, -126, -125,
+    -127, -126, -123, -124, -126, -126, -126, -128, -127, -127, -127, -128,
+    -127, -127, -128, -128, -128, -128, -128, -127, -128, -128, -127, -128,
+    -128, -127, -127, -128, -128, -128, -128, -128, -128, -128, -128, -128,
+    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
+    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
+    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
+    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
+    -128, -128, -128, -128, -128, -128, -128, -127, -127, -127, -127, -127,
+    -127, -127, -127, -128, -127, -127, -127, -127, -126, -127, -127, -127,
+    -127, -127, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
+    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
+    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
+    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
+    -128, -127, -128, -128, -127, -127, -128, -128, -128, -128, -128, -128,
+    -128, -128, -128, -128, -128};
+
+int8_t golden_ref[] = {
+    119, 82,  72,  116, -5,  10,  -2,  0,   -2,  36,  116, 125, 123, 124, 126,
+    124, 115, 116, 124, 126, 121, 124, 121, 104, 104, 113, 113, 110, 115, 101,
+    96,  119, 120, 117, 118, 114, 104, 110, 118, 119, 111, 114, 119, 117, 114,
+    110, 117, 112, 115, 120, 119, 118, 119, 116, 117, 120, 121, 121, 121, 119,
+    117, 120, 121, 120, 118, 115, 114, 114, 117, 119, 113, 108, 108, 111, 112,
+    114, 114, 116, 115, 112, 110, 113, 113, 110, 107, 98,  102, 101, 101, 103,
+    92,  98,  101, 102, 102, 101, 104, 102, 101, 101, 100, 102, 98,  104, 100,
+    99,  92,  96,  87,  97,  96,  96,  96,  95,  92,  98,  95,  90,  85,  82,
+    87,  82,  82,  89,  90,  83,  86,  85,  80,  86,  87,  91,  89,  87,  87,
+    85,  82,  74,  80,  80,  72,  79,  74,  79,  82,  83,  77,  85,  71,  76,
+    72,  76,  76,  77,  56,  74,  74,  69,  69,  69,  65,  56,  60,  67,  71,
+    69,  74,  67,  71,  65,  77,  76,  79,  67,  72,  61,  60,  67,  69,  71,
+    77,  63,  63,  60,  63,  71,  80,  80,  74,  76,  67,  74,  63,  67,  69,
+    72,  77,  71,  72,  82,  65,  49,  67,  58,  71,  65,  63,  69,  61,  77,
+    63,  65,  65,  69,  69,  65,  72,  77,  80,  60,  79,  77,  71,  67,  79,
+    69,  67,  65,  74,  69,  71,  67,  76,  77,  77,  77,  83,  67,  65,  79,
+    77,  60,  71,  86,  86,  63,  74,  63,  63,  63,  69,  79,  63,  52,  85,
+    87,  86};
diff --git a/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_model_settings.cc b/tensorflow/lite/micro/examples/dtln/dtln_inout_data.h
similarity index 69%
rename from tensorflow/lite/micro/examples/micro_speech/simple_features/simple_model_settings.cc
rename to tensorflow/lite/micro/examples/dtln/dtln_inout_data.h
index e2cf661..c5fde46 100644
--- a/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_model_settings.cc
+++ b/tensorflow/lite/micro/examples/dtln/dtln_inout_data.h
@@ -1,4 +1,4 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -13,11 +13,7 @@
 limitations under the License.
 ==============================================================================*/
 
-#include "tensorflow/lite/micro/examples/micro_speech/simple_features/simple_model_settings.h"
+#include "tensorflow/lite/c/common.h"
 
-const char* kCategoryLabels[kCategoryCount] = {
-    "silence",
-    "unknown",
-    "yes",
-    "no",
-};
+extern int8_t feature_data[];
+extern int8_t golden_ref[];
diff --git a/tensorflow/lite/micro/examples/dtln/dtln_noise_suppression.tflite b/tensorflow/lite/micro/examples/dtln/dtln_noise_suppression.tflite
new file mode 100644
index 0000000..143ef8d
--- /dev/null
+++ b/tensorflow/lite/micro/examples/dtln/dtln_noise_suppression.tflite
Binary files differ
diff --git a/tensorflow/lite/micro/examples/dtln/dtln_test.cc b/tensorflow/lite/micro/examples/dtln/dtln_test.cc
new file mode 100644
index 0000000..1077195
--- /dev/null
+++ b/tensorflow/lite/micro/examples/dtln/dtln_test.cc
@@ -0,0 +1,100 @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/micro/examples/dtln/dtln_inout_data.h"
+#include "tensorflow/lite/micro/examples/dtln/dtln_noise_suppression_model_data.h"
+#include "tensorflow/lite/micro/micro_interpreter.h"
+#include "tensorflow/lite/micro/micro_log.h"
+#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
+#include "tensorflow/lite/micro/testing/micro_test.h"
+#include "tensorflow/lite/schema/schema_generated.h"
+
+TF_LITE_MICRO_TESTS_BEGIN
+
+MicroPrintf(
+    "\nThis example demonstrates LSTM layers on HiFi DSP, NOT for evaluating "
+    "noise suppression quality.\n");
+TF_LITE_MICRO_TEST(TestInvoke) {
+  // Map the model into a usable data structure. This doesn't involve any
+  // copying or parsing, it's a very lightweight operation.
+  const tflite::Model* model =
+      ::tflite::GetModel(g_dtln_noise_suppression_model_data);
+  if (model->version() != TFLITE_SCHEMA_VERSION) {
+    MicroPrintf(
+        "Model provided is schema version %d not equal "
+        "to supported version %d.\n",
+        model->version(), TFLITE_SCHEMA_VERSION);
+  }
+
+  // Pull in only the operation implementations we need.
+  // This relies on a complete list of all the ops needed by this graph.
+
+  tflite::MicroMutableOpResolver<3> micro_op_resolver;
+  micro_op_resolver.AddUnidirectionalSequenceLSTM();
+  micro_op_resolver.AddFullyConnected();
+  micro_op_resolver.AddLogistic();
+
+  // Create an area of memory to use for input, output, and intermediate arrays.
+  constexpr int tensor_arena_size = 16 * 1024;
+  alignas(16) uint8_t tensor_arena[tensor_arena_size];
+
+  // Build an interpreter to run the model with.
+  tflite::MicroInterpreter interpreter(model, micro_op_resolver, tensor_arena,
+                                       tensor_arena_size);
+  interpreter.AllocateTensors();
+
+  // Get information about the memory area to use for the model's input.
+  TfLiteTensor* input = interpreter.input(0);
+
+  // Make sure the input has the properties we expect.
+  TF_LITE_MICRO_EXPECT(input != nullptr);
+  TF_LITE_MICRO_EXPECT_EQ(3, input->dims->size);
+  TF_LITE_MICRO_EXPECT_EQ(1, input->dims->data[0]);
+  TF_LITE_MICRO_EXPECT_EQ(1, input->dims->data[1]);
+  TF_LITE_MICRO_EXPECT_EQ(257, input->dims->data[2]);
+  TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt8, input->type);
+
+  // Copy a spectrogram created from a noisy.wav audio file,
+  // into the memory area used for the input.
+  for (size_t i = 0; i < input->bytes; ++i) {
+    input->data.int8[i] = feature_data[i];
+  }
+
+  // Run the model on this input and make sure it succeeds.
+  TfLiteStatus invoke_status = interpreter.Invoke();
+  if (invoke_status != kTfLiteOk) {
+    MicroPrintf("Invoke failed\n");
+  }
+  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status);
+
+  // Get the output from the model, and make sure it's the expected size and
+  // type.
+  TfLiteTensor* output = interpreter.output(0);
+  TF_LITE_MICRO_EXPECT_EQ(3, output->dims->size);
+  TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[0]);
+  TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[1]);
+  TF_LITE_MICRO_EXPECT_EQ(257, output->dims->data[2]);
+  TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt8, output->type);
+
+  int output_size =
+      output->dims->data[0] * output->dims->data[1] * output->dims->data[2];
+  for (int i = 0; i < output_size; i++)
+    TF_LITE_MICRO_EXPECT_EQ(output->data.int8[i], golden_ref[i]);
+
+  MicroPrintf("Ran successfully\n");
+}
+
+TF_LITE_MICRO_TESTS_END
diff --git a/tensorflow/lite/micro/examples/hello_world/BUILD b/tensorflow/lite/micro/examples/hello_world/BUILD
index f2b41b3..988b7dd 100644
--- a/tensorflow/lite/micro/examples/hello_world/BUILD
+++ b/tensorflow/lite/micro/examples/hello_world/BUILD
@@ -1,5 +1,6 @@
 # Description:
 #   TensorFlow Lite for Microcontrollers "hello world" example.
+load("@rules_python//python:defs.bzl", "py_binary")
 load("@tflm_pip_deps//:requirements.bzl", "requirement")
 load(
     "//tensorflow/lite/micro:build_def.bzl",
@@ -53,7 +54,7 @@
         "@absl_py//absl/flags",
         "@absl_py//absl/logging",
         requirement("numpy"),
-        requirement("tensorflow-cpu"),
+        requirement("tensorflow"),
         "//python/tflite_micro:runtime",
     ],
 )
@@ -78,6 +79,6 @@
     srcs_version = "PY3",
     deps = [
         requirement("numpy"),
-        requirement("tensorflow-cpu"),
+        requirement("tensorflow"),
     ],
 )
diff --git a/tensorflow/lite/micro/examples/hello_world/models/BUILD b/tensorflow/lite/micro/examples/hello_world/models/BUILD
index 4f025b0..4c9441b 100644
--- a/tensorflow/lite/micro/examples/hello_world/models/BUILD
+++ b/tensorflow/lite/micro/examples/hello_world/models/BUILD
@@ -9,7 +9,10 @@
         "hello_world_float.tflite",
         "hello_world_int8.tflite",
     ],
-    visibility = ["//tensorflow/lite/micro/examples/hello_world:__subpackages__"],
+    visibility = [
+        "//codegen/examples/hello_world:__subpackages__",
+        "//tensorflow/lite/micro/examples/hello_world:__subpackages__",
+    ],
 )
 
 generate_cc_arrays(
diff --git a/tensorflow/lite/micro/examples/hello_world/quantization/BUILD b/tensorflow/lite/micro/examples/hello_world/quantization/BUILD
index ecba316..9a204f2 100644
--- a/tensorflow/lite/micro/examples/hello_world/quantization/BUILD
+++ b/tensorflow/lite/micro/examples/hello_world/quantization/BUILD
@@ -1,3 +1,4 @@
+load("@rules_python//python:defs.bzl", "py_binary")
 load("@tflm_pip_deps//:requirements.bzl", "requirement")
 
 py_binary(
@@ -11,7 +12,7 @@
         "@absl_py//absl/flags",
         "@absl_py//absl/logging",
         requirement("numpy"),
-        requirement("tensorflow-cpu"),
+        requirement("tensorflow"),
         "//python/tflite_micro:runtime",
     ],
 )
diff --git a/tensorflow/lite/micro/examples/hello_world/quantization/ptq.py b/tensorflow/lite/micro/examples/hello_world/quantization/ptq.py
index bfab0d0..f1c4210 100644
--- a/tensorflow/lite/micro/examples/hello_world/quantization/ptq.py
+++ b/tensorflow/lite/micro/examples/hello_world/quantization/ptq.py
@@ -16,7 +16,7 @@
 
 Run:
 Build the train.py script
-`bazel build tensorflow/lite/micro/examples/hello_world/quantization:train`
+`bazel build tensorflow/lite/micro/examples/hello_world:train`
 
 The following command first creates the trained TF float model that we will quantize later
 `bazel-bin/tensorflow/lite/micro/examples/hello_world/train --save_tf_model --save_dir=/tmp/float_model/`
@@ -113,4 +113,4 @@
 
 
 if __name__ == "__main__":
-  app.run(main)
\ No newline at end of file
+  app.run(main)
diff --git a/tensorflow/lite/micro/examples/micro_speech/BUILD b/tensorflow/lite/micro/examples/micro_speech/BUILD
index 71741f3..10d1c10 100644
--- a/tensorflow/lite/micro/examples/micro_speech/BUILD
+++ b/tensorflow/lite/micro/examples/micro_speech/BUILD
@@ -1,5 +1,7 @@
 # Description:
 #   TensorFlow Lite microcontroller example.
+load("@rules_python//python:defs.bzl", "py_binary", "py_test")
+load("@tflm_pip_deps//:requirements.bzl", "requirement")
 load("//tensorflow/lite/micro:build_def.bzl", "generate_cc_arrays")
 
 package(
@@ -9,16 +11,6 @@
     licenses = ["notice"],
 )
 
-cc_library(
-    name = "simple_model_settings",
-    srcs = [
-        "simple_features/simple_model_settings.cc",
-    ],
-    hdrs = [
-        "simple_features/simple_model_settings.h",
-    ],
-)
-
 generate_cc_arrays(
     name = "generated_yes_1000ms_wav_cc",
     src = "testdata/yes_1000ms.wav",
@@ -44,6 +36,30 @@
 )
 
 generate_cc_arrays(
+    name = "generated_noise_1000ms_wav_cc",
+    src = "testdata/noise_1000ms.wav",
+    out = "testdata/noise_1000ms_audio_data.cc",
+)
+
+generate_cc_arrays(
+    name = "generated_noise_1000ms_wav_hdr",
+    src = "testdata/noise_1000ms.wav",
+    out = "testdata/noise_1000ms_audio_data.h",
+)
+
+generate_cc_arrays(
+    name = "generated_silence_1000ms_wav_cc",
+    src = "testdata/silence_1000ms.wav",
+    out = "testdata/silence_1000ms_audio_data.cc",
+)
+
+generate_cc_arrays(
+    name = "generated_silence_1000ms_wav_hdr",
+    src = "testdata/silence_1000ms.wav",
+    out = "testdata/silence_1000ms_audio_data.h",
+)
+
+generate_cc_arrays(
     name = "generated_yes_30ms_wav_cc",
     src = "testdata/yes_30ms.wav",
     out = "testdata/yes_30ms_audio_data.cc",
@@ -69,14 +85,26 @@
 
 generate_cc_arrays(
     name = "generated_micro_speech_model_cc",
-    src = "micro_speech.tflite",
-    out = "micro_speech_model_data.cc",
+    src = "models/micro_speech_quantized.tflite",
+    out = "models/micro_speech_quantized_model_data.cc",
 )
 
 generate_cc_arrays(
     name = "generated_micro_speech_model_hdr",
-    src = "micro_speech.tflite",
-    out = "micro_speech_model_data.h",
+    src = "models/micro_speech_quantized.tflite",
+    out = "models/micro_speech_quantized_model_data.h",
+)
+
+generate_cc_arrays(
+    name = "generated_audio_preprocessor_model_cc",
+    src = "models/audio_preprocessor_int8.tflite",
+    out = "models/audio_preprocessor_int8_model_data.cc",
+)
+
+generate_cc_arrays(
+    name = "generated_audio_preprocessor_model_hdr",
+    src = "models/audio_preprocessor_int8.tflite",
+    out = "models/audio_preprocessor_int8_model_data.h",
 )
 
 cc_library(
@@ -90,35 +118,17 @@
 )
 
 cc_library(
-    name = "simple_features_test_data",
+    name = "audio_preprocessor_model_data",
     srcs = [
-        "simple_features/no_simple_features_data.cc",
-        "simple_features/yes_simple_features_data.cc",
+        ":generated_audio_preprocessor_model_cc",
     ],
     hdrs = [
-        "simple_features/no_simple_features_data.h",
-        "simple_features/yes_simple_features_data.h",
-    ],
-)
-
-cc_test(
-    name = "micro_speech_test",
-    srcs = [
-        "micro_speech_test.cc",
-    ],
-    deps = [
-        ":micro_speech_model_data",
-        "//tensorflow/lite/micro:micro_framework",
-        "//tensorflow/lite/micro:micro_log",
-        "//tensorflow/lite/micro:op_resolvers",
-        "//tensorflow/lite/micro/examples/micro_speech/micro_features:micro_features_test_data",
-        "//tensorflow/lite/micro/testing:micro_test",
-        "//tensorflow/lite/schema:schema_fbs",
+        ":generated_audio_preprocessor_model_hdr",
     ],
 )
 
 cc_library(
-    name = "audio_sample_test_data",
+    name = "audio_sample_test_data_30ms",
     srcs = [
         ":generated_no_30ms_wav_cc",
         ":generated_yes_30ms_wav_cc",
@@ -130,324 +140,128 @@
 )
 
 cc_library(
-    name = "audio_large_sample_test_data",
+    name = "audio_sample_test_data_1000ms",
     srcs = [
         ":generated_no_1000ms_wav_cc",
+        ":generated_noise_1000ms_wav_cc",
+        ":generated_silence_1000ms_wav_cc",
         ":generated_yes_1000ms_wav_cc",
     ],
     hdrs = [
         ":generated_no_1000ms_wav_hdr",
+        ":generated_noise_1000ms_wav_hdr",
+        ":generated_silence_1000ms_wav_hdr",
         ":generated_yes_1000ms_wav_hdr",
     ],
 )
 
 cc_library(
-    name = "simple_features_generator_test_data",
-    srcs = [
-        "simple_features/no_power_spectrum_data.cc",
-        "simple_features/yes_power_spectrum_data.cc",
-    ],
+    name = "micro_model_settings",
     hdrs = [
-        "simple_features/no_power_spectrum_data.h",
-        "simple_features/yes_power_spectrum_data.h",
-    ],
-)
-
-cc_library(
-    name = "simple_features_generator_reference",
-    srcs = [
-        "simple_features/simple_features_generator.cc",
-    ],
-    hdrs = [
-        "simple_features/simple_features_generator.h",
-    ],
-    deps = [
-        ":simple_model_settings",
-        "//tensorflow/lite/c:common",
-        "//tensorflow/lite/micro:micro_log",
+        "micro_model_settings.h",
     ],
 )
 
 cc_test(
-    name = "simple_features_generator_reference_test",
+    name = "micro_speech_test",
     srcs = [
-        "simple_features/simple_features_generator_test.cc",
+        "micro_speech_test.cc",
     ],
     deps = [
-        ":audio_sample_test_data",
-        ":simple_features_generator_reference",
-        ":simple_features_generator_test_data",
-        ":simple_model_settings",
-        "//tensorflow/lite/c:common",
-        "//tensorflow/lite/micro:micro_framework",
-        "//tensorflow/lite/micro:micro_log",
-        "//tensorflow/lite/micro/testing:micro_test",
-    ],
-)
-
-cc_library(
-    name = "simple_features_generator_fixed",
-    srcs = [
-        "simple_features/fixed_point/simple_features_generator.cc",
-    ],
-    hdrs = [
-        "simple_features/simple_features_generator.h",
-    ],
-    deps = [
-        ":simple_model_settings",
-        "//tensorflow/lite/c:common",
-        "//tensorflow/lite/micro:micro_log",
-    ],
-)
-
-cc_test(
-    name = "simple_features_generator_fixed_test",
-    srcs = [
-        "simple_features/simple_features_generator_test.cc",
-    ],
-    deps = [
-        ":audio_sample_test_data",
-        ":simple_features_generator_fixed",
-        ":simple_features_generator_test_data",
-        ":simple_model_settings",
-        "//tensorflow/lite/c:common",
-        "//tensorflow/lite/micro:micro_framework",
-        "//tensorflow/lite/micro:micro_log",
-        "//tensorflow/lite/micro/testing:micro_test",
-    ],
-)
-
-cc_library(
-    name = "audio_provider",
-    srcs = [
-        "audio_provider.cc",
-    ],
-    hdrs = [
-        "audio_provider.h",
-    ],
-    deps = [
-        "//tensorflow/lite/c:common",
-        "//tensorflow/lite/micro/examples/micro_speech/micro_features:micro_model_settings",
-    ],
-)
-
-cc_library(
-    name = "audio_provider_mock",
-    srcs = [
-        "audio_provider_mock.cc",
-    ],
-    hdrs = [
-        "audio_provider.h",
-    ],
-    deps = [
-        ":audio_large_sample_test_data",
-        "//tensorflow/lite/c:common",
-        "//tensorflow/lite/micro/examples/micro_speech/micro_features:micro_model_settings",
-    ],
-)
-
-cc_test(
-    name = "audio_provider_test",
-    srcs = [
-        "audio_provider_test.cc",
-    ],
-    deps = [
-        ":audio_provider",
-        "//tensorflow/lite/c:common",
-        "//tensorflow/lite/micro:micro_framework",
-        "//tensorflow/lite/micro/examples/micro_speech/micro_features:micro_model_settings",
-        "//tensorflow/lite/micro/testing:micro_test",
-    ],
-)
-
-cc_test(
-    name = "audio_provider_mock_test",
-    srcs = [
-        "audio_provider_mock_test.cc",
-    ],
-    deps = [
-        ":audio_large_sample_test_data",
-        ":audio_provider_mock",
-        "//tensorflow/lite/c:common",
-        "//tensorflow/lite/micro:micro_framework",
-        "//tensorflow/lite/micro/examples/micro_speech/micro_features:micro_model_settings",
-        "//tensorflow/lite/micro/testing:micro_test",
-    ],
-)
-
-cc_library(
-    name = "feature_provider",
-    srcs = [
-        "feature_provider.cc",
-    ],
-    hdrs = [
-        "feature_provider.h",
-    ],
-    deps = [
-        ":audio_provider",
-        "//tensorflow/lite/c:common",
-        "//tensorflow/lite/micro:micro_log",
-        "//tensorflow/lite/micro/examples/micro_speech/micro_features:micro_features_generator",
-        "//tensorflow/lite/micro/examples/micro_speech/micro_features:micro_model_settings",
-    ],
-)
-
-cc_test(
-    name = "feature_provider_test",
-    srcs = [
-        "feature_provider_test.cc",
-    ],
-    deps = [
-        ":audio_provider",
-        ":feature_provider",
-        "//tensorflow/lite/c:common",
-        "//tensorflow/lite/micro:micro_framework",
-        "//tensorflow/lite/micro/examples/micro_speech/micro_features:micro_model_settings",
-        "//tensorflow/lite/micro/testing:micro_test",
-    ],
-)
-
-cc_library(
-    name = "feature_provider_mock",
-    srcs = [
-        "feature_provider.cc",
-    ],
-    hdrs = [
-        "feature_provider.h",
-    ],
-    deps = [
-        ":audio_provider_mock",
-        "//tensorflow/lite/c:common",
-        "//tensorflow/lite/micro:micro_log",
-        "//tensorflow/lite/micro/examples/micro_speech/micro_features:micro_features_generator",
-        "//tensorflow/lite/micro/examples/micro_speech/micro_features:micro_model_settings",
-    ],
-)
-
-cc_test(
-    name = "feature_provider_mock_test",
-    size = "small",
-    srcs = [
-        "feature_provider_mock_test.cc",
-    ],
-    tags = [
-        "noasan",  # TODO(b/179930607): Fix with asan.
-    ],
-    deps = [
-        ":feature_provider_mock",
-        "//tensorflow/lite/c:common",
-        "//tensorflow/lite/micro:micro_framework",
-        "//tensorflow/lite/micro/examples/micro_speech/micro_features:micro_features_test_data",
-        "//tensorflow/lite/micro/examples/micro_speech/micro_features:micro_model_settings",
-        "//tensorflow/lite/micro/testing:micro_test",
-    ],
-)
-
-cc_library(
-    name = "recognize_commands",
-    srcs = [
-        "recognize_commands.cc",
-    ],
-    hdrs = [
-        "recognize_commands.h",
-    ],
-    deps = [
-        "//tensorflow/lite/c:common",
-        "//tensorflow/lite/micro:micro_log",
-        "//tensorflow/lite/micro/examples/micro_speech/micro_features:micro_model_settings",
-    ],
-)
-
-cc_test(
-    name = "recognize_commands_test",
-    srcs = [
-        "recognize_commands_test.cc",
-    ],
-    tags = [
-        "no_oss",  # TODO(122853023): Resolve issues and re-enable.
-    ],
-    deps = [
-        ":recognize_commands",
-        "//tensorflow/lite/c:common",
-        "//tensorflow/lite/micro:micro_framework",
-        "//tensorflow/lite/micro:micro_log",
-        "//tensorflow/lite/micro:test_helpers",
-        "//tensorflow/lite/micro/testing:micro_test",
-    ],
-)
-
-cc_library(
-    name = "command_responder",
-    srcs = [
-        "command_responder.cc",
-    ],
-    hdrs = [
-        "command_responder.h",
-    ],
-    deps = [
-        "//tensorflow/lite/c:common",
-        "//tensorflow/lite/micro:micro_log",
-    ],
-)
-
-cc_test(
-    name = "command_responder_test",
-    srcs = [
-        "command_responder_test.cc",
-    ],
-    deps = [
-        ":command_responder",
-        "//tensorflow/lite/c:common",
-        "//tensorflow/lite/micro:micro_framework",
-        "//tensorflow/lite/micro/testing:micro_test",
-    ],
-)
-
-cc_binary(
-    name = "micro_speech",
-    srcs = [
-        "main.cc",
-        "main_functions.cc",
-        "main_functions.h",
-    ],
-    deps = [
-        ":audio_provider",
-        ":command_responder",
-        ":feature_provider",
+        ":audio_preprocessor_model_data",
+        ":audio_sample_test_data_1000ms",
+        ":audio_sample_test_data_30ms",
+        ":micro_model_settings",
         ":micro_speech_model_data",
-        ":recognize_commands",
         "//tensorflow/lite/micro:micro_framework",
         "//tensorflow/lite/micro:micro_log",
         "//tensorflow/lite/micro:op_resolvers",
-        "//tensorflow/lite/micro:system_setup",
-        "//tensorflow/lite/micro/examples/micro_speech/micro_features:micro_model_settings",
+        "//tensorflow/lite/micro/testing:micro_test",
         "//tensorflow/lite/schema:schema_fbs",
     ],
 )
 
-cc_binary(
-    name = "micro_speech_mock",
-    srcs = [
-        "main.cc",
-        "main_functions.cc",
-        "main_functions.h",
+filegroup(
+    name = "samples_30ms",
+    srcs = glob(["testdata/*_30ms.wav"]),
+)
+
+filegroup(
+    name = "samples_1000ms",
+    srcs = glob(["testdata/*_1000ms.wav"]),
+)
+
+filegroup(
+    name = "models_tflite",
+    srcs = glob(["models/*.tflite"]),
+)
+
+py_binary(
+    name = "audio_preprocessor",
+    srcs = ["audio_preprocessor.py"],
+    data = [
+        ":samples_30ms",
     ],
+    python_version = "PY3",
+    srcs_version = "PY3",
     deps = [
-        ":audio_provider_mock",
-        ":command_responder",
-        ":feature_provider",
-        ":micro_speech_model_data",
-        ":recognize_commands",
-        "//tensorflow/lite/micro:micro_framework",
-        "//tensorflow/lite/micro:op_resolvers",
-        "//tensorflow/lite/micro:system_setup",
-        "//tensorflow/lite/micro/examples/micro_speech/micro_features:micro_model_settings",
-        "//tensorflow/lite/schema:schema_fbs",
+        "@absl_py//absl:app",
+        "@absl_py//absl/flags",
+        requirement("numpy"),
+        requirement("tensorflow"),
+        "//python/tflite_micro:runtime",
+        "//python/tflite_micro/signal:ops",
+        "//python/tflite_micro/signal/utils:util",
     ],
 )
 
-sh_test(
-    name = "micro_speech_binary_mock_test",
-    srcs = ["micro_speech_binary_mock_test.sh"],
-    data = [":micro_speech_mock"],
+py_binary(
+    name = "evaluate",
+    srcs = ["evaluate.py"],
+    data = [
+        ":models_tflite",
+    ],
+    python_version = "PY3",
+    srcs_version = "PY3",
+    deps = [
+        ":audio_preprocessor",
+    ],
+)
+
+py_test(
+    name = "evaluate_test",
+    srcs = ["evaluate_test.py"],
+    data = [
+        ":models_tflite",
+        ":samples_1000ms",
+    ],
+    main = "evaluate_test.py",
+    python_version = "PY3",
+    tags = [
+        "noasan",
+        "nomsan",  # Python doesn't like these symbols
+        "noubsan",
+    ],
+    deps = [
+        ":evaluate",
+    ],
+)
+
+py_test(
+    name = "audio_preprocessor_test",
+    srcs = ["audio_preprocessor_test.py"],
+    data = [
+        ":models_tflite",
+        ":samples_30ms",
+    ],
+    main = "audio_preprocessor_test.py",
+    python_version = "PY3",
+    tags = [
+        "noasan",
+        "nomsan",  # Python doesn't like these symbols
+        "noubsan",
+    ],
+    deps = [
+        ":audio_preprocessor",
+    ],
 )
diff --git a/tensorflow/lite/micro/examples/micro_speech/Makefile.inc b/tensorflow/lite/micro/examples/micro_speech/Makefile.inc
index d2ceab5..a1b5b56 100644
--- a/tensorflow/lite/micro/examples/micro_speech/Makefile.inc
+++ b/tensorflow/lite/micro/examples/micro_speech/Makefile.inc
@@ -1,299 +1,64 @@
 
-INCLUDES += \
- -I$(MAKEFILE_DIR)/downloads/kissfft
-
-KISSFFT_LIB_SRCS :=
-
-KISSFFT_LIB_HDRS := \
-$(MAKEFILE_DIR)/downloads/kissfft/COPYING \
-$(MAKEFILE_DIR)/downloads/kissfft/kiss_fft.c \
-$(MAKEFILE_DIR)/downloads/kissfft/kiss_fft.h \
-$(MAKEFILE_DIR)/downloads/kissfft/_kiss_fft_guts.h \
-$(MAKEFILE_DIR)/downloads/kissfft/tools/kiss_fftr.c \
-$(MAKEFILE_DIR)/downloads/kissfft/tools/kiss_fftr.h
-
 MICRO_SPEECH_TEST_SRCS := \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_speech_test.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/no_micro_features_data.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/yes_micro_features_data.cc
+$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_speech_test.cc
 
 MICRO_SPEECH_TEST_HDRS := \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/no_micro_features_data.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/yes_micro_features_data.h \
+$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_model_settings.h \
+$(TENSORFLOW_ROOT)tensorflow/lite/micro/testing/micro_test.h
 
-SIMPLE_FEATURES_GENERATOR_TEST_SRCS := \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/simple_features/simple_features_generator.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/simple_features/simple_features_generator_test.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/simple_features/no_power_spectrum_data.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/simple_features/yes_power_spectrum_data.cc
+MICRO_SPEECH_SRCS := $(MICRO_SPEECH_TEST_SRCS)
 
-SIMPLE_FEATURES_GENERATOR_TEST_HDRS := \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/simple_features/simple_model_settings.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/simple_features/simple_features_generator.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/simple_features/no_power_spectrum_data.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/simple_features/yes_power_spectrum_data.h
-
-MICRO_FEATURES_LIB_SRCS := \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/fft.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/fft_util.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/filterbank.c \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/filterbank_util.c \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/frontend.c \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/frontend_util.c \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/log_lut.c \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/log_scale.c \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/log_scale_util.c \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/noise_reduction.c \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.c \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.c \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.c \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/window.c \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/window_util.c \
-$(KISSFFT_LIB_SRCS)
-
-MICRO_FEATURES_LIB_HDRS := \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/bits.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/fft.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/fft_util.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/filterbank.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/frontend.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/frontend_util.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/kiss_fft_common.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/log_lut.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/log_scale.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/window.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/experimental/microfrontend/lib/window_util.h \
-$(KISSFFT_LIB_HDRS)
-
-MICRO_FEATURES_GENERATOR_SRCS := \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/micro_features_generator.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.cc \
-$(MICRO_FEATURES_LIB_SRCS)
-
-MICRO_FEATURES_GENERATOR_HDRS := \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/micro_features_generator.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h \
-$(MICRO_FEATURES_LIB_HDRS)
-
-MICRO_FEATURES_GENERATOR_TEST_SRCS := \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/micro_features_generator_test.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/no_feature_data_slice.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/yes_feature_data_slice.cc \
-$(MICRO_FEATURES_GENERATOR_SRCS)
-
-MICRO_FEATURES_GENERATOR_TEST_HDRS := \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/no_feature_data_slice.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/yes_feature_data_slice.h \
-$(MICRO_FEATURES_GENERATOR_HDRS)
-
-AUDIO_PROVIDER_TEST_SRCS := \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/audio_provider_test.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/audio_provider.cc
-
-AUDIO_PROVIDER_TEST_HDRS := \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/audio_provider.h \
-
-AUDIO_PROVIDER_MOCK_TEST_SRCS := \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/audio_provider_mock_test.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/audio_provider_mock.cc
-
-AUDIO_PROVIDER_MOCK_TEST_HDRS := \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/audio_provider.h \
-
-FEATURE_PROVIDER_TEST_SRCS := \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/feature_provider_test.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/audio_provider.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/feature_provider.cc \
-$(MICRO_FEATURES_GENERATOR_SRCS)
-
-FEATURE_PROVIDER_TEST_HDRS := \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/audio_provider.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/feature_provider.h \
-$(MICRO_FEATURES_GENERATOR_HDRS)
-
-FEATURE_PROVIDER_MOCK_TEST_SRCS := \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/feature_provider_test.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/audio_provider_mock.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/feature_provider.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/no_micro_features_data.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/yes_micro_features_data.cc \
-$(MICRO_FEATURES_GENERATOR_SRCS)
-
-FEATURE_PROVIDER_MOCK_TEST_HDRS := \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/audio_provider.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/feature_provider.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/no_micro_features_data.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/yes_micro_features_data.h \
-$(MICRO_FEATURES_GENERATOR_HDRS)
-
-RECOGNIZE_COMMANDS_TEST_SRCS := \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/recognize_commands_test.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/recognize_commands.cc
-
-RECOGNIZE_COMMANDS_TEST_HDRS := \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/recognize_commands.h
-
-COMMAND_RESPONDER_TEST_SRCS := \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/command_responder_test.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/command_responder.cc
-
-COMMAND_RESPONDER_TEST_HDRS := \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/command_responder.h
-
-MICRO_SPEECH_SRCS := \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/main.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/main_functions.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/audio_provider.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/feature_provider.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/no_micro_features_data.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/yes_micro_features_data.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/recognize_commands.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/command_responder.cc \
-$(MICRO_FEATURES_GENERATOR_SRCS)
-
-MICRO_SPEECH_HDRS := \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/audio_provider.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/feature_provider.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/no_micro_features_data.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/yes_micro_features_data.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/recognize_commands.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/command_responder.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/main_functions.h \
-$(MICRO_FEATURES_GENERATOR_HDRS)
-
-MICRO_SPEECH_MOCK_SRCS := \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/main.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/main_functions.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/audio_provider_mock.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/feature_provider.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/no_micro_features_data.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/yes_micro_features_data.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/recognize_commands.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/command_responder.cc \
-$(MICRO_FEATURES_GENERATOR_SRCS)
-
-MICRO_SPEECH_MOCK_HDRS := \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/audio_provider.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/feature_provider.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/micro_features_generator.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/no_micro_features_data.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_features/yes_micro_features_data.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/recognize_commands.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/command_responder.h \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/main_functions.h \
-$(MICRO_FEATURES_GENERATOR_HDRS)
+MICRO_SPEECH_HDRS := $(MICRO_SPEECH_TEST_HDRS)
 
 MICRO_SPEECH_GENERATOR_INPUTS := \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_speech.tflite \
+$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/models/micro_speech_quantized.tflite \
+$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/models/audio_preprocessor_int8.tflite \
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/testdata/no_1000ms.wav \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/testdata/no_30ms.wav \
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/testdata/yes_1000ms.wav \
+$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/testdata/silence_1000ms.wav \
+$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/testdata/noise_1000ms.wav \
+$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/testdata/no_30ms.wav \
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/testdata/yes_30ms.wav
 
 MICRO_SPEECH_GENERATED_SRCS := \
-$(GENERATED_SRCS_DIR)$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_speech_model_data.cc \
+$(GENERATED_SRCS_DIR)$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/models/micro_speech_quantized_model_data.cc \
+$(GENERATED_SRCS_DIR)$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/models/audio_preprocessor_int8_model_data.cc \
 $(GENERATED_SRCS_DIR)$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/testdata/no_1000ms_audio_data.cc \
-$(GENERATED_SRCS_DIR)$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/testdata/no_30ms_audio_data.cc \
 $(GENERATED_SRCS_DIR)$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/testdata/yes_1000ms_audio_data.cc \
+$(GENERATED_SRCS_DIR)$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/testdata/silence_1000ms_audio_data.cc \
+$(GENERATED_SRCS_DIR)$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/testdata/noise_1000ms_audio_data.cc \
+$(GENERATED_SRCS_DIR)$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/testdata/no_30ms_audio_data.cc \
 $(GENERATED_SRCS_DIR)$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/testdata/yes_30ms_audio_data.cc
 
 MICRO_SPEECH_GENERATED_HDRS := \
-$(GENERATED_SRCS_DIR)$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/micro_speech_model_data.h \
+$(GENERATED_SRCS_DIR)$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/models/micro_speech_quantized_model_data.h \
+$(GENERATED_SRCS_DIR)$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/models/audio_preprocessor_int8_model_data.h \
 $(GENERATED_SRCS_DIR)$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/testdata/no_1000ms_audio_data.h \
-$(GENERATED_SRCS_DIR)$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/testdata/no_30ms_audio_data.h \
 $(GENERATED_SRCS_DIR)$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/testdata/yes_1000ms_audio_data.h \
+$(GENERATED_SRCS_DIR)$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/testdata/silence_1000ms_audio_data.h \
+$(GENERATED_SRCS_DIR)$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/testdata/noise_1000ms_audio_data.h \
+$(GENERATED_SRCS_DIR)$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/testdata/no_30ms_audio_data.h \
 $(GENERATED_SRCS_DIR)$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/testdata/yes_30ms_audio_data.h
 
-#Find any platform - specific rules for this example.
-include $(wildcard $(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/*/Makefile.inc)
-
 # TODO(b/161489252): Disabling warnings for this example until we have a better
 # way to build third_party code with a reduced list of CFLAGS.
+# Xtensa hifi5/hifi4 xa_nnlib
 CCFLAGS := $(filter-out $(CC_WARNINGS),$(CCFLAGS))
 
-# Test the code for feature generation.
-ifneq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), hifi5 hifi3z))
-  $(eval $(call microlite_test,micro_features_generator_test,\
-  $(MICRO_FEATURES_GENERATOR_TEST_SRCS),$(MICRO_FEATURES_GENERATOR_TEST_HDRS),$(MICRO_SPEECH_GENERATOR_INPUTS)))
-endif
-
 # Tests loading and running a speech model.
 $(eval $(call microlite_test,micro_speech_test,\
 $(MICRO_SPEECH_TEST_SRCS),$(MICRO_SPEECH_TEST_HDRS),$(MICRO_SPEECH_GENERATOR_INPUTS)))
 
-# TODO(b/268568089): This test is taking very long time to finish; causing the
-# CI to run for a long time to finish.
-ifneq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), hifimini hifi5 hifi3z))
-  # Test the code for feature generation.
-  $(eval $(call microlite_test,simple_features_generator_test,\
-  $(SIMPLE_FEATURES_GENERATOR_TEST_SRCS),$(SIMPLE_FEATURES_GENERATOR_TEST_HDRS),$(MICRO_SPEECH_GENERATOR_INPUTS)))
-endif
-
-# Tests the audio provider module.
-$(eval $(call microlite_test,audio_provider_test,\
-$(AUDIO_PROVIDER_TEST_SRCS),$(AUDIO_PROVIDER_TEST_HDRS)))
-
-# Tests the audio provider mock module.
-$(eval $(call microlite_test,audio_provider_mock_test,\
-$(AUDIO_PROVIDER_MOCK_TEST_SRCS),$(AUDIO_PROVIDER_MOCK_TEST_HDRS),$(MICRO_SPEECH_GENERATOR_INPUTS)))
-
-# Tests the feature provider module.
-ifneq ($(TARGET_ARCH), hifi3z)
-  $(eval $(call microlite_test,feature_provider_test,\
-  $(FEATURE_PROVIDER_TEST_SRCS),$(FEATURE_PROVIDER_TEST_HDRS)))
-endif
-
-# Tests the feature provider module using the mock audio provider.
-ifneq ($(TARGET_ARCH), hifi3z)
-  $(eval $(call microlite_test,feature_provider_mock_test,\
-  $(FEATURE_PROVIDER_MOCK_TEST_SRCS),$(FEATURE_PROVIDER_MOCK_TEST_HDRS),$(MICRO_SPEECH_GENERATOR_INPUTS)))
-endif
-
-# Tests the command recognizer module.
-$(eval $(call microlite_test,recognize_commands_test,\
-$(RECOGNIZE_COMMANDS_TEST_SRCS),$(RECOGNIZE_COMMANDS_TEST_HDRS)))
-
-# Tests responding to a command.
-$(eval $(call microlite_test,command_responder_test,\
-$(COMMAND_RESPONDER_TEST_SRCS),$(COMMAND_RESPONDER_TEST_HDRS)))
-
-# Builds a standalone speech command recognizer binary.
+# Builds a standalone binary.
 $(eval $(call microlite_test,micro_speech,\
 $(MICRO_SPEECH_SRCS),$(MICRO_SPEECH_HDRS),$(MICRO_SPEECH_GENERATOR_INPUTS)))
 
-# Builds a standalone speech command recognizer binary using fake audio input.
-$(eval $(call microlite_test,micro_speech_mock,\
-$(MICRO_SPEECH_MOCK_SRCS),$(MICRO_SPEECH_MOCK_HDRS),$(MICRO_SPEECH_GENERATOR_INPUTS)))
-
 # Add sources and headers generated from $(MICRO_SPEECH_GENERATOR_INPUTS).
 MICRO_SPEECH_SRCS += $(MICRO_SPEECH_GENERATED_SRCS)
 MICRO_SPEECH_HDRS += $(MICRO_SPEECH_GENERATED_HDRS)
 
-MICRO_SPEECH_MOCK_SRCS += $(MICRO_SPEECH_GENERATED_SRCS)
-MICRO_SPEECH_MOCK_HDRS += $(MICRO_SPEECH_GENERATED_HDRS)
-
 list_micro_speech_example_sources:
 	@echo $(MICRO_SPEECH_SRCS)
 
 list_micro_speech_example_headers:
 	@echo $(MICRO_SPEECH_HDRS)
-
-list_micro_speech_mock_example_sources:
-	@echo $(MICRO_SPEECH_MOCK_SRCS)
-
-list_micro_speech_mock_example_headers:
-	@echo $(MICRO_SPEECH_MOCK_HDRS)
diff --git a/tensorflow/lite/micro/examples/micro_speech/README.md b/tensorflow/lite/micro/examples/micro_speech/README.md
index 8a4aa77..84aaec1 100644
--- a/tensorflow/lite/micro/examples/micro_speech/README.md
+++ b/tensorflow/lite/micro/examples/micro_speech/README.md
@@ -2,321 +2,275 @@
 
 # Micro Speech Example
 
-This example shows how to run a 20 kB model that can recognize 2 keywords,
-"yes" and "no", from speech data.
+This example shows how to run inference using TensorFlow Lite Micro (TFLM)
+on two models for wake-word recognition.
+The first model is an audio preprocessor that generates spectrogram data
+from raw audio samples.
+The second is the Micro Speech model, a less than 20 kB model
+that can recognize 2 keywords, "yes" and "no", from speech data.
+The Micro Speech model takes the spectrogram data as input and produces
+category probabilities.
 
-The application listens to its surroundings with a microphone and indicates
-when it has detected a word by lighting an LED or displaying data on a
-screen, depending on the capabilities of the device.
-
-![Animation on Arduino](images/animation_on_arduino.gif)
-
-The code has a small footprint (for example, around 22 kilobytes on a Cortex
-M3) and only uses about 10 kilobytes of RAM for working memory, so it's able to
-run on systems like an STM32F103 with only 20 kilobytes of total SRAM and 64
-kilobytes of Flash.
 
 ## Table of contents
 
--   [Deploy to STM32F746](#deploy-to-STM32F746)
--   [Deploy to NXP FRDM K66F](#deploy-to-nxp-frdm-k66f)
--   [Deploy to CEVA BX1/SP500](#deploy-to-ceva-bx1)
--   [Run on macOS](#run-on-macos)
--   [Run the tests on a development machine](#run-the-tests-on-a-development-machine)
+-   [Audio Preprocessor](#audio-preprocessor)
+-   [Micro Speech Model Architecture](#micro-speech-model-architecture)
+-   [Run the C++ tests on a development machine](#run-the-c-tests-on-a-development-machine)
+-   [Run the evaluate.py script on a development machine](#run-the-evaluatepy-script-on-a-development-machine)
+-   [Run the evaluate_test.py script on a development machine](#run-the-evaluate_testpy-script-on-a-development-machine)
+-   [Converting models or audio samples to C++](#converting-models-or-audio-samples-to-c)
 -   [Train your own model](#train-your-own-model)
 
-## Deploy to STM32F746
+## Audio Preprocessor
 
-The following instructions will help you build and deploy the example to the
-[STM32F7 discovery kit](https://os.mbed.com/platforms/ST-Discovery-F746NG/)
-using [ARM Mbed](https://github.com/ARMmbed/mbed-cli).
+The Audio Preprocessor model converts raw audio samples into a spectrographic feature.
+Audio samples are input to the model in windowed frames, each window overlapping
+the previous.  When sufficient features have been accumulated, those features can
+be provided as input to the Micro Speech model.
 
-Before we begin, you'll need the following:
+This model provides a replication of the legacy preprocessing used during training
+of the Micro Speech model.  For additional information on audio preprocessing during training,
+please refer to the [training README](train/README.md#preprocessing-speech-input) documentation.
 
-- STM32F7 discovery kit board
-- Mini-USB cable
-- ARM Mbed CLI ([installation instructions](https://os.mbed.com/docs/mbed-os/v6.9/quick-start/build-with-mbed-cli.html). Check it out for MacOS Catalina - [mbed-cli is broken on MacOS Catalina #930](https://github.com/ARMmbed/mbed-cli/issues/930#issuecomment-660550734))
-- Python 3 and pip3
+Audio Preprocessing models providing `int8` and `float32` output, ready for use
+with the Micro Speech model, are provided in the [models](models/) directory.
+These models expect the audio input to conform to:
+* 30ms window frame
+* 20ms window stride
+* 16kHz sample rate
+* 16-bit signed PCM data
+* single channel (mono)
 
-Since Mbed requires a special folder structure for projects, we'll first run a
-command to generate a subfolder containing the required source files in this
-structure:
+### Model Architecture
 
-```
-make -f tensorflow/lite/micro/tools/make/Makefile TARGET=disco_f746ng OPTIMIZED_KERNEL_DIR=cmsis_nn generate_micro_speech_mbed_project
-```
+This model consists primarily of [Signal Library](https://github.com/tensorflow/tflite-micro/blob/main/python/tflite_micro/signal) operations.
+The library is a set of Python methods, and bindings to `C++` library code.
+To allow for use with the `TFLM MicroInterpreter`, a set of [Signal Library kernels](https://github.com/tensorflow/tflite-micro/blob/main/signal/micro/kernels)
+is also provided.
 
-Running the make command will result in the creation of a new folder:
+The [audio_preprocessor.py](audio_preprocessor.py) script provides a complete example
+of how to use the `Signal Library` within your own Python application.  This script
+has support for TensorFlow eager-execution mode, graph-execution mode, and
+`TFLM MicroInterpreter` inference operations.
 
-```
-gen/disco_f746ng_cortex-m4_default/prj/micro_speech/mbed
-```
+[<img src="images/audio_preprocessor_int8.png" width="900" alt="model architecture"/>](images/audio_preprocessor_int8.png)
 
-This folder contains all of the example's dependencies structured in the correct
-way for Mbed to be able to build it.
+*This image was derived from visualizing the 'models/audio_preprocessor_int8.tflite' file in
+[Netron](https://github.com/lutzroeder/netron)*
 
-Change into the directory and run the following commands.
+Each of the steps performed by the model is outlined as follows:
+1) Audio frame input with shape `(1, 480)`
+1) Apply `Hann Window` smoothing using `SignalWindow`
+1) Reshape tensor to match the input of `SignalFftAutoScale`
+1) Rescale tensor data using `SignalFftAutoScale` and calculate one of the input
+parameters to `SignalFilterBankSquareRoot`
+1) Compute FFT using `SignalRfft`
+1) Compute power spectrum using `SignalEnergy`.  The tensor data is only updated
+for elements between `[start_index, end_index)`.
+1) The `Cast`, `StridedSlice`, and `Concatenation` operations are used to fill
+the tensor data with zeros, for elements outside of `[start_index, end_index)`
+1) Compress the power spectrum tensor data into just 40 channels (frequency bands)
+using `SignalFilterBank`
+1) Scale down the tensor data using `SignalFilterBankSquareRoot`
+1) Apply noise reduction using `SignalFilterBankSpectralSubtraction`
+1) Apply gain control using `SignalPCAN`
+1) Scale down the tensor data using `SignalFilterBankLog`
+1) The remaining operations perform additional legacy down-scaling and convert
+the tensor data to `int8`
+1) Model output has shape `(40,)`
 
-First, tell Mbed that the current directory is the root of an Mbed project:
+### The `FeatureParams` Python Class
 
-```
-mbed config root .
-```
+The `FeatureParams` class is located within the [audio_preprocessor.py](audio_preprocessor.py#L260)
+script.  This class allows for custom configuration of the `AudioPreprocessor` class.
+Parameters such as sample rate, window size, window stride, number of output channels,
+and many more can be configured.  The parameters to be changed must be set during
+class instantiation, and are frozen thereafter.  The defaults for `FeatureParams`
+match those of the legacy audio preprocessing used during Micro Speech model training.
 
-Next, tell Mbed to download the dependencies and prepare to build:
+### The `AudioPreprocessor` Python Class
 
-```
-mbed deploy
-```
+The `AudioPreprocessor` class in the [audio_preprocessor.py](audio_preprocessor.py#L338)
+script provides easy-to-use convenience methods for creating
+and using an audio preprocessing model.  This class is configured through use of
+a `FeatureParams` object, allowing some flexibility in how the audio preprocessing
+model works.
 
-Older versions of Mbed will build the project using C++98. However, TensorFlow Lite
-requires C++11. If needed, run the following Python snippet to modify the Mbed
-configuration files so that it uses C++11:
+A short summary of the available methods and properties:
+* `load_samples`: load audio samples from a `WAV` format file and prepare
+the samples for use by other `AudioPreprocessor` methods
+* `samples`: tensor containing previously loaded audio samples
+* `params`: the `FeatureParams` object the class was instantiated with
+* `generate_feature`: generate a single feature using TensorFlow eager-execution
+* `generate_feature_using_graph`: generate a single feature using TensorFlow graph-execution
+* `generate_feature_using_tflm`: generate a single feature using the `TFLM MicroInterpreter`
+* `reset_tflm`: reset the internal state of the `TFLM MicroInterpreter` and the
+`Signal Library` operations
+* `generate_tflite_file`: create a `.tflite` format file for the preprocessor model
 
-```
-python -c 'import fileinput, glob;
-for filename in glob.glob("mbed-os/tools/profiles/*.json"):
-  for line in fileinput.input(filename, inplace=True):
-    print(line.replace("\"-std=gnu++98\"","\"-std=c++11\", \"-fpermissive\""))'
-```
+### Run the audio_preprocessor.py script on a development machine
 
-Note: Mbed has a dependency to an old version of arm_math.h and cmsis_gcc.h (adapted from the general [CMSIS-NN MBED example](https://github.com/tensorflow/tflite-micro/blob/main/tensorflow/lite/micro/kernels/cmsis_nn#example-2---mbed)). Therefore you need to copy the newer version as follows:
+The [audio_preprocessor.py](audio_preprocessor.py#L532) script generates a `.tflite`
+file for the preprocessing model, ready for use with the Micro Speech model.
+
+To generate a `.tflite` model file with `int8` output:
 ```bash
-cp tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS/DSP/Include/\
-arm_math.h mbed-os/cmsis/TARGET_CORTEX_M/arm_math.h
-cp tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS/Core/Include/\
-cmsis_gcc.h mbed-os/cmsis/TARGET_CORTEX_M/cmsis_gcc.h
+bazel build tensorflow/lite/micro/examples/micro_speech:audio_preprocessor
+bazel-bin/tensorflow/lite/micro/examples/micro_speech/audio_preprocessor --output_type=int8
 ```
 
-Finally, run the following command to compile:
-
-```
-mbed compile -m DISCO_F746NG -t GCC_ARM
+To generate a `.tflite` model file with `float32` output:
+```bash
+bazel build tensorflow/lite/micro/examples/micro_speech:audio_preprocessor
+bazel-bin/tensorflow/lite/micro/examples/micro_speech/audio_preprocessor --output_type=float32
 ```
 
-This should result in a binary at the following path:
+### Run the audio_preprocessor_test.py script on a development machine
 
-```
-./BUILD/DISCO_F746NG/GCC_ARM/mbed.bin
+The [audio_preprocessor_test.py](audio_preprocessor_test.py) script performs
+several tests to ensure correct inference operations occur across all execution modes.
+The tests are:
+* cross-check inference results between eager, graph, and `TFLM MicroInterpreter`
+execution modes
+* check the `yes` and `no` 30ms samples in the [testdata](testdata/) directory for
+correct generation of the feature tensor
+* compare the preprocessor `int8` model against the same model in the [models](models/) directory
+* compare the preprocessor `float32` model against the same model in the [models](models/) directory
+
+```bash
+bazel build tensorflow/lite/micro/examples/micro_speech:audio_preprocessor_test
+bazel-bin/tensorflow/lite/micro/examples/micro_speech/audio_preprocessor_test
 ```
 
-To deploy, plug in your STM board and copy the file to it. On macOS, you can do
-this with the following command:
+## Micro Speech Model Architecture
 
-```
-cp ./BUILD/DISCO_F746NG/GCC_ARM/mbed.bin /Volumes/DIS_F746NG/
+This is a simple model comprised of a Convolutional 2D layer, a Fully Connected
+Layer or a MatMul Layer (output: logits) and a Softmax layer
+(output: probabilities) as shown below. Refer to the [`tiny_conv`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/speech_commands/models.py#L673)
+model architecture. The output probabilities are in four categories:
+`silence`, `unknown`, `yes`, `no`.
+
+The input to the model is 49 spectrographic features, each feature
+consisting of 40 channels of data.  The features are generated by the
+Audio Preprocessor model.  For more information, please see the
+[training README](train/README.md#preprocessing-speech-input) documentation.
+
+[<img src="images/micro_speech_quantized.png" width="900" alt="model architecture"/>](images/micro_speech_quantized.png)
+
+*This image was derived from visualizing the 'models/micro_speech_quantized.tflite' file in
+[Netron](https://github.com/lutzroeder/netron)*
+
+## Run the C++ tests on a development machine
+
+To compile and test this example on a desktop Linux or macOS machine, download the
+[TFLM source code](https://github.com/tensorflow/tflite-micro). Then switch
+into the source directory from a terminal using the `cd` command.
+
+Compile and run a native binary using Bazel:
+```bash
+bazel run tensorflow/lite/micro/examples/micro_speech:micro_speech_test
 ```
 
-Copying the file will initiate the flashing process.
-
-The inference results are logged by the board while the program is running.
-To view it, establish a serial connection to the board
-using a baud rate of `9600`. On OSX and Linux, the following command should
-work, replacing `/dev/tty.devicename` with the name of your device as it appears
-in `/dev`:
-
-```
-screen /dev/tty.devicename 9600
-```
-
-You will see a line output for every word that is detected:
-
-```
-Heard yes (201) @4056ms
-Heard no (205) @6448ms
-Heard unknown (201) @13696ms
-Heard yes (205) @15000ms
-```
-
-The number after each detected word is its score. By default, the program only
-considers matches as valid if their score is over 200, so all of the scores you
-see will be at least 200.
-
-To stop viewing the debug output with `screen`, hit `Ctrl+A`, immediately
-followed by the `K` key, then hit the `Y` key.
-
-## Deploy to NXP FRDM K66F
-
-The following instructions will help you build and deploy the example to the
-[NXP FRDM K66F](https://www.nxp.com/design/development-boards/freedom-development-boards/mcu-boards/freedom-development-platform-for-kinetis-k66-k65-and-k26-mcus:FRDM-K66F)
-using [ARM Mbed](https://github.com/ARMmbed/mbed-cli).
-
-1.  Download
-    [the TensorFlow source code](https://github.com/tensorflow/tensorflow).
-2.  Follow instructions from
-    [mbed website](https://os.mbed.com/docs/mbed-os/v5.13/tools/installation-and-setup.html)
-    to setup and install mbed CLI.
-3.  Compile TensorFlow with the following command to generate mbed project:
-
-    ```
-    make -f tensorflow/lite/micro/tools/make/Makefile TARGET=mbed TAGS="nxp_k66f" generate_micro_speech_mbed_project
-    ```
-
-4.  Change into the following directory that has been generated:
-    `gen/mbed_cortex-m4/prj/micro_speech/mbed`
-
-5.  Create an Mbed project using the generated files, run ensuring your
-    environment is using Python 2.7: `mbed config root .`
-
-6.  Next, tell Mbed to download the dependencies and prepare to build: `mbed
-    deploy`
-
-7.  Finally, we can run the following command to compile the code: `mbed compile
-    -m K66F -t GCC_ARM`
-
-8.  For some Mbed compilers (such as GCC), you may get compile error in
-    mbed_rtc_time.cpp. Go to `mbed-os/platform/mbed_rtc_time.h` and comment line
-    32 and line 37:
-
-    ```
-    //#if !defined(__GNUC__) || defined(__CC_ARM) || defined(__clang__)
-    struct timeval {
-    time_t tv_sec;
-    int32_t tv_usec;
-    };
-    //#endif
-    ```
-
-9.  If your system does not recognize the board with the `mbed detect` command.
-    Follow the instructions for setting up
-    [DAPLink](https://armmbed.github.io/DAPLink/?board=FRDM-K66F) for the
-    [K66F](https://os.mbed.com/platforms/FRDM-K66F/).
-
-10. Connect the USB cable to the micro USB port. When the Ethernet port is
-    facing towards you, the micro USB port is left of the Ethernet port.
-
-11. To compile and flash in a single step, add the `--flash` option:
-
-    ```
-    mbed compile -m K66F -t GCC_ARM --flash
-    ```
-
-12. Disconnect USB cable from the device to power down the device and connect
-    back the power cable to start running the model.
-
-13. Connect to serial port with baud rate of 9600 and correct serial device to
-    view the output from the MCU. In linux, you can run the following screen
-    command if the serial device is `/dev/ttyACM0`:
-
-    ```
-    sudo screen /dev/ttyACM0 9600
-    ```
-
-14. Saying "Yes" will print "Yes" and "No" will print "No" on the serial port.
-
-15. A loopback path from microphone to headset jack is enabled. Headset jack is
-    in black color. If there is no output on the serial port, you can connect
-    headphone to headphone port to check if audio loopback path is working.
-
-## Deploy to CEVA-BX1
-
-The following instructions will help you build and deploy the sample to the
-[CEVA-BX1](https://www.ceva-dsp.com/product/ceva-bx1-sound/) or [CEVA-SP500](https://www.ceva-dsp.com/product/ceva-senspro/)
-
-1.  Contact CEVA at [sales@ceva-dsp.com](mailto:sales@ceva-dsp.com)
-2.  For BX1:
-2.1. Download and install CEVA-BX Toolbox v18.0.2
-2.2.  Set the TARGET_TOOLCHAIN_ROOT variable in
-    /tensorflow/lite/micro/tools/make/templates/ceva_bx1/ceva_app_makefile.tpl
-    To your installation location. For example: TARGET_TOOLCHAIN_ROOT :=
-    /home/myuser/work/CEVA-ToolBox/V18/BX
-2.3.  Generate the Makefile for the project: /tensorflow$ make -f
-    tensorflow/lite/micro/tools/make/Makefile TARGET=ceva TARGET_ARCH=CEVA_BX1
-    generate_micro_speech_make_project
-3. For SensPro (SP500):
-3.1. Download and install CEVA-SP Toolbox v20
-3.2. Set the TARGET_TOOLCHAIN_ROOT variable in
-    /tensorflow/lite/micro/tools/make/templates/ceva_SP500/ceva_app_makefile.tpl
-    To your installation location. For example: TARGET_TOOLCHAIN_ROOT :=
-    /home/myuser/work/CEVA-ToolBox/V20/SensPro
-3.3. Generate the Makefile for the project: /tensorflow$ make -f
-    tensorflow/lite/micro/tools/make/Makefile TARGET=ceva TARGET_ARCH=CEVA_SP500
-    generate_micro_speech_make_project 	
-5.  Build the project:
-    /gen/ceva_bx1/prj/micro_speech/make$ make
-6.  This should build the project and create a file called micro_speech.elf.
-7.  The supplied configuration reads input from a files and expects a file
-    called input.wav (easily changed in audio_provider.cc) to be placed in the
-    same directory of the .elf file
-8.  We used Google's speech command dataset: V0.0.2:
-    http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz V0.0.1:
-    http://download.tensorflow.org/data/speech_commands_v0.01.tar.gz
-9.  Follow CEVA Toolbox instructions for creating a debug target and running the
-    project.
-10. Output should look like: Heard silence (208) @352ms Heard no (201) @1696ms
-    Heard yes (203) @3904ms
-
-## Run on macOS
-
-The example contains an audio provider compatible with macOS. If you have access
-to a Mac, you can run the example on your development machine.
-
-First, use the following command to build it:
-
-```
-make -f tensorflow/lite/micro/tools/make/Makefile micro_speech
-```
-
-Once the build completes, you can run the example with the following command:
-
-```
-gen/osx_x86_64/bin/micro_speech
-```
-
-You might see a pop-up asking for microphone access. If so, grant it, and the
-program will start.
-
-Try saying "yes" and "no". You should see output that looks like the following:
-
-```
-Heard yes (201) @4056ms
-Heard no (205) @6448ms
-Heard unknown (201) @13696ms
-Heard yes (205) @15000ms
-Heard yes (205) @16856ms
-Heard unknown (204) @18704ms
-Heard no (206) @21000ms
-```
-
-The number after each detected word is its score. By default, the recognize
-commands component only considers matches as valid if their score is over 200,
-so all of the scores you see will be at least 200.
-
-The number after the score is the number of milliseconds since the program was
-started.
-
-If you don't see any output, make sure your Mac's internal microphone is
-selected in the Mac's *Sound* menu, and that its input volume is turned up high
-enough.
-
-## Run the tests on a development machine
-
-To compile and test this example on a desktop Linux or macOS machine, download
-[the TensorFlow source code](https://github.com/tensorflow/tensorflow), `cd`
-into the source directory from a terminal, and then run the following command:
-
-```
+For a native binary using `make`, run the following command:
+```bash
 make -f tensorflow/lite/micro/tools/make/Makefile test_micro_speech_test
 ```
 
+For an Arm Cortex-M0 binary running in the QEMU emulator:
+```bash
+make -f tensorflow/lite/micro/tools/make/Makefile TARGET=cortex_m_qemu TARGET_ARCH=cortex-m0 OPTIMIZED_KERNEL_DIR=cmsis_nn BUILD_TYPE=default test_micro_speech_test
+```
+
 This will take a few minutes, and downloads frameworks the code uses like
 [CMSIS](https://developer.arm.com/embedded/cmsis) and
 [flatbuffers](https://google.github.io/flatbuffers/). Once that process has
 finished, you should see a series of files get compiled, followed by some
 logging output from a test, which should conclude with `~~~ALL TESTS PASSED~~~`.
 
-If you see this, it means that a small program has been built and run that loads
-the trained TensorFlow model, runs some example inputs through it, and got the
+If you see this, it means that a small program has been built and executed that loads
+the trained TensorFlow Lite model, runs some example inputs through it, and got the
 expected outputs.
 
-To understand how TensorFlow Lite does this, you can look at the source in
-[micro_speech_test.cc](micro_speech_test.cc).
-It's a fairly small amount of code that creates an interpreter, gets a handle to
-a model that's been compiled into the program, and then invokes the interpreter
-with the model and sample inputs.
+To understand how TFLM does this, you can look at the source in the
+[micro_speech_test.cc](micro_speech_test.cc) file.
+It's a fairly small amount of code that executes the following steps:
+1) Create a `TFLM MicroInterpreter` with a handle to the Audio Preprocessor model
+that has been compiled into the program
+1) Repeatedly execute inference operations using `MicroInterpreter::invoke`,
+with audio samples as input, and spectrogram features as output
+1) Create a new `TFLM MicroInterpreter` with a handle to the Micro Speech model
+that has been compiled into the program
+1) Execute a single inference operation using `MicroInterpreter::invoke`,
+with the spectrogram features as input, and category probabilities as output
+1) Check the largest category probability for a match with the speech sample label.
+
+## Run the evaluate.py script on a development machine
+The [evaluate.py](evaluate.py#L166) script predicts the category of a single audio sample
+given by the `sample_path` argument.  The output consists of the predictions for
+the accumulated spectrogram features across (at most) 49 audio sample window frames.
+
+```bash
+bazel build tensorflow/lite/micro/examples/micro_speech:evaluate
+bazel-bin/tensorflow/lite/micro/examples/micro_speech/evaluate --sample_path=tensorflow/lite/micro/examples/micro_speech/testdata/no_1000ms.wav
+```
+
+The output looks like this:
+```bash
+Frame #0: [0.0000, 0.0273, 0.0312, 0.9414]
+Frame #1: [0.0000, 0.0273, 0.0312, 0.9414]
+Frame #2: [0.0000, 0.0273, 0.0312, 0.9414]
+Frame #3: [0.0000, 0.0273, 0.0273, 0.9414]
+Frame #4: [0.0000, 0.0273, 0.0273, 0.9414]
+Frame #5: [0.0000, 0.0273, 0.0273, 0.9414]
+Frame #6: [0.0000, 0.0273, 0.0273, 0.9453]
+Frame #7: [0.0000, 0.0273, 0.0273, 0.9453]
+Frame #8: [0.0000, 0.0273, 0.0273, 0.9453]
+
+...
+
+Frame #40: [0.0000, 0.0312, 0.0000, 0.9648]
+Frame #41: [0.0000, 0.0273, 0.0000, 0.9727]
+Frame #42: [0.0000, 0.0312, 0.0000, 0.9688]
+Frame #43: [0.0000, 0.0273, 0.0000, 0.9727]
+Frame #44: [0.0000, 0.0273, 0.0000, 0.9727]
+Frame #45: [0.0000, 0.0352, 0.0000, 0.9648]
+Frame #46: [0.0000, 0.0391, 0.0000, 0.9609]
+Frame #47: [0.0000, 0.0469, 0.0000, 0.9531]
+Frame #48: [0.0000, 0.0547, 0.0000, 0.9453]
+Model predicts the audio sample as <no> with probability 0.95
+```
+
+## Run the evaluate_test.py script on a development machine
+The [evaluate_test.py](evaluate_test.py) script verifies the combination of the
+Audio Preprocessor model and the Micro Speech model to generate correct inference results.
+Four audio samples from the [testdata](testdata/) directory are used as input to
+the Audio Preprocessor model.
+The Audio Preprocessor model is tested with both `int8` and `float32` outputs.
+The results of the audio preprocessing are then used to check predictions by the
+Micro Speech model.
+
+```bash
+bazel build tensorflow/lite/micro/examples/micro_speech:evaluate_test
+bazel-bin/tensorflow/lite/micro/examples/micro_speech/evaluate_test
+```
+
+## Converting models or audio samples to C++
+A tool is available to convert your custom model or audio samples into `C++` data
+structures that you can then use in your own wake-word application.
+Keep in mind that audio samples for use with Audio Preprocessor and Micro Speech models
+must be 1000ms in length, 16-bit PCM samples, and single channel (mono).
+The tool can be found here: [generate_cc_arrays.py](../../tools/generate_cc_arrays.py)
+
+The following commands show how to use the tool:
+```bash
+bazel build tensorflow/lite/micro/tools:generate_cc_arrays
+bazel-bin/tensorflow/lite/micro/tools/generate_cc_arrays /tmp/data.cc path_to_custom_sample.wav
+bazel-bin/tensorflow/lite/micro/tools/generate_cc_arrays /tmp/header.h path_to_custom_sample.wav
+```
 
 ## Train your own model
 
 So far you have used an existing trained model to run inference on
 microcontrollers. If you wish to train your own model, follow the instructions
-given in the [train/](train/) directory.
+given in the [train](train/README.md) directory.
diff --git a/tensorflow/lite/micro/examples/micro_speech/audio_preprocessor.py b/tensorflow/lite/micro/examples/micro_speech/audio_preprocessor.py
new file mode 100644
index 0000000..0379ec6
--- /dev/null
+++ b/tensorflow/lite/micro/examples/micro_speech/audio_preprocessor.py
@@ -0,0 +1,551 @@
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Audio Sample Preprocessor
+
+When this module is run, feature generation models are created in the .tflite
+format.
+
+Run:
+bazel build tensorflow/lite/micro/examples/micro_speech:audio_preprocessor
+bazel-bin/tensorflow/lite/micro/examples/micro_speech/audio_preprocessor
+"""
+
+from __future__ import annotations
+from pathlib import Path
+from dataclasses import dataclass
+import tempfile
+
+from absl import app
+from absl import flags
+
+import tensorflow as tf
+from tensorflow.python.platform import resource_loader
+from tflite_micro.python.tflite_micro.signal.ops import window_op
+from tflite_micro.python.tflite_micro.signal.ops import fft_ops
+from tflite_micro.python.tflite_micro.signal.ops import energy_op
+from tflite_micro.python.tflite_micro.signal.ops import filter_bank_ops
+from tflite_micro.python.tflite_micro.signal.ops import pcan_op
+from tflite_micro.python.tflite_micro import runtime
+
+_ENABLE_DEBUG = flags.DEFINE_enum(
+    'debug_mode',
+    'off',
+    ['off', 'all'],
+    'Enable debug output',
+)
+
+_FILE_TO_TEST = flags.DEFINE_enum('file_to_test', 'no', ['no', 'yes'],
+                                  'File to test')
+
+_OUTPUT_TYPE = flags.DEFINE_enum(
+    'output_type', 'int8', ['int8', 'float32'],
+    'Type of TfLite output file (.tflite) to generate')
+
+
+def _debug_print(*args):
+  if _ENABLE_DEBUG.value != 'off':
+    print(*args)
+
+
+class _GenerateFeature(tf.Module):
+  """Generate feature tensor from audio window samples"""
+
+  def __init__(self, name: str, params: FeatureParams, detail: str):
+    super().__init__(name=name)
+    self._params = params
+    window_sample_count: int = int(params.window_size_ms * params.sample_rate /
+                                   1000)
+    hann_window_weights = window_op.hann_window_weights(
+        window_sample_count, params.window_scaling_bits)
+    self._hann_window_weights_tensor = tf.constant(hann_window_weights,
+                                                   name='hann_window_weights')
+    self._fft_size, self._fft_size_log2 = fft_ops.get_pow2_fft_length(
+        window_sample_count)
+    self._filter_bank_index_start, self._filter_bank_index_end = \
+        filter_bank_ops.calc_start_end_indices(
+            self._fft_size,
+            params.sample_rate,
+            params.filter_bank_number_of_channels,
+            params.filter_bank_lower_band_limit_hz,
+            params.filter_bank_upper_band_limit_hz)
+    self._detail = detail
+
+  def generate_feature_for_frame(self, audio_frame: tf.Tensor) -> tf.Tensor:
+    # Graph execution does not handle global variables.  Instead, capture the
+    # global variable(s) within a closure (_debug_print_internal).
+    def _debug_print_internal(*args):
+      if _ENABLE_DEBUG.value != 'off' and tf.executing_eagerly():
+        print(*args)
+
+    _debug_print('*** generate_feature_for_frame ***')
+    params = self._params
+    detail = self._detail
+
+    # update filter_bank_ops constants
+    filter_bank_ops.FILTER_BANK_WEIGHT_SCALING_BITS = \
+        params.filter_bank_scaling_bits
+    filter_bank_ops.FILTER_BANK_ALIGNMENT = params.filter_bank_alignment
+    filter_bank_ops.FILTER_BANK_CHANNEL_BLOCK_SIZE = \
+        params.filter_bank_channel_block_size
+
+    _debug_print_internal(f'audio frame output [{detail}]: {audio_frame!r}')
+
+    # apply window to audio frame
+    weights = self._hann_window_weights_tensor
+    _debug_print_internal(f'window weights output [{detail}]: {weights!r}')
+    window_output: tf.Tensor = window_op.window(audio_frame, weights,
+                                                params.window_scaling_bits)
+    _debug_print_internal(f'window output [{detail}]: {window_output!r}')
+
+    # pre-scale window output
+    window_output = tf.reshape(window_output, [-1])
+    window_scaled_output, scaling_shift = fft_ops.fft_auto_scale(window_output)
+    _debug_print_internal(f'scaling shift [{detail}]: {scaling_shift!r}')
+
+    # compute FFT on scaled window output
+    _debug_print_internal(
+        f'fft size, log2 [{detail}]: {self._fft_size}, {self._fft_size_log2}')
+    fft_output: tf.Tensor = fft_ops.rfft(window_scaled_output, self._fft_size)
+    _debug_print_internal(f'fft output [{detail}]: {fft_output!r}')
+
+    index_start = self._filter_bank_index_start
+    index_end = self._filter_bank_index_end
+    # convert fft output complex numbers to energy values
+    _debug_print_internal(
+        f'index start, end [{detail}]: {index_start}, {index_end}')
+    energy_output: tf.Tensor = energy_op.energy(fft_output, index_start,
+                                                index_end)
+    # Energy op does not zero indices outside [index_start,index_end).
+    # The following operations to zero portions of the energy op output
+    # could be much more efficiently performed inside the energy op C++
+    # code.
+    # Need to convert to tf.int32 or the TfLite converter will not use
+    # the correct ops.
+    energy_output = tf.cast(energy_output, tf.int32)  # type: ignore
+    zeros_head = tf.zeros(index_start, dtype=tf.int32)
+    number_of_elements = energy_output.shape.num_elements()
+    zeros_tail = tf.zeros(
+        number_of_elements - index_end,  # type: ignore
+        dtype=tf.int32)
+    energy_slice = energy_output[index_start:index_end]
+    energy_output = tf.concat([zeros_head, energy_slice, zeros_tail],
+                              0)  # type: ignore
+    energy_output = tf.cast(energy_output, dtype=tf.uint32)  # type: ignore
+    _debug_print_internal(f'energy output [{detail}]: {energy_output!r}')
+
+    # compress energy output into 40 channels
+    filter_output: tf.Tensor = filter_bank_ops.filter_bank(
+        energy_output, params.sample_rate,
+        params.filter_bank_number_of_channels,
+        params.filter_bank_lower_band_limit_hz,
+        params.filter_bank_upper_band_limit_hz)
+    _debug_print_internal(f'filterbank output [{detail}]: {filter_output!r}')
+
+    # scale down filter_output
+    filter_scaled_output: tf.Tensor = filter_bank_ops.filter_bank_square_root(
+        filter_output, scaling_shift)
+    _debug_print_internal(
+        f'scaled filterbank output [{detail}]: {filter_scaled_output!r}')
+
+    # noise reduction
+    spectral_sub_bits: int = params.filter_bank_spectral_subtraction_bits
+    filter_noise_output: tf.Tensor
+    filter_noise_estimate: tf.Tensor
+    filter_noise_output, filter_noise_estimate = \
+        filter_bank_ops.filter_bank_spectral_subtraction(
+            filter_scaled_output,
+            num_channels=params.filter_bank_number_of_channels,
+            smoothing=params.filter_bank_even_smoothing,
+            alternate_smoothing=params.filter_bank_odd_smoothing,
+            smoothing_bits=params.filter_bank_smoothing_bits,
+            min_signal_remaining=params.filter_bank_min_signal_remaining,
+            clamping=params.filter_bank_clamping,
+            spectral_subtraction_bits=spectral_sub_bits,
+        )
+    _debug_print_internal(f'noise output [{detail}]: {filter_noise_output!r}')
+
+    # automatic gain control (PCAN)
+    correction_bits: int = self._fft_size_log2 - \
+        int(params.filter_bank_scaling_bits / 2)
+    filter_agc_output: tf.Tensor = pcan_op.pcan(
+        filter_noise_output,
+        filter_noise_estimate,
+        strength=params.pcan_strength,
+        offset=params.pcan_offset,
+        gain_bits=params.pcan_gain_bits,
+        smoothing_bits=params.pcan_smoothing_bits,
+        input_correction_bits=correction_bits)
+    _debug_print_internal(
+        f'AGC Noise output [{detail}]: {filter_agc_output!r}')
+
+    # re-scale features from UINT32 to INT16
+    feature_post_scale: int = 1 << params.filter_bank_post_scaling_bits
+    feature_pre_scale_shift: int = correction_bits
+    feature_rescaled_output: tf.Tensor = filter_bank_ops.filter_bank_log(
+        filter_agc_output,
+        output_scale=feature_post_scale,
+        input_correction_bits=feature_pre_scale_shift)
+    _debug_print_internal(
+        f'scaled noise output [{detail}]: {feature_rescaled_output!r}')
+
+    # These scaling values are derived from those used in input_data.py in the
+    # training pipeline.
+    # The feature pipeline outputs 16-bit signed integers in roughly a 0 to 670
+    # range. In training, these are then arbitrarily divided by 25.6 to get
+    # float values in the rough range of 0.0 to 26.0. This scaling is performed
+    # for historical reasons, to match up with the output of other feature
+    # generators.
+    # The process is then further complicated when we quantize the model. This
+    # means we have to scale the 0.0 to 26.0 real values to the -128 to 127
+    # signed integer numbers.
+    # All this means that to get matching values from our integer feature
+    # output into the tensor input, we have to perform:
+    # input = (((feature / 25.6) / 26.0) * 256) - 128
+    # To simplify this and perform it in 32-bit integer math, we rearrange to:
+    # input = (feature * 256) / (25.6 * 26.0) - 128
+    # constexpr int32_t value_scale = 256;
+    # constexpr int32_t value_div =
+    #     static_cast<int32_t>((25.6f * 26.0f) + 0.5f);
+    # int32_t value =
+    #     ((frontend_output.values[i] * value_scale) + (value_div / 2)) /
+    #     value_div;
+    # value -= 128;
+    # if (value < -128) {
+    #   value = -128;
+    # }
+    # if (value > 127) {
+    #   value = 127;
+    # }
+    # output[i] = value;
+
+    feature_output: tf.Tensor
+    if self._params.use_float_output:
+      # feature_rescaled_output is INT16, cast to FLOAT32
+      feature_output = tf.cast(feature_rescaled_output,
+                               tf.float32)  # type: ignore
+      # feature_output will be FLOAT32
+      feature_output /= self._params.legacy_output_scaling
+    else:
+      value_scale = tf.constant(256, dtype=tf.int32)
+      value_div = tf.constant(int((25.6 * 26) + 0.5), dtype=tf.int32)
+      feature_output = tf.cast(feature_rescaled_output,
+                               tf.int32)  # type: ignore
+      feature_output = (feature_output * value_scale) + int(value_div / 2)
+      feature_output = tf.truncatediv(feature_output,
+                                      value_div)  # type: ignore
+      feature_output += tf.constant(-128, dtype=tf.int32)
+      feature_output = tf.clip_by_value(feature_output,
+                                        clip_value_min=-128,
+                                        clip_value_max=127)  # type: ignore
+      feature_output = tf.cast(feature_output, tf.int8)  # type: ignore
+
+    _debug_print_internal(f'feature output [{detail}]: {feature_output!r}')
+
+    return feature_output
+
+
+@dataclass(kw_only=True, frozen=True)
+class FeatureParams:
+  """
+  Feature generator parameters
+
+  Defaults are configured to work with the micro_speech_quantized.tflite model
+  """
+
+  sample_rate: int = 16000
+  """audio sample rate"""
+
+  window_size_ms: int = 30
+  """input window size in milliseconds"""
+
+  window_stride_ms: int = 20
+  """input window stride in milliseconds"""
+
+  window_scaling_bits: int = 12
+  """input window shaping: scaling bits"""
+
+  filter_bank_number_of_channels: int = 40
+  """filter bank channel count"""
+
+  filter_bank_lower_band_limit_hz: float = 125.0
+  """filter bank lower band limit"""
+
+  filter_bank_upper_band_limit_hz: float = 7500.0
+  """filter bank upper band limit"""
+
+  filter_bank_scaling_bits: int = \
+      filter_bank_ops.FILTER_BANK_WEIGHT_SCALING_BITS
+  """filter bank weight scaling bits, updates filter bank constant"""
+
+  filter_bank_alignment: int = 4
+  """filter bank alignment, updates filter bank constant"""
+
+  filter_bank_channel_block_size: int = 4
+  """filter bank channel block size, updates filter bank constant"""
+
+  filter_bank_post_scaling_bits: int = 6
+  """filter bank output log-scaling bits"""
+
+  filter_bank_spectral_subtraction_bits: int = 14
+  """filter bank noise reduction spectral subtration bits"""
+
+  filter_bank_smoothing_bits: int = 10
+  """filter bank noise reduction smoothing bits"""
+
+  filter_bank_even_smoothing: float = 0.025
+  """filter bank noise reduction even smoothing"""
+
+  filter_bank_odd_smoothing: float = 0.06
+  """filter bank noise reduction odd smoothing"""
+
+  filter_bank_min_signal_remaining: float = 0.05
+  """filter bank noise reduction minimum signal remaining"""
+
+  filter_bank_clamping: bool = False
+  """filter bank noise reduction clamping"""
+
+  pcan_strength: float = 0.95
+  """PCAN gain control strength"""
+
+  pcan_offset: float = 80.0
+  """PCAN gain control offset"""
+
+  pcan_gain_bits: int = 21
+  """PCAN gain control bits"""
+
+  pcan_smoothing_bits: int = 10
+  """PCAN gain control smoothing bits"""
+
+  legacy_output_scaling: float = 25.6
+  """Final output scaling, legacy from training"""
+
+  use_float_output: bool = False
+  """Use float output if True, otherwise int8 output"""
+
+
+class AudioPreprocessor:
+  """
+  Audio Preprocessor
+
+  Args:
+    params: FeatureParams, an immutable object supplying parameters for
+    the AudioPreprocessor instance
+    detail: str, used for debug output (optional, for debugging only)
+  """
+
+  def __init__(self, params: FeatureParams, detail: str = 'unknown'):
+    self._detail = detail
+    self._params = params
+    self._samples_per_window = int(params.window_size_ms * params.sample_rate /
+                                   1000)
+    self._tflm_interpreter = None
+    self._feature_generator = None
+    self._feature_generator_concrete_function = None
+    self._model = None
+    self._samples = None
+
+  def _get_feature_generator(self):
+    if self._feature_generator is None:
+      self._feature_generator = _GenerateFeature(name='GenerateFeature',
+                                                 params=self._params,
+                                                 detail=self._detail)
+    return self._feature_generator
+
+  def _get_concrete_function(self):
+    if self._feature_generator_concrete_function is None:
+      shape = [1, self._samples_per_window]
+      fg = self._get_feature_generator()
+      func = tf.function(func=fg.generate_feature_for_frame)
+      self._feature_generator_concrete_function = func.get_concrete_function(
+          tf.TensorSpec(shape=shape, dtype=tf.int16))  # type: ignore
+    return self._feature_generator_concrete_function
+
+  def _get_model(self):
+    if self._model is None:
+      cf = self._get_concrete_function()
+      converter = tf.lite.TFLiteConverter.from_concrete_functions(
+          [cf], self._get_feature_generator())
+      converter.allow_custom_ops = True
+      self._model = converter.convert()
+      if _ENABLE_DEBUG.value != 'off':
+        tf.lite.experimental.Analyzer.analyze(model_content=self._model)
+    return self._model
+
+  def load_samples(self, filename: Path, use_rounding: bool = False):
+    """
+    Load audio samples from file.
+
+    Loads INT16 audio samples from a WAV file.
+    Supports single channel at 16KHz.
+    The audio samples are accessible through the 'samples' property.
+
+    Args:
+      filename: a Path object
+      use_rounding: bool, if True, convert the normalized FLOAT data that
+      has been loaded into INT16, using a standard rounding algorithm.
+      Otherwise use a simple conversion to INT16.
+    """
+    file_data = tf.io.read_file(str(filename))
+    samples: tf.Tensor
+    samples, sample_rate = tf.audio.decode_wav(file_data, desired_channels=1)
+    sample_rate = int(sample_rate)
+    _debug_print(f'Loaded {filename.name}'
+                 f' sample-rate={sample_rate}'
+                 f' sample-count={len(samples)}')
+    assert sample_rate == self._params.sample_rate, 'mismatched sample rate'
+    # convert samples to INT16
+    # i = (((int) ((x * 32767) + 32768.5f)) - 32768);
+    max_value = tf.dtypes.int16.max
+    min_value = tf.dtypes.int16.min
+    if use_rounding:
+      samples = ((samples * max_value) + (-min_value + 0.5)) + min_value
+    else:
+      samples *= -min_value
+    samples = tf.cast(samples, tf.int16)  # type: ignore
+    samples = tf.reshape(samples, [1, -1])
+
+    self._samples = samples
+
+  @property
+  def samples(self) -> tf.Tensor:
+    """
+    Audio Samples previously decoded using load_samples method.
+
+    Returns:
+      tf.Tensor containing INT16 audio samples
+    """
+    return self._samples
+
+  @property
+  def params(self) -> FeatureParams:
+    """
+    Feature Parameters being used by the AudioPreprocessor object
+
+    Returns:
+      FeatureParams object which is immutable
+    """
+    return self._params
+
+  def generate_feature(self, audio_frame: tf.Tensor) -> tf.Tensor:
+    """
+    Generate a single feature for a single audio frame.  Uses TensorFlow
+    eager execution.
+
+    Args:
+      audio_frame: tf.Tensor, a single audio frame (self.params.window_size_ms)
+      with shape (1, audio_samples_count)
+
+    Returns:
+      tf.Tensor, a tensor containing a single audio feature with shape
+      (self.params.filter_bank_number_of_channels,)
+    """
+    fg = self._get_feature_generator()
+    feature = fg.generate_feature_for_frame(audio_frame=audio_frame)
+    return feature
+
+  def generate_feature_using_graph(self, audio_frame: tf.Tensor) -> tf.Tensor:
+    """
+    Generate a single feature for a single audio frame.  Uses TensorFlow
+    graph execution.
+
+    Args:
+      audio_frame: tf.Tensor, a single audio frame (self.params.window_size_ms)
+      with shape (1, audio_samples_count)
+
+    Returns:
+      tf.Tensor, a tensor containing a single audio feature with shape
+      (self.params.filter_bank_number_of_channels,)
+    """
+    cf = self._get_concrete_function()
+    feature: tf.Tensor = cf(audio_frame=audio_frame)  # type: ignore
+    return feature
+
+  def generate_feature_using_tflm(self, audio_frame: tf.Tensor) -> tf.Tensor:
+    """
+    Generate a single feature for a single audio frame.  Uses TensorFlow
+    graph execution and the TensorFlow model converter to generate a
+    TFLM compatible model.  This model is then used by the TFLM
+    MicroInterpreter to execute a single inference operation.
+
+    Args:
+      audio_frame: tf.Tensor, a single audio frame (self.params.window_size_ms)
+      with shape (1, audio_samples_count)
+
+    Returns:
+      tf.Tensor, a tensor containing a single audio feature with shape
+      (self.params.filter_bank_number_of_channels,)
+    """
+    if self._tflm_interpreter is None:
+      model = self._get_model()
+      self._tflm_interpreter = runtime.Interpreter.from_bytes(model)
+
+    self._tflm_interpreter.set_input(audio_frame, 0)
+    self._tflm_interpreter.invoke()
+    result = self._tflm_interpreter.get_output(0)
+    return tf.convert_to_tensor(result)
+
+  def reset_tflm(self):
+    """
+    Reset TFLM interpreter state
+
+    Re-initializes TFLM interpreter state and the internal state
+    of all TFLM kernel operators.  Useful for resetting Signal
+    library operator noise estimation and other internal state.
+    """
+    if self._tflm_interpreter is not None:
+      self._tflm_interpreter.reset()
+
+  def generate_tflite_file(self) -> Path:
+    """
+    Create a .tflite model file
+
+    The model output tensor type will depend on the
+    'FeatureParams.use_float_output' parameter.
+
+    Returns:
+      Path object for the created model file
+    """
+    model = self._get_model()
+    if self._params.use_float_output:
+      type_name = 'float'
+    else:
+      type_name = 'int8'
+    fname = Path(tempfile.gettempdir(),
+                 'audio_preprocessor_' + type_name + '.tflite')
+    with open(fname, mode='wb') as file_handle:
+      file_handle.write(model)
+    return fname
+
+
+def _main(_):
+  prefix_path = resource_loader.get_path_to_datafile('testdata')
+
+  fname = _FILE_TO_TEST.value
+  audio_30ms_path = Path(prefix_path, f'{fname}_30ms.wav')
+
+  use_float_output = _OUTPUT_TYPE.value == 'float32'
+  params = FeatureParams(use_float_output=use_float_output)
+  pp = AudioPreprocessor(params=params, detail=fname)
+
+  if _ENABLE_DEBUG.value != 'off':
+    pp.load_samples(audio_30ms_path)
+    _ = pp.generate_feature(pp.samples)
+
+  output_file_path: Path = pp.generate_tflite_file()
+  print('\nOutput file:', str(output_file_path), '\n')
+
+
+if __name__ == '__main__':
+  app.run(_main)
diff --git a/tensorflow/lite/micro/examples/micro_speech/audio_preprocessor_test.py b/tensorflow/lite/micro/examples/micro_speech/audio_preprocessor_test.py
new file mode 100644
index 0000000..e723a72
--- /dev/null
+++ b/tensorflow/lite/micro/examples/micro_speech/audio_preprocessor_test.py
@@ -0,0 +1,102 @@
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+"""
+Audio feature generation testing, using the AudioPreprocessor class
+
+Run:
+bazel build tensorflow/lite/micro/examples/micro_speech:audio_preprocessor_test
+bazel-bin/tensorflow/lite/micro/examples/micro_speech/audio_preprocessor_test
+"""
+
+from pathlib import Path
+import filecmp
+
+from tensorflow.python.framework import test_util
+from tensorflow.python.platform import resource_loader
+from tensorflow.python.platform import test
+
+import tensorflow as tf
+from tflite_micro.tensorflow.lite.micro.examples.micro_speech import audio_preprocessor
+
+
+class AudioPreprocessorTest(test_util.TensorFlowTestCase):
+
+  def setUp(self):
+    self.sample_prefix_path = resource_loader.get_path_to_datafile('testdata')
+
+  def testFeatureGeneration(self):
+    feature_params = audio_preprocessor.FeatureParams()
+    audio_pp = audio_preprocessor.AudioPreprocessor(feature_params)
+    window_size = int(feature_params.window_size_ms *
+                      feature_params.sample_rate / 1000)
+    data: tf.Tensor = tf.random.uniform(minval=int(tf.dtypes.int16.min),
+                                        maxval=tf.dtypes.int16.max,
+                                        seed=42,
+                                        shape=(1, window_size),
+                                        dtype=tf.int32)
+    data = tf.cast(data, dtype=tf.int16)  # type: ignore
+
+    # test signal ops internal state retained and features do not match
+    feature_eager1 = audio_pp.generate_feature(data)
+    feature_eager2 = audio_pp.generate_feature(data)
+    self.assertNotAllEqual(feature_eager1, feature_eager2)
+
+    # test eager vs graph execution feature match
+    _ = audio_pp.generate_feature_using_graph(data)
+    feature_graph = audio_pp.generate_feature_using_graph(data)
+    self.assertAllEqual(feature_graph, feature_eager2)
+
+    # test eager vs MicroInterpreter execution feature match
+    feature_tflm = audio_pp.generate_feature_using_tflm(data)
+    self.assertAllEqual(feature_tflm, feature_eager1)
+
+    # test signal ops internal state reset
+    audio_pp.reset_tflm()
+    feature_tflm = audio_pp.generate_feature_using_tflm(data)
+    self.assertAllEqual(feature_tflm, feature_eager1)
+
+    # test signal ops internal state retained
+    feature_tflm = audio_pp.generate_feature_using_tflm(data)
+    self.assertAllEqual(feature_tflm, feature_eager2)
+
+  def testFeatureOutputYes(self):
+    feature_params = audio_preprocessor.FeatureParams()
+    audio_pp = audio_preprocessor.AudioPreprocessor(feature_params)
+    audio_pp.load_samples(Path(self.sample_prefix_path, 'yes_30ms.wav'))
+    feature = audio_pp.generate_feature_using_tflm(audio_pp.samples)
+    feature_list = feature.numpy().tolist()
+    expected = [
+        124, 105, 126, 103, 125, 101, 123, 100, 116, 98, 115, 97, 113, 90, 91,
+        82, 104, 96, 117, 97, 121, 103, 126, 101, 125, 104, 126, 104, 125, 101,
+        116, 90, 81, 74, 80, 71, 83, 76, 82, 71
+    ]
+    self.assertSequenceEqual(feature_list, expected)
+
+  def testFeatureOutputNo(self):
+    feature_params = audio_preprocessor.FeatureParams()
+    audio_pp = audio_preprocessor.AudioPreprocessor(feature_params)
+    audio_pp.load_samples(Path(self.sample_prefix_path, 'no_30ms.wav'))
+    feature = audio_pp.generate_feature_using_tflm(audio_pp.samples)
+    feature_list = feature.numpy().tolist()
+    expected = [
+        126, 103, 124, 102, 124, 102, 123, 100, 118, 97, 118, 100, 118, 98,
+        121, 100, 121, 98, 117, 91, 96, 74, 54, 87, 100, 87, 109, 92, 91, 80,
+        64, 55, 83, 74, 74, 78, 114, 95, 101, 81
+    ]
+    self.assertSequenceEqual(feature_list, expected)
+
+
+if __name__ == '__main__':
+  test.main()
diff --git a/tensorflow/lite/micro/examples/micro_speech/audio_provider.cc b/tensorflow/lite/micro/examples/micro_speech/audio_provider.cc
deleted file mode 100644
index 5ca425d..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/audio_provider.cc
+++ /dev/null
@@ -1,38 +0,0 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/micro/examples/micro_speech/audio_provider.h"
-
-#include "tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h"
-
-namespace {
-int16_t g_dummy_audio_data[kMaxAudioSampleSize];
-int32_t g_latest_audio_timestamp = 0;
-}  // namespace
-
-TfLiteStatus GetAudioSamples(int start_ms, int duration_ms,
-                             int* audio_samples_size, int16_t** audio_samples) {
-  for (int i = 0; i < kMaxAudioSampleSize; ++i) {
-    g_dummy_audio_data[i] = 0;
-  }
-  *audio_samples_size = kMaxAudioSampleSize;
-  *audio_samples = g_dummy_audio_data;
-  return kTfLiteOk;
-}
-
-int32_t LatestAudioTimestamp() {
-  g_latest_audio_timestamp += 100;
-  return g_latest_audio_timestamp;
-}
diff --git a/tensorflow/lite/micro/examples/micro_speech/audio_provider.h b/tensorflow/lite/micro/examples/micro_speech/audio_provider.h
deleted file mode 100644
index d3aab2c..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/audio_provider.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_AUDIO_PROVIDER_H_
-#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_AUDIO_PROVIDER_H_
-
-#include "tensorflow/lite/c/common.h"
-
-// This is an abstraction around an audio source like a microphone, and is
-// expected to return 16-bit PCM sample data for a given point in time. The
-// sample data itself should be used as quickly as possible by the caller, since
-// to allow memory optimizations there are no guarantees that the samples won't
-// be overwritten by new data in the future. In practice, implementations should
-// ensure that there's a reasonable time allowed for clients to access the data
-// before any reuse.
-// The reference implementation can have no platform-specific dependencies, so
-// it just returns an array filled with zeros. For real applications, you should
-// ensure there's a specialized implementation that accesses hardware APIs.
-TfLiteStatus GetAudioSamples(int start_ms, int duration_ms,
-                             int* audio_samples_size, int16_t** audio_samples);
-
-// Returns the time that audio data was last captured in milliseconds. There's
-// no contract about what time zero represents, the accuracy, or the granularity
-// of the result. Subsequent calls will generally not return a lower value, but
-// even that's not guaranteed if there's an overflow wraparound.
-// The reference implementation of this function just returns a constantly
-// incrementing value for each call, since it would need a non-portable platform
-// call to access time information. For real applications, you'll need to write
-// your own platform-specific implementation.
-int32_t LatestAudioTimestamp();
-
-#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_AUDIO_PROVIDER_H_
diff --git a/tensorflow/lite/micro/examples/micro_speech/audio_provider_mock.cc b/tensorflow/lite/micro/examples/micro_speech/audio_provider_mock.cc
deleted file mode 100644
index fe3ad16..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/audio_provider_mock.cc
+++ /dev/null
@@ -1,54 +0,0 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/micro/examples/micro_speech/audio_provider.h"
-#include "tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h"
-#include "tensorflow/lite/micro/examples/micro_speech/testdata/no_1000ms_audio_data.h"
-#include "tensorflow/lite/micro/examples/micro_speech/testdata/yes_1000ms_audio_data.h"
-
-namespace {
-int16_t g_dummy_audio_data[kMaxAudioSampleSize];
-int32_t g_latest_audio_timestamp = 0;
-}  // namespace
-
-TfLiteStatus GetAudioSamples(int start_ms, int duration_ms,
-                             int* audio_samples_size, int16_t** audio_samples) {
-  const int yes_start = (0 * kAudioSampleFrequency) / 1000;
-  const int yes_end = (1000 * kAudioSampleFrequency) / 1000;
-  const int no_start = (4000 * kAudioSampleFrequency) / 1000;
-  const int no_end = (5000 * kAudioSampleFrequency) / 1000;
-  const int wraparound = (8000 * kAudioSampleFrequency) / 1000;
-  const int start_sample = (start_ms * kAudioSampleFrequency) / 1000;
-  for (int i = 0; i < kMaxAudioSampleSize; ++i) {
-    const int sample_index = (start_sample + i) % wraparound;
-    int16_t sample;
-    if ((sample_index >= yes_start) && (sample_index < yes_end)) {
-      sample = g_yes_1000ms_audio_data[sample_index - yes_start];
-    } else if ((sample_index >= no_start) && (sample_index < no_end)) {
-      sample = g_no_1000ms_audio_data[sample_index - no_start];
-    } else {
-      sample = 0;
-    }
-    g_dummy_audio_data[i] = sample;
-  }
-  *audio_samples_size = kMaxAudioSampleSize;
-  *audio_samples = g_dummy_audio_data;
-  return kTfLiteOk;
-}
-
-int32_t LatestAudioTimestamp() {
-  g_latest_audio_timestamp += 100;
-  return g_latest_audio_timestamp;
-}
diff --git a/tensorflow/lite/micro/examples/micro_speech/audio_provider_mock_test.cc b/tensorflow/lite/micro/examples/micro_speech/audio_provider_mock_test.cc
deleted file mode 100644
index b15749e..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/audio_provider_mock_test.cc
+++ /dev/null
@@ -1,68 +0,0 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include <limits>
-
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/micro/examples/micro_speech/audio_provider.h"
-#include "tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h"
-#include "tensorflow/lite/micro/examples/micro_speech/testdata/no_1000ms_audio_data.h"
-#include "tensorflow/lite/micro/examples/micro_speech/testdata/yes_1000ms_audio_data.h"
-#include "tensorflow/lite/micro/testing/micro_test.h"
-
-TF_LITE_MICRO_TESTS_BEGIN
-
-TF_LITE_MICRO_TEST(TestAudioProviderMock) {
-  int audio_samples_size = 0;
-  int16_t* audio_samples = nullptr;
-  TfLiteStatus get_status = GetAudioSamples(
-      0, kFeatureSliceDurationMs, &audio_samples_size, &audio_samples);
-  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, get_status);
-  TF_LITE_MICRO_EXPECT_LE(audio_samples_size, kMaxAudioSampleSize);
-  TF_LITE_MICRO_EXPECT(audio_samples != nullptr);
-  for (int i = 0; i < audio_samples_size; ++i) {
-    TF_LITE_MICRO_EXPECT_EQ(g_yes_1000ms_audio_data[i], audio_samples[i]);
-  }
-
-  get_status = GetAudioSamples(500, kFeatureSliceDurationMs,
-                               &audio_samples_size, &audio_samples);
-  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, get_status);
-  TF_LITE_MICRO_EXPECT_LE(audio_samples_size, kMaxAudioSampleSize);
-  TF_LITE_MICRO_EXPECT(audio_samples != nullptr);
-  for (int i = 0; i < audio_samples_size; ++i) {
-    TF_LITE_MICRO_EXPECT_EQ(g_yes_1000ms_audio_data[i + 8000],
-                            audio_samples[i]);
-  }
-
-  get_status = GetAudioSamples(1500, kFeatureSliceDurationMs,
-                               &audio_samples_size, &audio_samples);
-  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, get_status);
-  TF_LITE_MICRO_EXPECT_LE(audio_samples_size, kMaxAudioSampleSize);
-  TF_LITE_MICRO_EXPECT(audio_samples != nullptr);
-  for (int i = 0; i < audio_samples_size; ++i) {
-    TF_LITE_MICRO_EXPECT_EQ(0, audio_samples[i]);
-  }
-
-  get_status = GetAudioSamples(12250, kFeatureSliceDurationMs,
-                               &audio_samples_size, &audio_samples);
-  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, get_status);
-  TF_LITE_MICRO_EXPECT_LE(audio_samples_size, kMaxAudioSampleSize);
-  TF_LITE_MICRO_EXPECT(audio_samples != nullptr);
-  for (int i = 0; i < audio_samples_size; ++i) {
-    TF_LITE_MICRO_EXPECT_EQ(g_no_1000ms_audio_data[i + 4000], audio_samples[i]);
-  }
-}
-
-TF_LITE_MICRO_TESTS_END
diff --git a/tensorflow/lite/micro/examples/micro_speech/audio_provider_test.cc b/tensorflow/lite/micro/examples/micro_speech/audio_provider_test.cc
deleted file mode 100644
index fb403c0..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/audio_provider_test.cc
+++ /dev/null
@@ -1,66 +0,0 @@
-/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/micro/examples/micro_speech/audio_provider.h"
-
-#include <limits>
-
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h"
-#include "tensorflow/lite/micro/testing/micro_test.h"
-
-TF_LITE_MICRO_TESTS_BEGIN
-
-TF_LITE_MICRO_TEST(TestAudioProvider) {
-  int audio_samples_size = 0;
-  int16_t* audio_samples = nullptr;
-  TfLiteStatus get_status = GetAudioSamples(
-      0, kFeatureSliceDurationMs, &audio_samples_size, &audio_samples);
-  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, get_status);
-  TF_LITE_MICRO_EXPECT_LE(audio_samples_size, kMaxAudioSampleSize);
-  TF_LITE_MICRO_EXPECT(audio_samples != nullptr);
-
-  // Make sure we can read all of the returned memory locations.
-  int total = 0;
-  for (int i = 0; i < audio_samples_size; ++i) {
-    total += audio_samples[i];
-  }
-  (void)total;
-}
-
-TF_LITE_MICRO_TEST(TestTimer) {
-  // Make sure that the technically-undefined overflow behavior we rely on below
-  // works on this platform. It's still not guaranteed, but at least this is a
-  // smoke check.  Turn off when running with ASan, as it will complain about
-  // the following undefined behavior.
-#ifndef ADDRESS_SANITIZER
-  int32_t overflow_value = std::numeric_limits<int32_t>::max();
-  overflow_value += 1;
-  TF_LITE_MICRO_EXPECT_EQ(std::numeric_limits<int32_t>::min(), overflow_value);
-#endif
-
-  const int32_t first_time = LatestAudioTimestamp();
-  const int32_t second_time = LatestAudioTimestamp();
-
-  // It's possible that the timer may have wrapped around from +BIG_NUM to
-  // -BIG_NUM between the first and second calls, since we're storing
-  // milliseconds in a 32-bit integer. It's not reasonable that the call itself
-  // would have taken more than 2^31 milliseconds though, so look at the
-  // difference and rely on integer overflow to ensure it's accurate.
-  const int32_t time_delta = (second_time - first_time);
-  TF_LITE_MICRO_EXPECT_LE(0, time_delta);
-}
-
-TF_LITE_MICRO_TESTS_END
diff --git a/tensorflow/lite/micro/examples/micro_speech/command_responder.cc b/tensorflow/lite/micro/examples/micro_speech/command_responder.cc
deleted file mode 100644
index 2184478..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/command_responder.cc
+++ /dev/null
@@ -1,28 +0,0 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/micro/examples/micro_speech/command_responder.h"
-
-#include "tensorflow/lite/micro/micro_log.h"
-
-// The default implementation writes out the name of the recognized command
-// to the error console. Real applications will want to take some custom
-// action instead, and should implement their own versions of this function.
-void RespondToCommand(int32_t current_time, const char* found_command,
-                      uint8_t score, bool is_new_command) {
-  if (is_new_command) {
-    MicroPrintf("Heard %s (%d) @%dms", found_command, score, current_time);
-  }
-}
diff --git a/tensorflow/lite/micro/examples/micro_speech/command_responder.h b/tensorflow/lite/micro/examples/micro_speech/command_responder.h
deleted file mode 100644
index a1acb99..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/command_responder.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-// Provides an interface to take an action based on an audio command.
-
-#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_COMMAND_RESPONDER_H_
-#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_COMMAND_RESPONDER_H_
-
-#include "tensorflow/lite/c/common.h"
-
-// Called every time the results of an audio recognition run are available. The
-// human-readable name of any recognized command is in the `found_command`
-// argument, `score` has the numerical confidence, and `is_new_command` is set
-// if the previous command was different to this one.
-void RespondToCommand(int32_t current_time, const char* found_command,
-                      uint8_t score, bool is_new_command);
-
-#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_COMMAND_RESPONDER_H_
diff --git a/tensorflow/lite/micro/examples/micro_speech/command_responder_test.cc b/tensorflow/lite/micro/examples/micro_speech/command_responder_test.cc
deleted file mode 100644
index e02f7ae..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/command_responder_test.cc
+++ /dev/null
@@ -1,29 +0,0 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/micro/examples/micro_speech/command_responder.h"
-
-#include "tensorflow/lite/micro/testing/micro_test.h"
-
-TF_LITE_MICRO_TESTS_BEGIN
-
-TF_LITE_MICRO_TEST(TestCallability) {
-  // This will have external side-effects (like printing to the debug console
-  // or lighting an LED) that are hard to observe, so the most we can do is
-  // make sure the call doesn't crash.
-  RespondToCommand(0, "foo", 0, true);
-}
-
-TF_LITE_MICRO_TESTS_END
diff --git a/tensorflow/lite/micro/examples/micro_speech/evaluate.py b/tensorflow/lite/micro/examples/micro_speech/evaluate.py
new file mode 100644
index 0000000..81f74b1
--- /dev/null
+++ b/tensorflow/lite/micro/examples/micro_speech/evaluate.py
@@ -0,0 +1,203 @@
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+"""
+Wake-word model evaluation, with audio preprocessing using MicroInterpreter
+
+Run:
+bazel build tensorflow/lite/micro/examples/micro_speech:evaluate
+bazel-bin/tensorflow/lite/micro/examples/micro_speech/evaluate
+  --sample_path="path to 1 second audio sample in WAV format"
+"""
+
+from absl import app
+from absl import flags
+import numpy as np
+from pathlib import Path
+
+from tflite_micro.python.tflite_micro import runtime
+from tensorflow.python.platform import resource_loader
+import tensorflow as tf
+from tflite_micro.tensorflow.lite.micro.examples.micro_speech import audio_preprocessor
+
+_SAMPLE_PATH = flags.DEFINE_string(
+    name='sample_path',
+    default='',
+    help='path for the audio sample to be predicted.',
+)
+
+_FEATURES_SHAPE = (49, 40)
+
+
+def quantize_input_data(data, input_details):
+  """quantize the input data using scale and zero point
+
+  Args:
+      data (np.array in float): input data for the interpreter
+      input_details : output of get_input_details from the tflm interpreter.
+
+  Returns:
+    np.ndarray: quantized data as int8 dtype
+  """
+  # Get input quantization parameters
+  data_type = input_details['dtype']
+  input_quantization_parameters = input_details['quantization_parameters']
+  input_scale, input_zero_point = input_quantization_parameters['scales'][
+      0], input_quantization_parameters['zero_points'][0]
+  # quantize the input data
+  data = data / input_scale + input_zero_point
+  return data.astype(data_type)
+
+
+def dequantize_output_data(data: np.ndarray,
+                           output_details: dict) -> np.ndarray:
+  """Dequantize the model output
+
+  Args:
+      data: integer data to be dequantized
+      output_details: TFLM interpreter model output details
+
+  Returns:
+      np.ndarray: dequantized data as float32 dtype
+  """
+  output_quantization_parameters = output_details['quantization_parameters']
+  output_scale = output_quantization_parameters['scales'][0]
+  output_zero_point = output_quantization_parameters['zero_points'][0]
+  # Caveat: tflm_output_quant need to be converted to float to avoid integer
+  # overflow during dequantization
+  # e.g., (tflm_output_quant -output_zero_point) and
+  # (tflm_output_quant + (-output_zero_point))
+  # can produce different results (int8 calculation)
+  return output_scale * (data.astype(np.float32) - output_zero_point)
+
+
+def predict(interpreter: runtime.Interpreter,
+            features: np.ndarray) -> np.ndarray:
+  """
+  Use TFLM interpreter to predict wake-word from audio sample features
+
+  Args:
+      interpreter: TFLM python interpreter instance
+      features: wake-word model feature data, with shape _FEATURES_SHAPE
+
+  Returns:
+      np.ndarray: predicted probability (softmax) for each model category
+  """
+
+  input_details = interpreter.get_input_details(0)
+  # Quantize the input if the model is quantized
+  # and our features are np.float32
+  if input_details['dtype'] != np.float32 and features.dtype == np.float32:
+    features = quantize_input_data(features, input_details)
+  flattened_features = features.flatten().reshape([1, -1])
+  interpreter.set_input(flattened_features, 0)
+  interpreter.invoke()
+  tflm_output = interpreter.get_output(0)
+
+  output_details = interpreter.get_output_details(0)
+  if output_details['dtype'] == np.float32:
+    return tflm_output[0].astype(np.float32)
+  # Dequantize the output for quantized model
+  return dequantize_output_data(tflm_output[0], output_details)
+
+
+def generate_features(
+    audio_pp: audio_preprocessor.AudioPreprocessor) -> np.ndarray:
+  """
+  Generate audio sample features
+
+  Args:
+      audio_pp: AudioPreprocessor instance
+
+  Returns:
+      np.ndarray: generated audio sample features with shape _FEATURES_SHAPE
+  """
+  if audio_pp.params.use_float_output:
+    dtype = np.float32
+  else:
+    dtype = np.int8
+  features = np.zeros(_FEATURES_SHAPE, dtype=dtype)
+  start_index = 0
+  window_size = int(audio_pp.params.window_size_ms *
+                    audio_pp.params.sample_rate / 1000)
+  window_stride = int(audio_pp.params.window_stride_ms *
+                      audio_pp.params.sample_rate / 1000)
+  samples = audio_pp.samples[0]
+  frame_number = 0
+  end_index = start_index + window_size
+
+  # reset audio preprocessor noise estimates
+  audio_pp.reset_tflm()
+
+  while end_index <= len(samples):
+    frame_tensor: tf.Tensor = tf.convert_to_tensor(
+        samples[start_index:end_index])
+    frame_tensor = tf.reshape(frame_tensor, [1, -1])
+    feature_tensor = audio_pp.generate_feature_using_tflm(frame_tensor)
+    features[frame_number] = feature_tensor.numpy()
+    start_index += window_stride
+    end_index += window_stride
+    frame_number += 1
+
+  return features
+
+
+def get_category_names() -> list[str]:
+  """
+  Get the list of model output category names
+
+  Returns:
+      list[str]: model output category names
+  """
+  return ['silence', 'unknown', 'yes', 'no']
+
+
+def _main(_):
+  sample_path = Path(_SAMPLE_PATH.value)
+  assert sample_path.exists() and sample_path.is_file(), \
+      'Audio sample file does not exist. Please check the path.'
+  model_prefix_path = resource_loader.get_path_to_datafile('models')
+  model_path = Path(model_prefix_path, 'micro_speech_quantized.tflite')
+
+  feature_params = audio_preprocessor.FeatureParams()
+  audio_pp = audio_preprocessor.AudioPreprocessor(feature_params)
+  audio_pp.load_samples(sample_path)
+  features = generate_features(audio_pp)
+
+  tflm_interpreter = runtime.Interpreter.from_file(model_path)
+
+  frame_number = 0
+  test_features = np.zeros(_FEATURES_SHAPE, dtype=np.int8)
+  for feature in features:
+    test_features[frame_number] = feature
+    category_probabilities = predict(tflm_interpreter, test_features)
+    category_probabilities_str = '['
+    for i in range(len(category_probabilities)):
+      if i > 0:
+        category_probabilities_str += ', '
+      category_probabilities_str += f'{category_probabilities[i]:.4f}'
+    category_probabilities_str += ']'
+    print(f'Frame #{frame_number}: {category_probabilities_str}')
+    frame_number += 1
+
+  category_probabilities = predict(tflm_interpreter, features)
+  predicted_category = np.argmax(category_probabilities)
+  category_names = get_category_names()
+  print('Model predicts the audio sample as'
+        f' <{category_names[predicted_category]}>'
+        f' with probability {category_probabilities[predicted_category]:.2f}')
+
+
+if __name__ == '__main__':
+  app.run(_main)
diff --git a/tensorflow/lite/micro/examples/micro_speech/evaluate_test.py b/tensorflow/lite/micro/examples/micro_speech/evaluate_test.py
new file mode 100644
index 0000000..d5d6ac0
--- /dev/null
+++ b/tensorflow/lite/micro/examples/micro_speech/evaluate_test.py
@@ -0,0 +1,96 @@
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+"""
+Wake-word model testing, with audio preprocessing using MicroInterpreter
+
+Run:
+bazel build tensorflow/lite/micro/examples/micro_speech:evaluate_test
+bazel-bin/tensorflow/lite/micro/examples/micro_speech/evaluate_test
+"""
+
+import numpy as np
+from pathlib import Path
+
+from tensorflow.python.framework import test_util
+from tensorflow.python.platform import resource_loader
+from tensorflow.python.platform import test
+from tflite_micro.python.tflite_micro import runtime
+from tflite_micro.tensorflow.lite.micro.examples.micro_speech import audio_preprocessor
+from tflite_micro.tensorflow.lite.micro.examples.micro_speech import evaluate
+
+
+class MicroSpeechTest(test_util.TensorFlowTestCase):
+
+  def setUp(self):
+    model_prefix_path = resource_loader.get_path_to_datafile('models')
+    self.sample_prefix_path = resource_loader.get_path_to_datafile('testdata')
+    model_path = Path(model_prefix_path, 'micro_speech_quantized.tflite')
+    self.tflm_interpreter = runtime.Interpreter.from_file(model_path)
+    self.test_data = [
+        ('no', 'no_1000ms.wav'),
+        ('yes', 'yes_1000ms.wav'),
+        ('silence', 'noise_1000ms.wav'),
+        ('silence', 'silence_1000ms.wav'),
+    ]
+
+  def testModelAccuracyWithInt8Features(self):
+    feature_params = audio_preprocessor.FeatureParams()
+    audio_pp = audio_preprocessor.AudioPreprocessor(feature_params)
+    for label, sample_name in self.test_data:
+      # Load audio sample data
+      sample_path = Path(self.sample_prefix_path, sample_name)
+      audio_pp.load_samples(sample_path)
+
+      # Generate feature data from audio samples.
+      # Note that the noise estimate is reset each time generate_features()
+      # is called.
+      features = evaluate.generate_features(audio_pp)
+
+      # Run model inference (quantized) on the feature data
+      category_probabilities = evaluate.predict(self.tflm_interpreter,
+                                                features)
+
+      # Check the prediction result
+      predicted_category = np.argmax(category_probabilities)
+      category_names = evaluate.get_category_names()
+      # Check the prediction
+      self.assertEqual(category_names[predicted_category], label)
+
+  def testModelAccuracyWithFloatFeatures(self):
+    feature_params = audio_preprocessor.FeatureParams(use_float_output=True)
+    audio_pp = audio_preprocessor.AudioPreprocessor(feature_params)
+    for label, sample_name in self.test_data:
+      # Load audio sample data
+      sample_path = Path(self.sample_prefix_path, sample_name)
+      audio_pp.load_samples(sample_path)
+
+      # Generate feature data from audio samples.
+      # Note that the noise estimate is reset each time generate_features()
+      # is called.
+      features = evaluate.generate_features(audio_pp)
+
+      # Run model inference (quantized) on the feature data
+      category_probabilities = evaluate.predict(self.tflm_interpreter,
+                                                features)
+
+      # Check the prediction result
+      predicted_category = np.argmax(category_probabilities)
+      category_names = evaluate.get_category_names()
+      # Check the prediction
+      self.assertEqual(category_names[predicted_category], label)
+
+
+if __name__ == '__main__':
+  test.main()
diff --git a/tensorflow/lite/micro/examples/micro_speech/feature_provider.cc b/tensorflow/lite/micro/examples/micro_speech/feature_provider.cc
deleted file mode 100644
index a4a6635..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/feature_provider.cc
+++ /dev/null
@@ -1,119 +0,0 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/micro/examples/micro_speech/feature_provider.h"
-
-#include "tensorflow/lite/micro/examples/micro_speech/audio_provider.h"
-#include "tensorflow/lite/micro/examples/micro_speech/micro_features/micro_features_generator.h"
-#include "tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h"
-#include "tensorflow/lite/micro/micro_log.h"
-
-FeatureProvider::FeatureProvider(int feature_size, int8_t* feature_data)
-    : feature_size_(feature_size),
-      feature_data_(feature_data),
-      is_first_run_(true) {
-  // Initialize the feature data to default values.
-  for (int n = 0; n < feature_size_; ++n) {
-    feature_data_[n] = 0;
-  }
-}
-
-FeatureProvider::~FeatureProvider() {}
-
-TfLiteStatus FeatureProvider::PopulateFeatureData(int32_t last_time_in_ms,
-                                                  int32_t time_in_ms,
-                                                  int* how_many_new_slices) {
-  if (feature_size_ != kFeatureElementCount) {
-    MicroPrintf("Requested feature_data_ size %d doesn't match %d",
-                feature_size_, kFeatureElementCount);
-    return kTfLiteError;
-  }
-
-  // Quantize the time into steps as long as each window stride, so we can
-  // figure out which audio data we need to fetch.
-  const int last_step = (last_time_in_ms / kFeatureSliceStrideMs);
-  const int current_step = (time_in_ms / kFeatureSliceStrideMs);
-
-  int slices_needed = current_step - last_step;
-  // If this is the first call, make sure we don't use any cached information.
-  if (is_first_run_) {
-    TfLiteStatus init_status = InitializeMicroFeatures();
-    if (init_status != kTfLiteOk) {
-      return init_status;
-    }
-    is_first_run_ = false;
-    slices_needed = kFeatureSliceCount;
-  }
-  if (slices_needed > kFeatureSliceCount) {
-    slices_needed = kFeatureSliceCount;
-  }
-  *how_many_new_slices = slices_needed;
-
-  const int slices_to_keep = kFeatureSliceCount - slices_needed;
-  const int slices_to_drop = kFeatureSliceCount - slices_to_keep;
-  // If we can avoid recalculating some slices, just move the existing data
-  // up in the spectrogram, to perform something like this:
-  // last time = 80ms          current time = 120ms
-  // +-----------+             +-----------+
-  // | data@20ms |         --> | data@60ms |
-  // +-----------+       --    +-----------+
-  // | data@40ms |     --  --> | data@80ms |
-  // +-----------+   --  --    +-----------+
-  // | data@60ms | --  --      |  <empty>  |
-  // +-----------+   --        +-----------+
-  // | data@80ms | --          |  <empty>  |
-  // +-----------+             +-----------+
-  if (slices_to_keep > 0) {
-    for (int dest_slice = 0; dest_slice < slices_to_keep; ++dest_slice) {
-      int8_t* dest_slice_data =
-          feature_data_ + (dest_slice * kFeatureSliceSize);
-      const int src_slice = dest_slice + slices_to_drop;
-      const int8_t* src_slice_data =
-          feature_data_ + (src_slice * kFeatureSliceSize);
-      for (int i = 0; i < kFeatureSliceSize; ++i) {
-        dest_slice_data[i] = src_slice_data[i];
-      }
-    }
-  }
-  // Any slices that need to be filled in with feature data have their
-  // appropriate audio data pulled, and features calculated for that slice.
-  if (slices_needed > 0) {
-    for (int new_slice = slices_to_keep; new_slice < kFeatureSliceCount;
-         ++new_slice) {
-      const int new_step = (current_step - kFeatureSliceCount + 1) + new_slice;
-      const int32_t slice_start_ms = (new_step * kFeatureSliceStrideMs);
-      int16_t* audio_samples = nullptr;
-      int audio_samples_size = 0;
-      // TODO(petewarden): Fix bug that leads to non-zero slice_start_ms
-      GetAudioSamples((slice_start_ms > 0 ? slice_start_ms : 0),
-                      kFeatureSliceDurationMs, &audio_samples_size,
-                      &audio_samples);
-      if (audio_samples_size < kMaxAudioSampleSize) {
-        MicroPrintf("Audio data size %d too small, want %d", audio_samples_size,
-                    kMaxAudioSampleSize);
-        return kTfLiteError;
-      }
-      int8_t* new_slice_data = feature_data_ + (new_slice * kFeatureSliceSize);
-      size_t num_samples_read;
-      TfLiteStatus generate_status = GenerateMicroFeatures(
-          audio_samples, audio_samples_size, kFeatureSliceSize, new_slice_data,
-          &num_samples_read);
-      if (generate_status != kTfLiteOk) {
-        return generate_status;
-      }
-    }
-  }
-  return kTfLiteOk;
-}
diff --git a/tensorflow/lite/micro/examples/micro_speech/feature_provider.h b/tensorflow/lite/micro/examples/micro_speech/feature_provider.h
deleted file mode 100644
index 2a2ef8f..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/feature_provider.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_FEATURE_PROVIDER_H_
-#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_FEATURE_PROVIDER_H_
-
-#include "tensorflow/lite/c/common.h"
-
-// Binds itself to an area of memory intended to hold the input features for an
-// audio-recognition neural network model, and fills that data area with the
-// features representing the current audio input, for example from a microphone.
-// The audio features themselves are a two-dimensional array, made up of
-// horizontal slices representing the frequencies at one point in time, stacked
-// on top of each other to form a spectrogram showing how those frequencies
-// changed over time.
-class FeatureProvider {
- public:
-  // Create the provider, and bind it to an area of memory. This memory should
-  // remain accessible for the lifetime of the provider object, since subsequent
-  // calls will fill it with feature data. The provider does no memory
-  // management of this data.
-  FeatureProvider(int feature_size, int8_t* feature_data);
-  ~FeatureProvider();
-
-  // Fills the feature data with information from audio inputs, and returns how
-  // many feature slices were updated.
-  TfLiteStatus PopulateFeatureData(int32_t last_time_in_ms, int32_t time_in_ms,
-                                   int* how_many_new_slices);
-
- private:
-  int feature_size_;
-  int8_t* feature_data_;
-  // Make sure we don't try to use cached information if this is the first call
-  // into the provider.
-  bool is_first_run_;
-};
-
-#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_FEATURE_PROVIDER_H_
diff --git a/tensorflow/lite/micro/examples/micro_speech/feature_provider_mock_test.cc b/tensorflow/lite/micro/examples/micro_speech/feature_provider_mock_test.cc
deleted file mode 100644
index 6fe5e43..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/feature_provider_mock_test.cc
+++ /dev/null
@@ -1,58 +0,0 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/micro/examples/micro_speech/feature_provider.h"
-#include "tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h"
-#include "tensorflow/lite/micro/examples/micro_speech/micro_features/no_micro_features_data.h"
-#include "tensorflow/lite/micro/examples/micro_speech/micro_features/yes_micro_features_data.h"
-#include "tensorflow/lite/micro/testing/micro_test.h"
-
-TF_LITE_MICRO_TESTS_BEGIN
-
-TF_LITE_MICRO_TEST(TestFeatureProviderMockYes) {
-  int8_t feature_data[kFeatureElementCount];
-  FeatureProvider feature_provider(kFeatureElementCount, feature_data);
-
-  int how_many_new_slices = 0;
-  TfLiteStatus populate_status = feature_provider.PopulateFeatureData(
-      /* last_time_in_ms= */ 0, /* time_in_ms= */ 970, &how_many_new_slices);
-  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, populate_status);
-  TF_LITE_MICRO_EXPECT_EQ(kFeatureSliceCount, how_many_new_slices);
-
-  for (int i = 0; i < kFeatureElementCount; ++i) {
-    TF_LITE_MICRO_EXPECT_EQ(g_yes_micro_f2e59fea_nohash_1_data[i],
-                            feature_data[i]);
-  }
-}
-
-TF_LITE_MICRO_TEST(TestFeatureProviderMockNo) {
-  int8_t feature_data[kFeatureElementCount];
-  FeatureProvider feature_provider(kFeatureElementCount, feature_data);
-
-  int how_many_new_slices = 0;
-  TfLiteStatus populate_status = feature_provider.PopulateFeatureData(
-      /* last_time_in_ms= */ 4000,
-      /* time_in_ms= */ 4970, &how_many_new_slices);
-  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, populate_status);
-  TF_LITE_MICRO_EXPECT_EQ(kFeatureSliceCount, how_many_new_slices);
-
-  for (int i = 0; i < kFeatureElementCount; ++i) {
-    TF_LITE_MICRO_EXPECT_EQ(g_no_micro_f9643d42_nohash_4_data[i],
-                            feature_data[i]);
-  }
-}
-
-TF_LITE_MICRO_TESTS_END
diff --git a/tensorflow/lite/micro/examples/micro_speech/feature_provider_test.cc b/tensorflow/lite/micro/examples/micro_speech/feature_provider_test.cc
deleted file mode 100644
index 2582e8c..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/feature_provider_test.cc
+++ /dev/null
@@ -1,35 +0,0 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/micro/examples/micro_speech/feature_provider.h"
-
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h"
-#include "tensorflow/lite/micro/testing/micro_test.h"
-
-TF_LITE_MICRO_TESTS_BEGIN
-
-TF_LITE_MICRO_TEST(TestFeatureProvider) {
-  int8_t feature_data[kFeatureElementCount];
-  FeatureProvider feature_provider(kFeatureElementCount, feature_data);
-
-  int how_many_new_slices = 0;
-  TfLiteStatus populate_status = feature_provider.PopulateFeatureData(
-      /* last_time_in_ms= */ 0, /* time_in_ms= */ 10000, &how_many_new_slices);
-  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, populate_status);
-  TF_LITE_MICRO_EXPECT_EQ(kFeatureSliceCount, how_many_new_slices);
-}
-
-TF_LITE_MICRO_TESTS_END
diff --git a/tensorflow/lite/micro/examples/micro_speech/images/animation_on_arduino.gif b/tensorflow/lite/micro/examples/micro_speech/images/animation_on_arduino.gif
deleted file mode 100644
index 66ab9c1..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/images/animation_on_arduino.gif
+++ /dev/null
Binary files differ
diff --git a/tensorflow/lite/micro/examples/micro_speech/images/audio_preprocessor_int8.png b/tensorflow/lite/micro/examples/micro_speech/images/audio_preprocessor_int8.png
new file mode 100644
index 0000000..a5c91fa
--- /dev/null
+++ b/tensorflow/lite/micro/examples/micro_speech/images/audio_preprocessor_int8.png
Binary files differ
diff --git a/tensorflow/lite/micro/examples/micro_speech/images/micro_speech_quantized.png b/tensorflow/lite/micro/examples/micro_speech/images/micro_speech_quantized.png
new file mode 100644
index 0000000..59e98c6
--- /dev/null
+++ b/tensorflow/lite/micro/examples/micro_speech/images/micro_speech_quantized.png
Binary files differ
diff --git a/tensorflow/lite/micro/examples/micro_speech/images/model_architecture.png b/tensorflow/lite/micro/examples/micro_speech/images/model_architecture.png
deleted file mode 100644
index ce91fad..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/images/model_architecture.png
+++ /dev/null
Binary files differ
diff --git a/tensorflow/lite/micro/examples/micro_speech/main.cc b/tensorflow/lite/micro/examples/micro_speech/main.cc
deleted file mode 100644
index f35c472..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/main.cc
+++ /dev/null
@@ -1,27 +0,0 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/micro/examples/micro_speech/main_functions.h"
-
-// This is the default main used on systems that have the standard C entry
-// point. Other devices (for example FreeRTOS or ESP32) that have different
-// requirements for entry code (like an app_main function) should specialize
-// this main.cc file in a target-specific subfolder.
-int main(int argc, char* argv[]) {
-  setup();
-  while (true) {
-    loop();
-  }
-}
diff --git a/tensorflow/lite/micro/examples/micro_speech/main_functions.cc b/tensorflow/lite/micro/examples/micro_speech/main_functions.cc
deleted file mode 100644
index c92636a..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/main_functions.cc
+++ /dev/null
@@ -1,163 +0,0 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/micro/examples/micro_speech/main_functions.h"
-
-#include "tensorflow/lite/micro/examples/micro_speech/audio_provider.h"
-#include "tensorflow/lite/micro/examples/micro_speech/command_responder.h"
-#include "tensorflow/lite/micro/examples/micro_speech/feature_provider.h"
-#include "tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h"
-#include "tensorflow/lite/micro/examples/micro_speech/micro_speech_model_data.h"
-#include "tensorflow/lite/micro/examples/micro_speech/recognize_commands.h"
-#include "tensorflow/lite/micro/micro_interpreter.h"
-#include "tensorflow/lite/micro/micro_log.h"
-#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
-#include "tensorflow/lite/micro/system_setup.h"
-#include "tensorflow/lite/schema/schema_generated.h"
-
-// Globals, used for compatibility with Arduino-style sketches.
-namespace {
-const tflite::Model* model = nullptr;
-tflite::MicroInterpreter* interpreter = nullptr;
-TfLiteTensor* model_input = nullptr;
-FeatureProvider* feature_provider = nullptr;
-RecognizeCommands* recognizer = nullptr;
-int32_t previous_time = 0;
-
-// Create an area of memory to use for input, output, and intermediate arrays.
-// The size of this will depend on the model you're using, and may need to be
-// determined by experimentation.
-constexpr int kTensorArenaSize = 10 * 1024;
-uint8_t tensor_arena[kTensorArenaSize];
-int8_t feature_buffer[kFeatureElementCount];
-int8_t* model_input_buffer = nullptr;
-}  // namespace
-
-// The name of this function is important for Arduino compatibility.
-void setup() {
-  tflite::InitializeTarget();
-
-  // Map the model into a usable data structure. This doesn't involve any
-  // copying or parsing, it's a very lightweight operation.
-  model = tflite::GetModel(g_micro_speech_model_data);
-  if (model->version() != TFLITE_SCHEMA_VERSION) {
-    MicroPrintf(
-        "Model provided is schema version %d not equal "
-        "to supported version %d.",
-        model->version(), TFLITE_SCHEMA_VERSION);
-    return;
-  }
-
-  // Pull in only the operation implementations we need.
-  // This relies on a complete list of all the ops needed by this graph.
-
-  // NOLINTNEXTLINE(runtime-global-variables)
-  static tflite::MicroMutableOpResolver<4> micro_op_resolver;
-  if (micro_op_resolver.AddDepthwiseConv2D() != kTfLiteOk) {
-    return;
-  }
-  if (micro_op_resolver.AddFullyConnected() != kTfLiteOk) {
-    return;
-  }
-  if (micro_op_resolver.AddSoftmax() != kTfLiteOk) {
-    return;
-  }
-  if (micro_op_resolver.AddReshape() != kTfLiteOk) {
-    return;
-  }
-
-  // Build an interpreter to run the model with.
-  static tflite::MicroInterpreter static_interpreter(
-      model, micro_op_resolver, tensor_arena, kTensorArenaSize);
-  interpreter = &static_interpreter;
-
-  // Allocate memory from the tensor_arena for the model's tensors.
-  TfLiteStatus allocate_status = interpreter->AllocateTensors();
-  if (allocate_status != kTfLiteOk) {
-    MicroPrintf("AllocateTensors() failed");
-    return;
-  }
-
-  // Get information about the memory area to use for the model's input.
-  model_input = interpreter->input(0);
-  if ((model_input->dims->size != 2) || (model_input->dims->data[0] != 1) ||
-      (model_input->dims->data[1] !=
-       (kFeatureSliceCount * kFeatureSliceSize)) ||
-      (model_input->type != kTfLiteInt8)) {
-    MicroPrintf("Bad input tensor parameters in model");
-    return;
-  }
-  model_input_buffer = model_input->data.int8;
-
-  // Prepare to access the audio spectrograms from a microphone or other source
-  // that will provide the inputs to the neural network.
-  // NOLINTNEXTLINE(runtime-global-variables)
-  static FeatureProvider static_feature_provider(kFeatureElementCount,
-                                                 feature_buffer);
-  feature_provider = &static_feature_provider;
-
-  static RecognizeCommands static_recognizer;
-  recognizer = &static_recognizer;
-
-  previous_time = 0;
-}
-
-// The name of this function is important for Arduino compatibility.
-void loop() {
-  // Fetch the spectrogram for the current time.
-  const int32_t current_time = LatestAudioTimestamp();
-  int how_many_new_slices = 0;
-  TfLiteStatus feature_status = feature_provider->PopulateFeatureData(
-      previous_time, current_time, &how_many_new_slices);
-  if (feature_status != kTfLiteOk) {
-    MicroPrintf("Feature generation failed");
-    return;
-  }
-  previous_time = current_time;
-  // If no new audio samples have been received since last time, don't bother
-  // running the network model.
-  if (how_many_new_slices == 0) {
-    return;
-  }
-
-  // Copy feature buffer to input tensor
-  for (int i = 0; i < kFeatureElementCount; i++) {
-    model_input_buffer[i] = feature_buffer[i];
-  }
-
-  // Run the model on the spectrogram input and make sure it succeeds.
-  TfLiteStatus invoke_status = interpreter->Invoke();
-  if (invoke_status != kTfLiteOk) {
-    MicroPrintf("Invoke failed");
-    return;
-  }
-
-  // Obtain a pointer to the output tensor
-  TfLiteTensor* output = interpreter->output(0);
-  // Determine whether a command was recognized based on the output of inference
-  const char* found_command = nullptr;
-  uint8_t score = 0;
-  bool is_new_command = false;
-  TfLiteStatus process_status = recognizer->ProcessLatestResults(
-      output, current_time, &found_command, &score, &is_new_command);
-  if (process_status != kTfLiteOk) {
-    MicroPrintf("RecognizeCommands::ProcessLatestResults() failed");
-    return;
-  }
-  // Do something based on the recognized command. The default implementation
-  // just prints to the error console, but you should replace this with your
-  // own function for a real application.
-  RespondToCommand(current_time, found_command, score, is_new_command);
-}
diff --git a/tensorflow/lite/micro/examples/micro_speech/main_functions.h b/tensorflow/lite/micro/examples/micro_speech/main_functions.h
deleted file mode 100644
index 0ac0677..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/main_functions.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MAIN_FUNCTIONS_H_
-#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MAIN_FUNCTIONS_H_
-
-// Expose a C friendly interface for main functions.
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-// Initializes all data needed for the example. The name is important, and needs
-// to be setup() for Arduino compatibility.
-void setup();
-
-// Runs one iteration of data gathering and inference. This should be called
-// repeatedly from the application code. The name needs to be loop() for Arduino
-// compatibility.
-void loop();
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MAIN_FUNCTIONS_H_
diff --git a/tensorflow/lite/micro/examples/micro_speech/micro_features/BUILD b/tensorflow/lite/micro/examples/micro_speech/micro_features/BUILD
deleted file mode 100644
index 1077435..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/micro_features/BUILD
+++ /dev/null
@@ -1,78 +0,0 @@
-# Library for generating feature vectors from audio data
-package(
-    default_visibility = ["//visibility:public"],
-    # Disabling layering_check because of http://b/177257332
-    features = ["-layering_check"],
-    licenses = ["notice"],
-)
-
-cc_library(
-    name = "micro_model_settings",
-    srcs = [
-        "micro_model_settings.cc",
-    ],
-    hdrs = [
-        "micro_model_settings.h",
-    ],
-)
-
-cc_library(
-    name = "micro_features_test_data",
-    srcs = [
-        "no_micro_features_data.cc",
-        "yes_micro_features_data.cc",
-    ],
-    hdrs = [
-        "no_micro_features_data.h",
-        "yes_micro_features_data.h",
-    ],
-)
-
-cc_library(
-    name = "micro_features_generator",
-    srcs = [
-        "micro_features_generator.cc",
-    ],
-    hdrs = [
-        "micro_features_generator.h",
-    ],
-    deps = [
-        ":micro_model_settings",
-        "//tensorflow/lite/c:common",
-        "//tensorflow/lite/experimental/microfrontend/lib:frontend",
-        "//tensorflow/lite/micro:micro_log",
-    ],
-)
-
-cc_library(
-    name = "micro_features_generator_test_data",
-    srcs = [
-        "no_feature_data_slice.cc",
-        "yes_feature_data_slice.cc",
-    ],
-    hdrs = [
-        "no_feature_data_slice.h",
-        "yes_feature_data_slice.h",
-    ],
-)
-
-cc_test(
-    name = "micro_features_generator_test",
-    size = "small",
-    srcs = [
-        "micro_features_generator_test.cc",
-    ],
-    tags = [
-        "noasan",  # TODO(b/179930607): Fix with asan.
-    ],
-    deps = [
-        ":micro_features_generator",
-        ":micro_features_generator_test_data",
-        ":micro_model_settings",
-        "//tensorflow/lite/c:common",
-        "//tensorflow/lite/micro:micro_framework",
-        "//tensorflow/lite/micro:micro_log",
-        "//tensorflow/lite/micro/examples/micro_speech:audio_sample_test_data",
-        "//tensorflow/lite/micro/testing:micro_test",
-    ],
-)
diff --git a/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_features_generator.cc b/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_features_generator.cc
deleted file mode 100644
index 3dbb5d3..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_features_generator.cc
+++ /dev/null
@@ -1,113 +0,0 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/micro/examples/micro_speech/micro_features/micro_features_generator.h"
-
-#include <cmath>
-#include <cstring>
-
-#include "tensorflow/lite/experimental/microfrontend/lib/frontend.h"
-#include "tensorflow/lite/experimental/microfrontend/lib/frontend_util.h"
-#include "tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h"
-#include "tensorflow/lite/micro/micro_log.h"
-
-namespace {
-
-FrontendState g_micro_features_state;
-bool g_is_first_time = true;
-
-}  // namespace
-
-TfLiteStatus InitializeMicroFeatures() {
-  FrontendConfig config;
-  config.window.size_ms = kFeatureSliceDurationMs;
-  config.window.step_size_ms = kFeatureSliceStrideMs;
-  config.noise_reduction.smoothing_bits = 10;
-  config.filterbank.num_channels = kFeatureSliceSize;
-  config.filterbank.lower_band_limit = 125.0;
-  config.filterbank.upper_band_limit = 7500.0;
-  config.noise_reduction.smoothing_bits = 10;
-  config.noise_reduction.even_smoothing = 0.025;
-  config.noise_reduction.odd_smoothing = 0.06;
-  config.noise_reduction.min_signal_remaining = 0.05;
-  config.pcan_gain_control.enable_pcan = 1;
-  config.pcan_gain_control.strength = 0.95;
-  config.pcan_gain_control.offset = 80.0;
-  config.pcan_gain_control.gain_bits = 21;
-  config.log_scale.enable_log = 1;
-  config.log_scale.scale_shift = 6;
-  if (!FrontendPopulateState(&config, &g_micro_features_state,
-                             kAudioSampleFrequency)) {
-    MicroPrintf("FrontendPopulateState() failed");
-    return kTfLiteError;
-  }
-  g_is_first_time = true;
-  return kTfLiteOk;
-}
-
-// This is not exposed in any header, and is only used for testing, to ensure
-// that the state is correctly set up before generating results.
-void SetMicroFeaturesNoiseEstimates(const uint32_t* estimate_presets) {
-  for (int i = 0; i < g_micro_features_state.filterbank.num_channels; ++i) {
-    g_micro_features_state.noise_reduction.estimate[i] = estimate_presets[i];
-  }
-}
-
-TfLiteStatus GenerateMicroFeatures(const int16_t* input, int input_size,
-                                   int output_size, int8_t* output,
-                                   size_t* num_samples_read) {
-  const int16_t* frontend_input;
-  if (g_is_first_time) {
-    frontend_input = input;
-    g_is_first_time = false;
-  } else {
-    frontend_input = input + 160;
-  }
-  FrontendOutput frontend_output = FrontendProcessSamples(
-      &g_micro_features_state, frontend_input, input_size, num_samples_read);
-
-  for (size_t i = 0; i < frontend_output.size; ++i) {
-    // These scaling values are derived from those used in input_data.py in the
-    // training pipeline.
-    // The feature pipeline outputs 16-bit signed integers in roughly a 0 to 670
-    // range. In training, these are then arbitrarily divided by 25.6 to get
-    // float values in the rough range of 0.0 to 26.0. This scaling is performed
-    // for historical reasons, to match up with the output of other feature
-    // generators.
-    // The process is then further complicated when we quantize the model. This
-    // means we have to scale the 0.0 to 26.0 real values to the -128 to 127
-    // signed integer numbers.
-    // All this means that to get matching values from our integer feature
-    // output into the tensor input, we have to perform:
-    // input = (((feature / 25.6) / 26.0) * 256) - 128
-    // To simplify this and perform it in 32-bit integer math, we rearrange to:
-    // input = (feature * 256) / (25.6 * 26.0) - 128
-    constexpr int32_t value_scale = 256;
-    constexpr int32_t value_div = static_cast<int32_t>((25.6f * 26.0f) + 0.5f);
-    int32_t value =
-        ((frontend_output.values[i] * value_scale) + (value_div / 2)) /
-        value_div;
-    value -= 128;
-    if (value < -128) {
-      value = -128;
-    }
-    if (value > 127) {
-      value = 127;
-    }
-    output[i] = value;
-  }
-
-  return kTfLiteOk;
-}
diff --git a/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_features_generator.h b/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_features_generator.h
deleted file mode 100644
index 7ee0d2b..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_features_generator.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_FEATURES_GENERATOR_H_
-#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_FEATURES_GENERATOR_H_
-
-#include "tensorflow/lite/c/common.h"
-
-// Sets up any resources needed for the feature generation pipeline.
-TfLiteStatus InitializeMicroFeatures();
-
-// Converts audio sample data into a more compact form that's appropriate for
-// feeding into a neural network.
-TfLiteStatus GenerateMicroFeatures(const int16_t* input, int input_size,
-                                   int output_size, int8_t* output,
-                                   size_t* num_samples_read);
-
-#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_FEATURES_GENERATOR_H_
diff --git a/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_features_generator_test.cc b/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_features_generator_test.cc
deleted file mode 100644
index e41c44f..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_features_generator_test.cc
+++ /dev/null
@@ -1,95 +0,0 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/micro/examples/micro_speech/micro_features/micro_features_generator.h"
-
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/micro/examples/micro_speech/micro_features/no_feature_data_slice.h"
-#include "tensorflow/lite/micro/examples/micro_speech/micro_features/yes_feature_data_slice.h"
-#include "tensorflow/lite/micro/examples/micro_speech/testdata/no_30ms_audio_data.h"
-#include "tensorflow/lite/micro/examples/micro_speech/testdata/yes_30ms_audio_data.h"
-#include "tensorflow/lite/micro/micro_log.h"
-#include "tensorflow/lite/micro/testing/micro_test.h"
-
-// This is a test-only API, not exposed in any public headers, so declare it.
-void SetMicroFeaturesNoiseEstimates(const uint32_t* estimate_presets);
-
-TF_LITE_MICRO_TESTS_BEGIN
-
-TF_LITE_MICRO_TEST(TestMicroFeaturesGeneratorYes) {
-  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, InitializeMicroFeatures());
-
-  // The micro features pipeline retains state from previous calls to help
-  // estimate the background noise. Unfortunately this makes it harder to
-  // exactly reproduce results in a test environment, so use a known snapshot
-  // of the parameters at the point that the golden feature values were
-  // created.
-  const uint32_t yes_estimate_presets[] = {
-      1062898, 2644477, 1257642, 1864718, 412722, 725703, 395721, 474082,
-      173046,  255856,  158966,  153736,  69181,  199100, 144493, 227740,
-      110573,  164330,  79666,   144650,  122947, 476799, 398553, 497493,
-      322152,  1140005, 566716,  690605,  308902, 347481, 109891, 170457,
-      73901,   100975,  42963,   72325,   34183,  20207,  6640,   9468,
-  };
-  SetMicroFeaturesNoiseEstimates(yes_estimate_presets);
-
-  int8_t yes_calculated_data[g_yes_feature_data_slice_size] = {};
-  size_t num_samples_read = 0;
-  TfLiteStatus yes_status = GenerateMicroFeatures(
-      g_yes_30ms_audio_data, g_yes_30ms_audio_data_size,
-      g_yes_feature_data_slice_size, yes_calculated_data, &num_samples_read);
-  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, yes_status);
-
-  for (int i = 0; i < g_yes_feature_data_slice_size; ++i) {
-    const int expected = g_yes_feature_data_slice[i];
-    const int actual = yes_calculated_data[i];
-    TF_LITE_MICRO_EXPECT_EQ(expected, actual);
-    if (expected != actual) {
-      MicroPrintf("Expected value %d but found %d", expected, actual);
-    }
-  }
-}
-
-TF_LITE_MICRO_TEST(TestMicroFeaturesGeneratorNo) {
-  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, InitializeMicroFeatures());
-  // As we did for the previous features, set known good noise state
-  // parameters.
-  const uint32_t no_estimate_presets[] = {
-      2563964, 1909393, 559801, 538670, 203643, 175959, 75088, 139491,
-      59691,   95307,   43865,  129263, 52517,  80058,  51330, 100731,
-      76674,   76262,   15497,  22598,  13778,  21460,  8946,  17806,
-      10023,   18810,   8002,   10842,  7578,   9983,   6267,  10759,
-      8946,    18488,   9691,   39785,  9939,   17835,  9671,  18512,
-  };
-  SetMicroFeaturesNoiseEstimates(no_estimate_presets);
-
-  int8_t no_calculated_data[g_no_feature_data_slice_size] = {};
-  size_t num_samples_read = 0;
-  TfLiteStatus no_status = GenerateMicroFeatures(
-      g_no_30ms_audio_data, g_no_30ms_audio_data_size,
-      g_no_feature_data_slice_size, no_calculated_data, &num_samples_read);
-  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, no_status);
-
-  for (size_t i = 0; i < g_no_feature_data_slice_size; ++i) {
-    const int expected = g_no_feature_data_slice[i];
-    const int actual = no_calculated_data[i];
-    TF_LITE_MICRO_EXPECT_EQ(expected, actual);
-    if (expected != actual) {
-      MicroPrintf("Expected value %d but found %d", expected, actual);
-    }
-  }
-}
-
-TF_LITE_MICRO_TESTS_END
diff --git a/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.cc b/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.cc
deleted file mode 100644
index 47d12ba..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.cc
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h"
-
-const char* kCategoryLabels[kCategoryCount] = {
-    "silence",
-    "unknown",
-    "yes",
-    "no",
-};
diff --git a/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h b/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h
deleted file mode 100644
index e542213..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_MODEL_SETTINGS_H_
-#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_MODEL_SETTINGS_H_
-
-// Keeping these as constant expressions allow us to allocate fixed-sized arrays
-// on the stack for our working memory.
-
-// The size of the input time series data we pass to the FFT to produce the
-// frequency information. This has to be a power of two, and since we're dealing
-// with 30ms of 16KHz inputs, which means 480 samples, this is the next value.
-constexpr int kMaxAudioSampleSize = 512;
-constexpr int kAudioSampleFrequency = 16000;
-
-// The following values are derived from values used during model training.
-// If you change the way you preprocess the input, update all these constants.
-constexpr int kFeatureSliceSize = 40;
-constexpr int kFeatureSliceCount = 49;
-constexpr int kFeatureElementCount = (kFeatureSliceSize * kFeatureSliceCount);
-constexpr int kFeatureSliceStrideMs = 20;
-constexpr int kFeatureSliceDurationMs = 30;
-
-// Variables for the model's output categories.
-constexpr int kSilenceIndex = 0;
-constexpr int kUnknownIndex = 1;
-// If you modify the output categories, you need to update the following values.
-constexpr int kCategoryCount = 4;
-extern const char* kCategoryLabels[kCategoryCount];
-
-#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_MODEL_SETTINGS_H_
diff --git a/tensorflow/lite/micro/examples/micro_speech/micro_features/no_feature_data_slice.cc b/tensorflow/lite/micro/examples/micro_speech/micro_features/no_feature_data_slice.cc
deleted file mode 100644
index 684f702..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/micro_features/no_feature_data_slice.cc
+++ /dev/null
@@ -1,25 +0,0 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-// See the header for documentation on the meaning of this data.
-
-#include "tensorflow/lite/micro/examples/micro_speech/micro_features/no_feature_data_slice.h"
-
-alignas(16) const int8_t
-    g_no_feature_data_slice[g_no_feature_data_slice_size] = {
-        89,  68, 96,  83, 111, 96, 115, 87, 99,  76, 105, 84, 105, 86,
-        113, 91, 108, 87, 110, 78, 80,  46, 22,  74, 88,  72, 103, 86,
-        80,  68, 48,  24, 68,  48, 55,  36, 108, 90, 90,  63,
-};
diff --git a/tensorflow/lite/micro/examples/micro_speech/micro_features/no_feature_data_slice.h b/tensorflow/lite/micro/examples/micro_speech/micro_features/no_feature_data_slice.h
deleted file mode 100644
index 01e6605..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/micro_features/no_feature_data_slice.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-// This data was extracted from the larger feature data held in
-// no_features_data.cc and consists of the 29th spectrogram slice of 43 values.
-// This is the expected result of running the sample data in
-// no_30ms_sample_data.cc through the preprocessing pipeline.
-
-#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_NO_FEATURE_DATA_SLICE_H_
-#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_NO_FEATURE_DATA_SLICE_H_
-
-#include <cstdint>
-
-constexpr int g_no_feature_data_slice_size = 40;
-extern const int8_t g_no_feature_data_slice[];
-
-#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_NO_FEATURE_DATA_SLICE_H_
diff --git a/tensorflow/lite/micro/examples/micro_speech/micro_features/no_micro_features_data.cc b/tensorflow/lite/micro/examples/micro_speech/micro_features/no_micro_features_data.cc
deleted file mode 100644
index f481486..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/micro_features/no_micro_features_data.cc
+++ /dev/null
@@ -1,188 +0,0 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/micro/examples/micro_speech/micro_features/no_micro_features_data.h"
-
-// Golden test values for the expected spectrogram from a "no" sample file
-// speech_commands_test_set_v0.02/no/f9643d42_nohash_4.wav.
-
-const int g_no_micro_f9643d42_nohash_4_width = 40;
-const int g_no_micro_f9643d42_nohash_4_height = 49;
-alignas(16) const signed char g_no_micro_f9643d42_nohash_4_data[] = {
-    103,  78,   64,   76,   75,   54,   53,   67,   77,   60,   56,   70,
-    76,   71,   68,   58,   74,   32,   23,   -2,   -18,  11,   13,   15,
-    9,    20,   5,    -7,   -18,  -2,   -10,  -18,  -10,  -12,  9,    7,
-    -33,  -12,  -4,   -18,  57,   17,   55,   62,   70,   45,   61,   37,
-    67,   52,   48,   47,   55,   46,   57,   47,   73,   17,   27,   20,
-    19,   8,    15,   -6,   -1,   10,   -12,  -29,  -6,   -23,  -18,  -3,
-    -1,   5,    3,    -4,   -12,  -8,   -1,   -14,  65,   48,   58,   43,
-    48,   19,   39,   39,   57,   57,   58,   55,   67,   58,   49,   50,
-    70,   27,   9,    16,   37,   4,    25,   4,    11,   9,    7,    -33,
-    -7,   -12,  3,    -6,   -29,  -7,   -7,   -18,  -12,  -18,  -2,   -1,
-    0,    31,   60,   -8,   51,   59,   70,   40,   71,   57,   52,   38,
-    66,   48,   17,   6,    59,   8,    15,   7,    18,   4,    18,   -23,
-    -8,   -4,   -3,   -12,  -3,   -26,  1,    10,   2,    -29,  -29,  -37,
-    -7,   -4,   6,    -33,  67,   44,   59,   -4,   64,   51,   68,   55,
-    74,   9,    40,   15,   57,   33,   60,   18,   40,   25,   27,   -20,
-    25,   -16,  6,    17,   -10,  -12,  -23,  -43,  -23,  -23,  -29,  -37,
-    -4,   -16,  -16,  -60,  -20,  -23,  -10,  -29,  -12,  15,   12,   -37,
-    27,   15,   61,   44,   50,   8,    48,   22,   49,   -18,  46,   33,
-    42,   34,   46,   -8,   4,    -18,  -43,  -43,  -10,  1,    -10,  -16,
-    -10,  -77,  -16,  -33,  11,   -26,  -23,  -37,  0,    -8,   -16,  -29,
-    42,   40,   68,   24,   47,   46,   53,   -128, 30,   2,    42,   21,
-    21,   -4,   43,   2,    43,   5,    32,   -26,  7,    -37,  -43,  -23,
-    -2,   -8,   2,    -37,  -50,  -60,  -1,   -7,   -33,  -77,  -6,   -18,
-    -16,  -50,  -12,  -33,  53,   8,    52,   18,   51,   35,   69,   26,
-    44,   8,    27,   -128, 21,   -33,  17,   -14,  38,   -128, -14,  -18,
-    17,   -20,  -14,  -37,  8,    -60,  -33,  -33,  -33,  -43,  -12,  -29,
-    -12,  -128, -33,  -60,  -26,  -77,  -26,  -50,  57,   29,   11,   30,
-    53,   -10,  45,   15,   18,   -10,  42,   2,    31,   -29,  10,   -4,
-    42,   -37,  -50,  -128, -4,   -43,  -20,  -77,  -14,  -26,  -33,  -128,
-    -12,  -43,  -8,   -33,  -33,  -60,  -43,  -77,  -12,  -60,  -26,  -50,
-    40,   -23,  36,   35,   50,   -2,   37,   27,   26,   -77,  49,   -7,
-    28,   -43,  6,    11,   41,   -37,  33,   -26,  -14,  -12,  -6,   -33,
-    -16,  -26,  -20,  -77,  -14,  -43,  -8,   -50,  -14,  -37,  -26,  -77,
-    -26,  -77,  -14,  -29,  50,   -60,  25,   -26,  57,   38,   51,   1,
-    50,   1,    53,   -18,  30,   -23,  11,   -128, 18,   -43,  20,   -26,
-    -10,  -26,  -12,  -128, -50,  -60,  -37,  -77,  -20,  -43,  -50,  -128,
-    -77,  -128, -77,  -128, -33,  -77,  -20,  -60,  53,   -10,  -37,  -128,
-    10,   -128, 60,   18,   -8,   13,   37,   -37,  8,    -128, 3,    -77,
-    32,   -29,  14,   10,   -12,  -77,  -37,  -77,  -37,  -60,  -23,  -128,
-    -43,  -50,  -16,  -77,  -6,   -33,  0,    -60,  -43,  -128, -16,  -60,
-    20,   -2,   51,   19,   43,   2,    63,   20,   60,   -4,   42,   -50,
-    4,    -128, 2,    -3,   32,   -33,  -26,  -128, -18,  -128, -33,  -43,
-    -7,   -60,  -50,  -77,  -29,  -77,  -23,  -128, -16,  -26,  -23,  -60,
-    -37,  -77,  -37,  -128, -1,   -33,  39,   48,   60,   5,    8,    -128,
-    44,   11,   4,    0,    13,   -77,  -2,   -20,  33,   -128, -33,  -77,
-    -8,   -128, -14,  -128, -33,  -18,  -12,  -77,  -16,  -128, -37,  -128,
-    -12,  -77,  -60,  -128, -23,  -60,  -23,  -128, 36,   -50,  46,   -128,
-    66,   39,   18,   -14,  -12,  -77,  -20,  -6,   24,   -128, 28,   -26,
-    21,   -77,  -6,   -33,  1,    -128, -43,  -128, -1,   -50,  -37,  -128,
-    -50,  -128, -33,  -128, -18,  -128, -60,  -8,   -7,   -60,  -60,  -128,
-    -6,   -29,  20,   -1,   73,   40,   -43,  -14,  33,   -43,  33,   -3,
-    15,   -29,  29,   -43,  20,   -60,  -29,  -128, -20,  -26,  4,    -77,
-    -16,  -60,  -33,  -50,  -29,  -128, -60,  -128, -77,  -128, -37,  -50,
-    0,    -77,  -33,  -128, 39,   8,    47,   10,   62,   16,   2,    1,
-    10,   7,    4,    -7,   6,    -128, -77,  -50,  19,   -77,  -77,  -128,
-    -77,  -128, -50,  -128, -60,  -60,  -33,  -50,  -37,  -128, -128, -128,
-    -60,  -128, -37,  -60,  -18,  -128, -33,  -77,  37,   23,   29,   -128,
-    -128, -128, -16,  -128, -16,  -33,  21,   -20,  -8,   -60,  -2,   -60,
-    11,   -128, -50,  -128, -50,  -128, -29,  -77,  -16,  -128, -26,  -128,
-    -50,  -77,  -43,  -128, -128, -128, -50,  -128, -33,  -128, -33,  -50,
-    -23,  -128, 24,   -128, -128, -77,  4,    -23,  32,   -128, 1,    -26,
-    -14,  -128, 10,   -77,  -4,   -128, 1,    -50,  -8,   -77,  -77,  -77,
-    -23,  -128, -50,  -43,  -33,  -128, -43,  -128, -128, -128, -43,  -128,
-    -50,  -128, -128, -128, 44,   15,   14,   -128, 9,    -128, 21,   0,
-    29,   -7,   18,   -7,   -7,   -128, -33,  -50,  14,   -60,  -60,  -128,
-    -60,  -128, -37,  -128, -43,  -128, -20,  -128, -50,  -128, -43,  -77,
-    -26,  -128, -60,  -50,  -60,  -128, -77,  -128, -3,   -128, 14,   -77,
-    -26,  11,   47,   -77,  -7,   -77,  45,   -43,  -12,  14,   37,   -60,
-    22,   -4,   5,    -77,  -14,  -128, -10,  -60,  22,   -77,  -12,  -60,
-    -50,  -128, -60,  -128, -60,  -128, -43,  -128, -50,  -128, -77,  -50,
-    27,   -37,  33,   -128, 4,    -29,  -4,   -50,  -20,  -128, 6,    -37,
-    -33,  -128, -50,  -128, 34,   15,   -43,  -128, -20,  -50,  -3,   -37,
-    -37,  -77,  -77,  -128, -43,  -128, -128, -128, 4,    -26,  -26,  27,
-    0,    -128, -29,  -60,  35,   -26,  23,   -128, -29,  -77,  19,   14,
-    28,   -128, -16,  -7,   31,   -1,   17,   11,   60,   44,   8,    11,
-    18,   -128, -33,  -60,  -1,   -128, -43,  -128, -23,  -128, -128, -128,
-    59,   43,   35,   61,   37,   -77,  -77,  -50,  116,  88,   98,   69,
-    78,   53,   78,   40,   48,   7,    29,   -18,  -2,   -14,  5,    12,
-    65,   35,   31,   -12,  33,   -2,   -6,   -1,   44,   -29,  -14,  -60,
-    -4,   -43,  -37,  -128, 29,   18,   38,   51,   8,    -128, -12,  -37,
-    115,  91,   113,  77,   89,   36,   60,   44,   49,   36,   27,   31,
-    63,   30,   62,   14,   55,   49,   42,   0,    45,   17,   -23,  1,
-    30,   -37,  -50,  -77,  -8,   -60,  9,    -60,  -12,  -50,  13,   4,
-    23,   -6,   28,   13,   107,  78,   101,  73,   89,   46,   63,   17,
-    34,   -43,  -6,   30,   67,   40,   77,   21,   53,   39,   38,   12,
-    -6,   5,    28,   -2,   18,   -43,  0,    -128, -29,  -77,  18,   -128,
-    -2,   -77,  39,   35,   38,   35,   50,   29,   100,  70,   94,   69,
-    86,   50,   45,   38,   45,   12,   58,   64,   74,   36,   77,   45,
-    78,   62,   8,    -60,  38,   6,    21,   7,    8,    -37,  -1,   -20,
-    48,   -37,  8,    -10,  8,    13,   45,   39,   38,   22,   49,   25,
-    94,   63,   87,   66,   84,   -128, 29,   20,   55,   51,   80,   36,
-    62,   30,   81,   72,   68,   37,   51,   27,   54,   22,   16,   -29,
-    4,    9,    57,   15,   35,   -43,  -77,  -20,  4,    6,    37,   -1,
-    40,   31,   47,   14,   89,   68,   96,   83,   111,  96,   115,  87,
-    99,   76,   105,  84,   105,  86,   113,  91,   108,  87,   110,  78,
-    80,   46,   22,   74,   88,   72,   103,  86,   80,   68,   48,   24,
-    68,   48,   55,   36,   108,  90,   90,   63,   83,   63,   87,   64,
-    90,   92,   113,  88,   102,  79,   109,  83,   100,  89,   109,  60,
-    56,   21,   75,   62,   81,   45,   63,   73,   93,   65,   94,   80,
-    89,   81,   73,   3,    43,   60,   102,  70,   84,   67,   99,   74,
-    78,   57,   79,   50,   93,   82,   98,   56,   77,   70,   91,   71,
-    85,   82,   86,   13,   45,   -18,  48,   40,   53,   28,   85,   60,
-    65,   52,   86,   78,   76,   46,   73,   19,   35,   54,   75,   40,
-    71,   60,   82,   37,   69,   42,   62,   40,   96,   70,   85,   77,
-    70,   68,   103,  84,   94,   69,   81,   -128, -128, -128, -43,  -37,
-    40,   2,    48,   45,   76,   37,   65,   16,   43,   18,   58,   20,
-    27,   12,   71,   31,   53,   44,   88,   47,   50,   33,   39,   8,
-    89,   57,   88,   69,   72,   63,   100,  68,   81,   -77,  -10,  -128,
-    -128, -128, -128, -128, 13,   -77,  8,    27,   60,   28,   41,   -128,
-    -37,  -128, 28,   -43,  -18,  -128, 47,   -37,  45,   27,   51,   -29,
-    15,   39,   52,   30,   49,   -33,  65,   15,   76,   71,   90,   19,
-    46,   -128, -16,  -128, -128, -128, -128, -128, -128, -128, -18,  -128,
-    -20,  -128, 32,   -128, 21,   -33,  45,   -128, -128, -128, -12,  -128,
-    -6,   -14,  43,   -128, -128, -128, -128, -128, 52,   -18,  69,   -43,
-    78,   55,   42,   -128, -29,  -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, 14,   -128, -16,  -128, -128, -128, 7,    -128,
-    -128, -128, -128, -128, -128, -128, 12,   -128, -128, -128, -128, -16,
-    59,   -50,  35,   -128, 42,   0,    47,   -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -33,  -128, -23,  -128,
-    -128, -128, -23,  -128, -128, -128, -128, -128, -128, -128, -33,  -128,
-    -128, -128, -128, -128, -128, -128, -8,   -128, 36,   -50,  -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -37,  -128, -128, -60,  -10,  -128, -128, -128, -128, -128,
-    -128, -128, 21,   -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -12,  -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -77,  -128, -128, -128, -29,  -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -29,  -128, -128, -128, -128, -128, -128, -128, -128, -128, -50,  -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128,
-};
diff --git a/tensorflow/lite/micro/examples/micro_speech/micro_features/no_micro_features_data.h b/tensorflow/lite/micro/examples/micro_speech/micro_features/no_micro_features_data.h
deleted file mode 100644
index 8c1b6d5..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/micro_features/no_micro_features_data.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_NO_MICRO_FEATURES_DATA_H_
-#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_NO_MICRO_FEATURES_DATA_H_
-
-extern const int g_no_micro_f9643d42_nohash_4_width;
-extern const int g_no_micro_f9643d42_nohash_4_height;
-extern const signed char g_no_micro_f9643d42_nohash_4_data[];
-
-#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_NO_MICRO_FEATURES_DATA_H_
diff --git a/tensorflow/lite/micro/examples/micro_speech/micro_features/yes_feature_data_slice.cc b/tensorflow/lite/micro/examples/micro_speech/micro_features/yes_feature_data_slice.cc
deleted file mode 100644
index e3d006a..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/micro_features/yes_feature_data_slice.cc
+++ /dev/null
@@ -1,25 +0,0 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-// See the header for documentation on the meaning of this data.
-
-#include "tensorflow/lite/micro/examples/micro_speech/micro_features/yes_feature_data_slice.h"
-
-alignas(16) const int8_t
-    g_yes_feature_data_slice[g_yes_feature_data_slice_size] = {
-        86,  88,   108, 75, 108, 76,   98,  64,  75,  61, 71,  66, 85,  -1,
-        -77, -128, 46,  61, 92,  69,   100, 93,  113, 80, 108, 93, 113, 91,
-        110, 80,   85,  15, -33, -128, 12,  -50, 34,  50, 70,  55,
-};
diff --git a/tensorflow/lite/micro/examples/micro_speech/micro_features/yes_feature_data_slice.h b/tensorflow/lite/micro/examples/micro_speech/micro_features/yes_feature_data_slice.h
deleted file mode 100644
index 18faadc..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/micro_features/yes_feature_data_slice.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-// This data was extracted from the larger feature data held in
-// no_micro_features_data.cc and consists of the 26th spectrogram slice of 40
-// values. This is the expected result of running the sample data in
-// yes_30ms_sample_data.cc through the preprocessing pipeline.
-
-#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_YES_FEATURE_DATA_SLICE_H_
-#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_YES_FEATURE_DATA_SLICE_H_
-
-#include <cstdint>
-
-constexpr int g_yes_feature_data_slice_size = 40;
-extern const int8_t g_yes_feature_data_slice[];
-
-#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_YES_FEATURE_DATA_SLICE_H_
diff --git a/tensorflow/lite/micro/examples/micro_speech/micro_features/yes_micro_features_data.cc b/tensorflow/lite/micro/examples/micro_speech/micro_features/yes_micro_features_data.cc
deleted file mode 100644
index 7ee5387..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/micro_features/yes_micro_features_data.cc
+++ /dev/null
@@ -1,188 +0,0 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/micro/examples/micro_speech/micro_features/yes_micro_features_data.h"
-
-// Golden test values for the expected spectrogram from a "yes" sample file
-// speech_commands_test_set_v0.02/yes/f2e59fea_nohash_1.wav.
-
-const int g_yes_micro_f2e59fea_nohash_1_width = 40;
-const int g_yes_micro_f2e59fea_nohash_1_height = 49;
-alignas(16) const signed char g_yes_micro_f2e59fea_nohash_1_data[] = {
-    116,  98,   118,  95,   106,  85,   101,  81,   67,   -18,  -33,  -12,
-    -26,  -128, 9,    34,   56,   45,   9,    -12,  5,    30,   23,   28,
-    0,    -18,  0,    -128, -60,  -50,  -50,  -37,  -60,  -60,  -50,  -26,
-    -33,  -50,  -33,  -50,  83,   61,   81,   55,   76,   61,   73,   64,
-    38,   -8,   -37,  -20,  -18,  -20,  48,   29,   52,   41,   55,   18,
-    25,   37,   44,   37,   8,    15,   -6,   -60,  -128, -50,  -37,  -37,
-    -18,  -37,  -26,  -29,  -37,  -60,  -50,  -60,  95,   59,   52,   -4,
-    54,   -18,  68,   43,   31,   -18,  -26,  -33,  -37,  -29,  33,   7,
-    -3,   8,    26,   24,   36,   6,    36,   23,   14,   8,    -29,  -37,
-    -37,  -37,  -50,  -50,  -26,  -8,   -26,  -37,  -18,  -37,  -60,  -77,
-    50,   48,   83,   44,   56,   -128, -33,  -60,  1,    -26,  -60,  -43,
-    -14,  -23,  -18,  -43,  -26,  -33,  13,   -77,  -43,  -77,  -33,  -37,
-    16,   -12,  -37,  -50,  -50,  -77,  -20,  -43,  -60,  -128, -60,  -77,
-    -37,  -77,  -60,  -128, 37,   -10,  65,   -7,   28,   -128, 10,   -77,
-    -37,  -128, -77,  -128, -77,  -43,  -128, -128, -77,  -128, -128, -128,
-    -128, -128, -14,  -128, -43,  -50,  -37,  -77,  -128, -128, -77,  -43,
-    -29,  -43,  -20,  -60,  -37,  -43,  -50,  -128, -77,  -128, -18,  -128,
-    -60,  -128, -128, -128, -77,  -128, -77,  -128, -128, -128, -60,  -37,
-    -20,  -128, -60,  -128, -128, -128, -60,  -128, -77,  -60,  -128, -50,
-    -60,  -128, -77,  -128, -50,  -60,  -37,  -60,  -50,  -77,  -77,  -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -37,  -128,
-    -128, -128, -128, -128, -77,  -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -77,  -60,  -128, -128, -50,  -128, -50,  -128,
-    -50,  -128, -77,  -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -77,  -128, -77,  -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -77,  -128, -77,  -128, -77,  -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -77,  -128, -128, -128,
-    -128, -77,  -50,  -128, -128, -77,  -77,  -128, -128, -128, -50,  -128,
-    85,   43,   65,   53,   69,   60,   45,   3,    46,   -12,  9,    -23,
-    32,   -1,   -128, -128, -128, -128, -1,   37,   38,   33,   43,   36,
-    58,   70,   68,   39,   6,    10,   32,   6,    8,    -23,  -77,  -128,
-    -29,  -128, -77,  -128, 101,  87,   102,  91,   110,  88,   101,  83,
-    110,  95,   111,  83,   81,   84,   106,  90,   93,   82,   98,   91,
-    108,  95,   118,  97,   118,  97,   116,  96,   113,  90,   110,  96,
-    107,  85,   94,   66,   69,   36,   29,   0,    100,  60,   105,  68,
-    92,   93,   113,  92,   107,  85,   107,  83,   104,  91,   105,  85,
-    112,  88,   101,  80,   101,  79,   96,   80,   98,   80,   105,  83,
-    98,   81,   103,  71,   100,  79,   83,   78,   91,   47,   50,   13,
-    108,  81,   93,   78,   98,   76,   105,  76,   98,   40,   77,   72,
-    81,   62,   93,   77,   96,   80,   98,   61,   97,   69,   88,   61,
-    71,   56,   98,   68,   97,   72,   89,   51,   81,   61,   88,   75,
-    86,   56,   48,   13,   71,   22,   84,   66,   76,   -7,   48,   61,
-    77,   62,   91,   65,   95,   74,   88,   59,   75,   58,   83,   55,
-    87,   55,   76,   43,   76,   -3,   56,   60,   79,   57,   71,   54,
-    82,   33,   74,   71,   91,   45,   18,   -7,   61,   56,   77,   41,
-    73,   42,   82,   49,   59,   63,   82,   65,   66,   38,   83,   34,
-    48,   -8,   46,   20,   54,   33,   54,   6,    48,   16,   60,   37,
-    58,   22,   58,   14,   65,   53,   75,   -4,   42,   16,   16,   -50,
-    22,   -128, 80,   54,   43,   -50,  42,   -128, -10,  -77,  28,   -29,
-    68,   43,   73,   2,    25,   -60,  47,   14,   45,   7,    66,   4,
-    62,   37,   71,   7,    46,   -10,  44,   22,   55,   53,   57,   -29,
-    26,   -10,  -3,   -128, 38,   -128, 46,   -10,  16,   -128, -10,  -26,
-    60,   -7,   65,   38,   70,   -60,  35,   -8,   42,   -29,  6,    -128,
-    34,   -128, 36,   -60,  44,   -12,  -2,   -128, -7,   -60,  -60,  -128,
-    -23,  -128, 31,   -33,  22,   -77,  -37,  -43,  -128, -128, 3,    -128,
-    -23,  -128, 17,   -77,  43,   -77,  -7,   -128, -20,  -128, 17,   -43,
-    32,   -128, -43,  -128, -128, -77,  21,   -128, -50,  -128, -128, -128,
-    -128, -128, -128, -128, -37,  -128, -16,  -128, -50,  -26,  -6,   -128,
-    -128, -128, -128, -128, -23,  -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -16,  -128, 36,   -7,   16,   -128, -128, -128, -128, -128,
-    -77,  -128, -37,  -128, -50,  -128, -128, -128, -128, -128, -18,  -128,
-    11,   -128, -16,  -77,  -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -26,  -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -20,  -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -50,  -128, -77,  -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -77,  -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -1,   -18,  5,    -128,
-    40,   -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, 4,    -128, 63,   66,   75,   -128,
-    70,   60,   34,   -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    87,   86,   95,   76,   91,   62,   72,   -6,   -50,  -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, 64,   83,   104,  70,
-    98,   90,   111,  89,   109,  80,   71,   -128, -128, -128, -128, -128,
-    -20,  -6,   27,   33,   86,   88,   108,  75,   108,  76,   98,   64,
-    75,   61,   71,   66,   85,   -1,   -77,  -128, 46,   61,   92,   69,
-    100,  93,   113,  80,   108,  93,   113,  91,   110,  80,   85,   15,
-    -33,  -128, 12,   -50,  34,   50,   70,   55,   84,   72,   108,  81,
-    111,  88,   100,  80,   84,   73,   97,   86,   99,   65,   85,   43,
-    96,   78,   107,  94,   118,  98,   115,  92,   118,  94,   111,  93,
-    111,  86,   99,   52,   32,   -16,  48,   31,   81,   74,   85,   64,
-    78,   64,   98,   70,   110,  92,   96,   73,   100,  72,   94,   73,
-    98,   76,   85,   67,   101,  83,   101,  83,   112,  89,   98,   85,
-    105,  78,   98,   72,   102,  80,   95,   23,   19,   -8,   52,   57,
-    103,  91,   95,   65,   74,   8,    77,   49,   96,   76,   100,  87,
-    105,  81,   94,   62,   94,   78,   81,   72,   99,   82,   101,  78,
-    108,  65,   82,   70,   100,  63,   79,   58,   80,   59,   87,   48,
-    50,   57,   93,   67,   86,   80,   103,  56,   77,   31,   81,   57,
-    62,   41,   96,   85,   91,   71,   101,  76,   89,   78,   95,   76,
-    96,   79,   103,  81,   103,  48,   70,   57,   88,   66,   84,   11,
-    85,   67,   104,  37,   38,   67,   90,   54,   81,   62,   90,   52,
-    78,   -60,  54,   -8,   68,   40,   55,   8,    77,   52,   66,   31,
-    55,   13,   60,   26,   69,   42,   63,   -29,  57,   -128, -3,   -128,
-    3,    -128, -29,  -60,  52,   -43,  63,   56,   86,   75,   95,   75,
-    85,   63,   82,   10,   50,   -128, 31,   -77,  0,    -77,  -23,  -128,
-    12,   -77,  51,   -3,   58,   -14,  44,   0,    48,   4,    53,   47,
-    28,   -128, -128, -128, -37,  -128, -3,   -128, 49,   61,   100,  90,
-    117,  88,   107,  94,   112,  64,   96,   83,   -128, -128, 7,    -128,
-    -77,  -128, -23,  -128, -23,  -128, 16,   -37,  65,   -8,   48,   20,
-    14,   -77,  57,   -18,  -43,  -128, -128, -128, -128, -128, -128, -128,
-    24,   12,   74,   76,   105,  76,   99,   80,   108,  79,   103,  85,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    42,   -128, -8,   -128, -50,  -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -60,  -128, -128, 5,    73,   53,   93,   70,   101,  73,
-    94,   57,   86,   66,   -18,  -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -50,  -128, 36,   -128, -128, -128, -128, -128, -20,  -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, 23,   37,
-    75,   54,   97,   70,   83,   52,   85,   65,   7,    -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -43,  -128, 23,   -128, -43,  -128,
-    -33,  -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -26,  -37,  65,   33,   76,   37,   73,   50,   77,   47,
-    -12,  -128, -128, -128, -128, -128, -128, -128, -128, -128, -7,   -14,
-    -4,   -128, -14,  -128, 18,   -60,  -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -26,  -60,  71,   42,   68,   53,
-    81,   49,   73,   36,   -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -18,  -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, 15,   -26,
-    44,   -18,  59,   39,   57,   20,   62,   26,   -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, 49,   -128, 30,   8,    69,   27,   62,   38,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -43,  -128, 28,   -37,  48,   -10,
-    48,   11,   74,   37,   -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -77,  -128, 11,   -128, -7,   -60,  -77,  -4,   -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -8,   -128, -50,  -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-    -128, -128, -128, -128,
-};
diff --git a/tensorflow/lite/micro/examples/micro_speech/micro_features/yes_micro_features_data.h b/tensorflow/lite/micro/examples/micro_speech/micro_features/yes_micro_features_data.h
deleted file mode 100644
index cd1ad10..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/micro_features/yes_micro_features_data.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_YES_MICRO_FEATURES_DATA_H_
-#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_YES_MICRO_FEATURES_DATA_H_
-
-extern const int g_yes_micro_f2e59fea_nohash_1_width;
-extern const int g_yes_micro_f2e59fea_nohash_1_height;
-extern const signed char g_yes_micro_f2e59fea_nohash_1_data[];
-
-#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_YES_MICRO_FEATURES_DATA_H_
diff --git a/tensorflow/lite/micro/examples/micro_speech/micro_model_settings.h b/tensorflow/lite/micro/examples/micro_speech/micro_model_settings.h
new file mode 100644
index 0000000..9d5b046
--- /dev/null
+++ b/tensorflow/lite/micro/examples/micro_speech/micro_model_settings.h
@@ -0,0 +1,37 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_MODEL_SETTINGS_H_
+#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_MODEL_SETTINGS_H_
+
+// The following values are derived from values used during model training.
+// If you change the way you preprocess the input, update all these constants.
+constexpr int kAudioSampleFrequency = 16000;
+constexpr int kFeatureSize = 40;
+constexpr int kFeatureCount = 49;
+constexpr int kFeatureElementCount = (kFeatureSize * kFeatureCount);
+constexpr int kFeatureStrideMs = 20;
+constexpr int kFeatureDurationMs = 30;
+
+// Variables for the model's output categories.
+constexpr int kCategoryCount = 4;
+constexpr const char* kCategoryLabels[kCategoryCount] = {
+    "silence",
+    "unknown",
+    "yes",
+    "no",
+};
+
+#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_MODEL_SETTINGS_H_
diff --git a/tensorflow/lite/micro/examples/micro_speech/micro_speech_binary_mock_test.sh b/tensorflow/lite/micro/examples/micro_speech/micro_speech_binary_mock_test.sh
deleted file mode 100755
index 0515d7c..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/micro_speech_binary_mock_test.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-#
-# Bash unit tests for the example binary.
-
-set -e
-
-OUTPUT_LOG_FILE=${TEST_TMPDIR}/output_log.txt
-${TEST_SRCDIR}/${TEST_WORKSPACE}/tensorflow/lite/micro/examples/micro_speech/micro_speech_mock 2>&1 | head > ${OUTPUT_LOG_FILE}
-
-if ! grep -q 'Heard ' ${OUTPUT_LOG_FILE}; then
-  echo "ERROR: Expected logs not found in output '${OUTPUT_LOG_FILE}'"
-  exit 1
-fi
-
-echo
-echo "SUCCESS: micro_speech_binary_mock_test PASSED"
diff --git a/tensorflow/lite/micro/examples/micro_speech/micro_speech_test.cc b/tensorflow/lite/micro/examples/micro_speech/micro_speech_test.cc
index 56cb156..0191958 100644
--- a/tensorflow/lite/micro/examples/micro_speech/micro_speech_test.cc
+++ b/tensorflow/lite/micro/examples/micro_speech/micro_speech_test.cc
@@ -1,4 +1,4 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -13,132 +13,268 @@
 limitations under the License.
 ==============================================================================*/
 
-#include "tensorflow/lite/micro/examples/micro_speech/micro_features/no_micro_features_data.h"
-#include "tensorflow/lite/micro/examples/micro_speech/micro_features/yes_micro_features_data.h"
-#include "tensorflow/lite/micro/examples/micro_speech/micro_speech_model_data.h"
+#include <algorithm>
+#include <cstdint>
+#include <iterator>
+
+#include "tensorflow/lite/core/c/common.h"
+#include "tensorflow/lite/micro/examples/micro_speech/micro_model_settings.h"
+#include "tensorflow/lite/micro/examples/micro_speech/models/audio_preprocessor_int8_model_data.h"
+#include "tensorflow/lite/micro/examples/micro_speech/models/micro_speech_quantized_model_data.h"
+#include "tensorflow/lite/micro/examples/micro_speech/testdata/no_1000ms_audio_data.h"
+#include "tensorflow/lite/micro/examples/micro_speech/testdata/no_30ms_audio_data.h"
+#include "tensorflow/lite/micro/examples/micro_speech/testdata/noise_1000ms_audio_data.h"
+#include "tensorflow/lite/micro/examples/micro_speech/testdata/silence_1000ms_audio_data.h"
+#include "tensorflow/lite/micro/examples/micro_speech/testdata/yes_1000ms_audio_data.h"
+#include "tensorflow/lite/micro/examples/micro_speech/testdata/yes_30ms_audio_data.h"
 #include "tensorflow/lite/micro/micro_interpreter.h"
 #include "tensorflow/lite/micro/micro_log.h"
 #include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
 #include "tensorflow/lite/micro/testing/micro_test.h"
-#include "tensorflow/lite/schema/schema_generated.h"
+
+namespace {
+
+// Arena size is a guesstimate, followed by use of
+// MicroInterpreter::arena_used_bytes() on both the AudioPreprocessor and
+// MicroSpeech models and using the larger of the two results.
+constexpr size_t kArenaSize = 28584;  // xtensa p6
+alignas(16) uint8_t g_arena[kArenaSize];
+
+using Features = int8_t[kFeatureCount][kFeatureSize];
+Features g_features;
+
+constexpr int kAudioSampleDurationCount =
+    kFeatureDurationMs * kAudioSampleFrequency / 1000;
+constexpr int kAudioSampleStrideCount =
+    kFeatureStrideMs * kAudioSampleFrequency / 1000;
+
+using MicroSpeechOpResolver = tflite::MicroMutableOpResolver<4>;
+using AudioPreprocessorOpResolver = tflite::MicroMutableOpResolver<18>;
+
+TfLiteStatus RegisterOps(MicroSpeechOpResolver& op_resolver) {
+  TF_LITE_ENSURE_STATUS(op_resolver.AddReshape());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddFullyConnected());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddDepthwiseConv2D());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddSoftmax());
+  return kTfLiteOk;
+}
+
+TfLiteStatus RegisterOps(AudioPreprocessorOpResolver& op_resolver) {
+  TF_LITE_ENSURE_STATUS(op_resolver.AddReshape());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddCast());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddStridedSlice());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddConcatenation());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddMul());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddAdd());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddDiv());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddMinimum());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddMaximum());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddWindow());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddFftAutoScale());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddRfft());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddEnergy());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddFilterBank());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddFilterBankSquareRoot());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddFilterBankSpectralSubtraction());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddPCAN());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddFilterBankLog());
+  return kTfLiteOk;
+}
+
+TfLiteStatus LoadMicroSpeechModelAndPerformInference(
+    const Features& features, const char* expected_label) {
+  // Map the model into a usable data structure. This doesn't involve any
+  // copying or parsing, it's a very lightweight operation.
+  const tflite::Model* model =
+      tflite::GetModel(g_micro_speech_quantized_model_data);
+  TF_LITE_MICRO_EXPECT(model->version() == TFLITE_SCHEMA_VERSION);
+  TF_LITE_MICRO_CHECK_FAIL();
+
+  MicroSpeechOpResolver op_resolver;
+  TF_LITE_MICRO_EXPECT(RegisterOps(op_resolver) == kTfLiteOk);
+  TF_LITE_MICRO_CHECK_FAIL();
+
+  tflite::MicroInterpreter interpreter(model, op_resolver, g_arena, kArenaSize);
+
+  TF_LITE_MICRO_EXPECT(interpreter.AllocateTensors() == kTfLiteOk);
+  TF_LITE_MICRO_CHECK_FAIL();
+
+  MicroPrintf("MicroSpeech model arena size = %u",
+              interpreter.arena_used_bytes());
+
+  TfLiteTensor* input = interpreter.input(0);
+  TF_LITE_MICRO_EXPECT(input != nullptr);
+  TF_LITE_MICRO_CHECK_FAIL();
+  // check input shape is compatible with our feature data size
+  TF_LITE_MICRO_EXPECT_EQ(kFeatureElementCount,
+                          input->dims->data[input->dims->size - 1]);
+  TF_LITE_MICRO_CHECK_FAIL();
+
+  TfLiteTensor* output = interpreter.output(0);
+  TF_LITE_MICRO_EXPECT(output != nullptr);
+  TF_LITE_MICRO_CHECK_FAIL();
+  // check output shape is compatible with our number of prediction categories
+  TF_LITE_MICRO_EXPECT_EQ(kCategoryCount,
+                          output->dims->data[output->dims->size - 1]);
+  TF_LITE_MICRO_CHECK_FAIL();
+
+  float output_scale = output->params.scale;
+  int output_zero_point = output->params.zero_point;
+
+  std::copy_n(&features[0][0], kFeatureElementCount,
+              tflite::GetTensorData<int8_t>(input));
+  TF_LITE_MICRO_EXPECT(interpreter.Invoke() == kTfLiteOk);
+  TF_LITE_MICRO_CHECK_FAIL();
+
+  // Dequantize output values
+  float category_predictions[kCategoryCount];
+  MicroPrintf("MicroSpeech category predictions for <%s>", expected_label);
+  for (int i = 0; i < kCategoryCount; i++) {
+    category_predictions[i] =
+        (tflite::GetTensorData<int8_t>(output)[i] - output_zero_point) *
+        output_scale;
+    MicroPrintf("  %.4f %s", static_cast<double>(category_predictions[i]),
+                kCategoryLabels[i]);
+  }
+  int prediction_index =
+      std::distance(std::begin(category_predictions),
+                    std::max_element(std::begin(category_predictions),
+                                     std::end(category_predictions)));
+  TF_LITE_MICRO_EXPECT_STRING_EQ(expected_label,
+                                 kCategoryLabels[prediction_index]);
+  TF_LITE_MICRO_CHECK_FAIL();
+
+  return kTfLiteOk;
+}
+
+TfLiteStatus GenerateSingleFeature(const int16_t* audio_data,
+                                   const int audio_data_size,
+                                   int8_t* feature_output,
+                                   tflite::MicroInterpreter* interpreter) {
+  TfLiteTensor* input = interpreter->input(0);
+  TF_LITE_MICRO_EXPECT(input != nullptr);
+  TF_LITE_MICRO_CHECK_FAIL();
+  // check input shape is compatible with our audio sample size
+  TF_LITE_MICRO_EXPECT_EQ(kAudioSampleDurationCount, audio_data_size);
+  TF_LITE_MICRO_CHECK_FAIL();
+  TF_LITE_MICRO_EXPECT_EQ(kAudioSampleDurationCount,
+                          input->dims->data[input->dims->size - 1]);
+  TF_LITE_MICRO_CHECK_FAIL();
+
+  TfLiteTensor* output = interpreter->output(0);
+  TF_LITE_MICRO_EXPECT(output != nullptr);
+  TF_LITE_MICRO_CHECK_FAIL();
+  // check output shape is compatible with our feature size
+  TF_LITE_MICRO_EXPECT_EQ(kFeatureSize,
+                          output->dims->data[output->dims->size - 1]);
+  TF_LITE_MICRO_CHECK_FAIL();
+
+  std::copy_n(audio_data, audio_data_size,
+              tflite::GetTensorData<int16_t>(input));
+  TF_LITE_MICRO_EXPECT(interpreter->Invoke() == kTfLiteOk);
+  TF_LITE_MICRO_CHECK_FAIL();
+  std::copy_n(tflite::GetTensorData<int8_t>(output), kFeatureSize,
+              feature_output);
+
+  return kTfLiteOk;
+}
+
+TfLiteStatus GenerateFeatures(const int16_t* audio_data,
+                              const size_t audio_data_size,
+                              Features* features_output) {
+  // Map the model into a usable data structure. This doesn't involve any
+  // copying or parsing, it's a very lightweight operation.
+  const tflite::Model* model =
+      tflite::GetModel(g_audio_preprocessor_int8_model_data);
+  TF_LITE_MICRO_EXPECT(model->version() == TFLITE_SCHEMA_VERSION);
+  TF_LITE_MICRO_CHECK_FAIL();
+
+  AudioPreprocessorOpResolver op_resolver;
+  TF_LITE_MICRO_EXPECT(RegisterOps(op_resolver) == kTfLiteOk);
+  TF_LITE_MICRO_CHECK_FAIL();
+
+  tflite::MicroInterpreter interpreter(model, op_resolver, g_arena, kArenaSize);
+
+  TF_LITE_MICRO_EXPECT(interpreter.AllocateTensors() == kTfLiteOk);
+  TF_LITE_MICRO_CHECK_FAIL();
+
+  MicroPrintf("AudioPreprocessor model arena size = %u",
+              interpreter.arena_used_bytes());
+
+  size_t remaining_samples = audio_data_size;
+  size_t feature_index = 0;
+  while (remaining_samples >= kAudioSampleDurationCount &&
+         feature_index < kFeatureCount) {
+    TF_LITE_ENSURE_STATUS(
+        GenerateSingleFeature(audio_data, kAudioSampleDurationCount,
+                              (*features_output)[feature_index], &interpreter));
+    feature_index++;
+    audio_data += kAudioSampleStrideCount;
+    remaining_samples -= kAudioSampleStrideCount;
+  }
+
+  return kTfLiteOk;
+}
+
+TfLiteStatus TestAudioSample(const char* label, const int16_t* audio_data,
+                             const size_t audio_data_size) {
+  TF_LITE_ENSURE_STATUS(
+      GenerateFeatures(audio_data, audio_data_size, &g_features));
+  TF_LITE_ENSURE_STATUS(
+      LoadMicroSpeechModelAndPerformInference(g_features, label));
+  return kTfLiteOk;
+}
+
+}  // namespace
 
 TF_LITE_MICRO_TESTS_BEGIN
 
-TF_LITE_MICRO_TEST(TestInvoke) {
-  // Map the model into a usable data structure. This doesn't involve any
-  // copying or parsing, it's a very lightweight operation.
-  const tflite::Model* model = ::tflite::GetModel(g_micro_speech_model_data);
-  if (model->version() != TFLITE_SCHEMA_VERSION) {
-    MicroPrintf(
-        "Model provided is schema version %d not equal "
-        "to supported version %d.\n",
-        model->version(), TFLITE_SCHEMA_VERSION);
+TF_LITE_MICRO_TEST(NoFeatureTest) {
+  int8_t expected_feature[kFeatureSize] = {
+      126, 103, 124, 102, 124, 102, 123, 100, 118, 97, 118, 100, 118, 98,
+      121, 100, 121, 98,  117, 91,  96,  74,  54,  87, 100, 87,  109, 92,
+      91,  80,  64,  55,  83,  74,  74,  78,  114, 95, 101, 81,
+  };
+
+  TF_LITE_ENSURE_STATUS(GenerateFeatures(
+      g_no_30ms_audio_data, g_no_30ms_audio_data_size, &g_features));
+  for (size_t i = 0; i < kFeatureSize; i++) {
+    TF_LITE_MICRO_EXPECT_EQ(g_features[0][i], expected_feature[i]);
+    TF_LITE_MICRO_CHECK_FAIL();
   }
+}
 
-  // Pull in only the operation implementations we need.
-  // This relies on a complete list of all the ops needed by this graph.
+TF_LITE_MICRO_TEST(YesFeatureTest) {
+  int8_t expected_feature[kFeatureSize] = {
+      124, 105, 126, 103, 125, 101, 123, 100, 116, 98,  115, 97,  113, 90,
+      91,  82,  104, 96,  117, 97,  121, 103, 126, 101, 125, 104, 126, 104,
+      125, 101, 116, 90,  81,  74,  80,  71,  83,  76,  82,  71,
+  };
 
-  tflite::MicroMutableOpResolver<4> micro_op_resolver;
-  micro_op_resolver.AddDepthwiseConv2D();
-  micro_op_resolver.AddFullyConnected();
-  micro_op_resolver.AddReshape();
-  micro_op_resolver.AddSoftmax();
-
-  // Create an area of memory to use for input, output, and intermediate arrays.
-#if (defined(XTENSA) && defined(VISION_P6))
-  constexpr int tensor_arena_size = 28 * 1024;
-#elif defined(XTENSA)
-  constexpr int tensor_arena_size = 15 * 1024;
-#elif defined(HEXAGON)
-  constexpr int tensor_arena_size = 25 * 1024;
-#else
-  constexpr int tensor_arena_size = 10 * 1024;
-#endif
-  alignas(16) uint8_t tensor_arena[tensor_arena_size];
-
-  // Build an interpreter to run the model with.
-  tflite::MicroInterpreter interpreter(model, micro_op_resolver, tensor_arena,
-                                       tensor_arena_size);
-  interpreter.AllocateTensors();
-
-  // Get information about the memory area to use for the model's input.
-  TfLiteTensor* input = interpreter.input(0);
-
-  // Make sure the input has the properties we expect.
-  TF_LITE_MICRO_EXPECT(input != nullptr);
-  TF_LITE_MICRO_EXPECT_EQ(2, input->dims->size);
-  TF_LITE_MICRO_EXPECT_EQ(1, input->dims->data[0]);
-  TF_LITE_MICRO_EXPECT_EQ(1960, input->dims->data[1]);
-  TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt8, input->type);
-
-  // Copy a spectrogram created from a .wav audio file of someone saying "Yes",
-  // into the memory area used for the input.
-  const int8_t* yes_features_data = g_yes_micro_f2e59fea_nohash_1_data;
-  for (size_t i = 0; i < input->bytes; ++i) {
-    input->data.int8[i] = yes_features_data[i];
+  TF_LITE_ENSURE_STATUS(GenerateFeatures(
+      g_yes_30ms_audio_data, g_yes_30ms_audio_data_size, &g_features));
+  for (size_t i = 0; i < kFeatureSize; i++) {
+    TF_LITE_MICRO_EXPECT_EQ(g_features[0][i], expected_feature[i]);
+    TF_LITE_MICRO_CHECK_FAIL();
   }
+}
 
-  // Run the model on this input and make sure it succeeds.
-  TfLiteStatus invoke_status = interpreter.Invoke();
-  if (invoke_status != kTfLiteOk) {
-    MicroPrintf("Invoke failed\n");
-  }
-  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status);
+TF_LITE_MICRO_TEST(NoTest) {
+  TestAudioSample("no", g_no_1000ms_audio_data, g_no_1000ms_audio_data_size);
+}
 
-  // Get the output from the model, and make sure it's the expected size and
-  // type.
-  TfLiteTensor* output = interpreter.output(0);
-  TF_LITE_MICRO_EXPECT_EQ(2, output->dims->size);
-  TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[0]);
-  TF_LITE_MICRO_EXPECT_EQ(4, output->dims->data[1]);
-  TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt8, output->type);
+TF_LITE_MICRO_TEST(YesTest) {
+  TestAudioSample("yes", g_yes_1000ms_audio_data, g_yes_1000ms_audio_data_size);
+}
 
-  // There are four possible classes in the output, each with a score.
-  const int kSilenceIndex = 0;
-  const int kUnknownIndex = 1;
-  const int kYesIndex = 2;
-  const int kNoIndex = 3;
+TF_LITE_MICRO_TEST(SilenceTest) {
+  TestAudioSample("silence", g_silence_1000ms_audio_data,
+                  g_silence_1000ms_audio_data_size);
+}
 
-  // Make sure that the expected "Yes" score is higher than the other classes.
-  uint8_t silence_score = output->data.int8[kSilenceIndex] + 128;
-  uint8_t unknown_score = output->data.int8[kUnknownIndex] + 128;
-  uint8_t yes_score = output->data.int8[kYesIndex] + 128;
-  uint8_t no_score = output->data.int8[kNoIndex] + 128;
-  TF_LITE_MICRO_EXPECT_GT(yes_score, silence_score);
-  TF_LITE_MICRO_EXPECT_GT(yes_score, unknown_score);
-  TF_LITE_MICRO_EXPECT_GT(yes_score, no_score);
-
-  // Now test with a different input, from a recording of "No".
-  const int8_t* no_features_data = g_no_micro_f9643d42_nohash_4_data;
-  for (size_t i = 0; i < input->bytes; ++i) {
-    input->data.int8[i] = no_features_data[i];
-  }
-
-  // Run the model on this "No" input.
-  invoke_status = interpreter.Invoke();
-  if (invoke_status != kTfLiteOk) {
-    MicroPrintf("Invoke failed\n");
-  }
-  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status);
-
-  // Get the output from the model, and make sure it's the expected size and
-  // type.
-  output = interpreter.output(0);
-  TF_LITE_MICRO_EXPECT_EQ(2, output->dims->size);
-  TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[0]);
-  TF_LITE_MICRO_EXPECT_EQ(4, output->dims->data[1]);
-  TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt8, output->type);
-
-  // Make sure that the expected "No" score is higher than the other classes.
-  silence_score = output->data.int8[kSilenceIndex] + 128;
-  unknown_score = output->data.int8[kUnknownIndex] + 128;
-  yes_score = output->data.int8[kYesIndex] + 128;
-  no_score = output->data.int8[kNoIndex] + 128;
-  TF_LITE_MICRO_EXPECT_GT(no_score, silence_score);
-  TF_LITE_MICRO_EXPECT_GT(no_score, unknown_score);
-  TF_LITE_MICRO_EXPECT_GT(no_score, yes_score);
-
-  MicroPrintf("Ran successfully\n");
+TF_LITE_MICRO_TEST(NoiseTest) {
+  TestAudioSample("silence", g_noise_1000ms_audio_data,
+                  g_noise_1000ms_audio_data_size);
 }
 
 TF_LITE_MICRO_TESTS_END
diff --git a/tensorflow/lite/micro/examples/micro_speech/models/audio_preprocessor_float.tflite b/tensorflow/lite/micro/examples/micro_speech/models/audio_preprocessor_float.tflite
new file mode 100644
index 0000000..8f91ec7
--- /dev/null
+++ b/tensorflow/lite/micro/examples/micro_speech/models/audio_preprocessor_float.tflite
Binary files differ
diff --git a/tensorflow/lite/micro/examples/micro_speech/models/audio_preprocessor_int8.tflite b/tensorflow/lite/micro/examples/micro_speech/models/audio_preprocessor_int8.tflite
new file mode 100644
index 0000000..790087b
--- /dev/null
+++ b/tensorflow/lite/micro/examples/micro_speech/models/audio_preprocessor_int8.tflite
Binary files differ
diff --git a/tensorflow/lite/micro/examples/micro_speech/micro_speech.tflite b/tensorflow/lite/micro/examples/micro_speech/models/micro_speech_quantized.tflite
similarity index 100%
rename from tensorflow/lite/micro/examples/micro_speech/micro_speech.tflite
rename to tensorflow/lite/micro/examples/micro_speech/models/micro_speech_quantized.tflite
Binary files differ
diff --git a/tensorflow/lite/micro/examples/micro_speech/recognize_commands.cc b/tensorflow/lite/micro/examples/micro_speech/recognize_commands.cc
deleted file mode 100644
index 99edb47..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/recognize_commands.cc
+++ /dev/null
@@ -1,139 +0,0 @@
-/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/micro/examples/micro_speech/recognize_commands.h"
-
-#include <limits>
-
-#include "tensorflow/lite/micro/micro_log.h"
-
-RecognizeCommands::RecognizeCommands(int32_t average_window_duration_ms,
-                                     uint8_t detection_threshold,
-                                     int32_t suppression_ms,
-                                     int32_t minimum_count)
-    : average_window_duration_ms_(average_window_duration_ms),
-      detection_threshold_(detection_threshold),
-      suppression_ms_(suppression_ms),
-      minimum_count_(minimum_count),
-      previous_results_() {
-  previous_top_label_ = "silence";
-  previous_top_label_time_ = std::numeric_limits<int32_t>::min();
-}
-
-TfLiteStatus RecognizeCommands::ProcessLatestResults(
-    const TfLiteTensor* latest_results, const int32_t current_time_ms,
-    const char** found_command, uint8_t* score, bool* is_new_command) {
-  if ((latest_results->dims->size != 2) ||
-      (latest_results->dims->data[0] != 1) ||
-      (latest_results->dims->data[1] != kCategoryCount)) {
-    MicroPrintf(
-        "The results for recognition should contain %d elements, but there are "
-        "%d in an %d-dimensional shape",
-        kCategoryCount, latest_results->dims->data[1],
-        latest_results->dims->size);
-    return kTfLiteError;
-  }
-
-  if (latest_results->type != kTfLiteInt8) {
-    MicroPrintf(
-        "The results for recognition should be int8_t elements, but are %d",
-        latest_results->type);
-    return kTfLiteError;
-  }
-
-  if ((!previous_results_.empty()) &&
-      (current_time_ms < previous_results_.front().time_)) {
-    MicroPrintf(
-        "Results must be fed in increasing time order, but received a "
-        "timestamp of %d that was earlier than the previous one of %d",
-        current_time_ms, previous_results_.front().time_);
-    return kTfLiteError;
-  }
-
-  // Add the latest results to the head of the queue.
-  previous_results_.push_back({current_time_ms, latest_results->data.int8});
-
-  // Prune any earlier results that are too old for the averaging window.
-  const int64_t time_limit = current_time_ms - average_window_duration_ms_;
-  while ((!previous_results_.empty()) &&
-         previous_results_.front().time_ < time_limit) {
-    previous_results_.pop_front();
-  }
-
-  // If there are too few results, assume the result will be unreliable and
-  // bail.
-  const int64_t how_many_results = previous_results_.size();
-  const int64_t earliest_time = previous_results_.front().time_;
-  const int64_t samples_duration = current_time_ms - earliest_time;
-  if ((how_many_results < minimum_count_) ||
-      (samples_duration < (average_window_duration_ms_ / 4))) {
-    *found_command = previous_top_label_;
-    *score = 0;
-    *is_new_command = false;
-    return kTfLiteOk;
-  }
-
-  // Calculate the average score across all the results in the window.
-  int32_t average_scores[kCategoryCount];
-  for (int offset = 0; offset < previous_results_.size(); ++offset) {
-    PreviousResultsQueue::Result previous_result =
-        previous_results_.from_front(offset);
-    const int8_t* scores = previous_result.scores;
-    for (int i = 0; i < kCategoryCount; ++i) {
-      if (offset == 0) {
-        average_scores[i] = scores[i] + 128;
-      } else {
-        average_scores[i] += scores[i] + 128;
-      }
-    }
-  }
-  for (int i = 0; i < kCategoryCount; ++i) {
-    average_scores[i] /= how_many_results;
-  }
-
-  // Find the current highest scoring category.
-  int current_top_index = 0;
-  int32_t current_top_score = 0;
-  for (int i = 0; i < kCategoryCount; ++i) {
-    if (average_scores[i] > current_top_score) {
-      current_top_score = average_scores[i];
-      current_top_index = i;
-    }
-  }
-  const char* current_top_label = kCategoryLabels[current_top_index];
-
-  // If we've recently had another label trigger, assume one that occurs too
-  // soon afterwards is a bad result.
-  int64_t time_since_last_top;
-  if ((previous_top_label_ == kCategoryLabels[0]) ||
-      (previous_top_label_time_ == std::numeric_limits<int32_t>::min())) {
-    time_since_last_top = std::numeric_limits<int32_t>::max();
-  } else {
-    time_since_last_top = current_time_ms - previous_top_label_time_;
-  }
-  if ((current_top_score > detection_threshold_) &&
-      ((current_top_label != previous_top_label_) ||
-       (time_since_last_top > suppression_ms_))) {
-    previous_top_label_ = current_top_label;
-    previous_top_label_time_ = current_time_ms;
-    *is_new_command = true;
-  } else {
-    *is_new_command = false;
-  }
-  *found_command = current_top_label;
-  *score = current_top_score;
-
-  return kTfLiteOk;
-}
diff --git a/tensorflow/lite/micro/examples/micro_speech/recognize_commands.h b/tensorflow/lite/micro/examples/micro_speech/recognize_commands.h
deleted file mode 100644
index 8a5a895..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/recognize_commands.h
+++ /dev/null
@@ -1,151 +0,0 @@
-/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_RECOGNIZE_COMMANDS_H_
-#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_RECOGNIZE_COMMANDS_H_
-
-#include <cstdint>
-
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h"
-#include "tensorflow/lite/micro/micro_log.h"
-
-// Partial implementation of std::dequeue, just providing the functionality
-// that's needed to keep a record of previous neural network results over a
-// short time period, so they can be averaged together to produce a more
-// accurate overall prediction. This doesn't use any dynamic memory allocation
-// so it's a better fit for microcontroller applications, but this does mean
-// there are hard limits on the number of results it can store.
-class PreviousResultsQueue {
- public:
-  PreviousResultsQueue() : front_index_(0), size_(0) {}
-
-  // Data structure that holds an inference result, and the time when it
-  // was recorded.
-  struct Result {
-    Result() : time_(0), scores() {}
-    Result(int32_t time, int8_t* input_scores) : time_(time) {
-      for (int i = 0; i < kCategoryCount; ++i) {
-        scores[i] = input_scores[i];
-      }
-    }
-    int32_t time_;
-    int8_t scores[kCategoryCount];
-  };
-
-  int size() { return size_; }
-  bool empty() { return size_ == 0; }
-  Result& front() { return results_[front_index_]; }
-  Result& back() {
-    int back_index = front_index_ + (size_ - 1);
-    if (back_index >= kMaxResults) {
-      back_index -= kMaxResults;
-    }
-    return results_[back_index];
-  }
-
-  void push_back(const Result& entry) {
-    if (size() >= kMaxResults) {
-      MicroPrintf("Couldn't push_back latest result, too many already!");
-      return;
-    }
-    size_ += 1;
-    back() = entry;
-  }
-
-  Result pop_front() {
-    if (size() <= 0) {
-      MicroPrintf("Couldn't pop_front result, none present!");
-      return Result();
-    }
-    Result result = front();
-    front_index_ += 1;
-    if (front_index_ >= kMaxResults) {
-      front_index_ = 0;
-    }
-    size_ -= 1;
-    return result;
-  }
-
-  // Most of the functions are duplicates of dequeue containers, but this
-  // is a helper that makes it easy to iterate through the contents of the
-  // queue.
-  Result& from_front(int offset) {
-    if ((offset < 0) || (offset >= size_)) {
-      MicroPrintf("Attempt to read beyond the end of the queue!");
-      offset = size_ - 1;
-    }
-    int index = front_index_ + offset;
-    if (index >= kMaxResults) {
-      index -= kMaxResults;
-    }
-    return results_[index];
-  }
-
- private:
-  static constexpr int kMaxResults = 50;
-  Result results_[kMaxResults];
-
-  int front_index_;
-  int size_;
-};
-
-// This class is designed to apply a very primitive decoding model on top of the
-// instantaneous results from running an audio recognition model on a single
-// window of samples. It applies smoothing over time so that noisy individual
-// label scores are averaged, increasing the confidence that apparent matches
-// are real.
-// To use it, you should create a class object with the configuration you
-// want, and then feed results from running a TensorFlow model into the
-// processing method. The timestamp for each subsequent call should be
-// increasing from the previous, since the class is designed to process a stream
-// of data over time.
-class RecognizeCommands {
- public:
-  // labels should be a list of the strings associated with each one-hot score.
-  // The window duration controls the smoothing. Longer durations will give a
-  // higher confidence that the results are correct, but may miss some commands.
-  // The detection threshold has a similar effect, with high values increasing
-  // the precision at the cost of recall. The minimum count controls how many
-  // results need to be in the averaging window before it's seen as a reliable
-  // average. This prevents erroneous results when the averaging window is
-  // initially being populated for example. The suppression argument disables
-  // further recognitions for a set time after one has been triggered, which can
-  // help reduce spurious recognitions.
-  explicit RecognizeCommands(int32_t average_window_duration_ms = 1000,
-                             uint8_t detection_threshold = 200,
-                             int32_t suppression_ms = 1500,
-                             int32_t minimum_count = 3);
-
-  // Call this with the results of running a model on sample data.
-  TfLiteStatus ProcessLatestResults(const TfLiteTensor* latest_results,
-                                    const int32_t current_time_ms,
-                                    const char** found_command, uint8_t* score,
-                                    bool* is_new_command);
-
- private:
-  // Configuration
-  int32_t average_window_duration_ms_;
-  uint8_t detection_threshold_;
-  int32_t suppression_ms_;
-  int32_t minimum_count_;
-
-  // Working variables
-  PreviousResultsQueue previous_results_;
-  const char* previous_top_label_;
-  int32_t previous_top_label_time_;
-};
-
-#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_RECOGNIZE_COMMANDS_H_
diff --git a/tensorflow/lite/micro/examples/micro_speech/recognize_commands_test.cc b/tensorflow/lite/micro/examples/micro_speech/recognize_commands_test.cc
deleted file mode 100644
index 7c1e4c6..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/recognize_commands_test.cc
+++ /dev/null
@@ -1,199 +0,0 @@
-/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/micro/examples/micro_speech/recognize_commands.h"
-
-#include "tensorflow/lite/micro/test_helpers.h"
-#include "tensorflow/lite/micro/testing/micro_test.h"
-
-TF_LITE_MICRO_TESTS_BEGIN
-
-TF_LITE_MICRO_TEST(PreviousResultsQueueBasic) {
-  PreviousResultsQueue queue;
-  TF_LITE_MICRO_EXPECT_EQ(0, queue.size());
-
-  int8_t scores_a[4] = {0, 0, 0, 1};
-  queue.push_back({0, scores_a});
-  TF_LITE_MICRO_EXPECT_EQ(1, queue.size());
-  TF_LITE_MICRO_EXPECT_EQ(0, queue.front().time_);
-  TF_LITE_MICRO_EXPECT_EQ(0, queue.back().time_);
-
-  int8_t scores_b[4] = {0, 0, 1, 0};
-  queue.push_back({1, scores_b});
-  TF_LITE_MICRO_EXPECT_EQ(2, queue.size());
-  TF_LITE_MICRO_EXPECT_EQ(0, queue.front().time_);
-  TF_LITE_MICRO_EXPECT_EQ(1, queue.back().time_);
-
-  PreviousResultsQueue::Result pop_result = queue.pop_front();
-  TF_LITE_MICRO_EXPECT_EQ(0, pop_result.time_);
-  TF_LITE_MICRO_EXPECT_EQ(1, queue.size());
-  TF_LITE_MICRO_EXPECT_EQ(1, queue.front().time_);
-  TF_LITE_MICRO_EXPECT_EQ(1, queue.back().time_);
-
-  int8_t scores_c[4] = {0, 1, 0, 0};
-  queue.push_back({2, scores_c});
-  TF_LITE_MICRO_EXPECT_EQ(2, queue.size());
-  TF_LITE_MICRO_EXPECT_EQ(1, queue.front().time_);
-  TF_LITE_MICRO_EXPECT_EQ(2, queue.back().time_);
-}
-
-TF_LITE_MICRO_TEST(PreviousResultsQueuePushPop) {
-  PreviousResultsQueue queue;
-  TF_LITE_MICRO_EXPECT_EQ(0, queue.size());
-
-  for (int i = 0; i < 123; ++i) {
-    int8_t scores[4] = {0, 0, 0, 1};
-    queue.push_back({i, scores});
-    TF_LITE_MICRO_EXPECT_EQ(1, queue.size());
-    TF_LITE_MICRO_EXPECT_EQ(i, queue.front().time_);
-    TF_LITE_MICRO_EXPECT_EQ(i, queue.back().time_);
-
-    PreviousResultsQueue::Result pop_result = queue.pop_front();
-    TF_LITE_MICRO_EXPECT_EQ(i, pop_result.time_);
-    TF_LITE_MICRO_EXPECT_EQ(0, queue.size());
-  }
-}
-
-TF_LITE_MICRO_TEST(RecognizeCommandsTestBasic) {
-  RecognizeCommands recognize_commands;
-
-  const int8_t result_data[] = {127, -128, -128, -128};
-  int result_dims[] = {2, 1, 4};
-  TfLiteTensor results = tflite::testing::CreateQuantizedTensor(
-      result_data, tflite::testing::IntArrayFromInts(result_dims), -128.0f,
-      127.0f);
-
-  const char* found_command;
-  uint8_t score;
-  bool is_new_command;
-  TF_LITE_MICRO_EXPECT_EQ(
-      kTfLiteOk, recognize_commands.ProcessLatestResults(
-                     &results, 0, &found_command, &score, &is_new_command));
-}
-
-TF_LITE_MICRO_TEST(RecognizeCommandsTestFindCommands) {
-  RecognizeCommands recognize_commands(1000, 51);
-
-  const int8_t yes_data[] = {-128, -128, 127, -128};
-  int yes_dims[] = {2, 1, 4};
-  TfLiteTensor yes_results = tflite::testing::CreateQuantizedTensor(
-      yes_data, tflite::testing::IntArrayFromInts(yes_dims), -128.0f, 127.0f);
-
-  bool has_found_new_command = false;
-  const char* new_command;
-  for (int i = 0; i < 10; ++i) {
-    const char* found_command;
-    uint8_t score;
-    bool is_new_command;
-    int32_t current_time_ms = 0 + (i * 100);
-    TF_LITE_MICRO_EXPECT_EQ(
-        kTfLiteOk, recognize_commands.ProcessLatestResults(
-                       &yes_results, current_time_ms, &found_command, &score,
-                       &is_new_command));
-    if (is_new_command) {
-      TF_LITE_MICRO_EXPECT(!has_found_new_command);
-      has_found_new_command = true;
-      new_command = found_command;
-    }
-  }
-  TF_LITE_MICRO_EXPECT(has_found_new_command);
-  if (has_found_new_command) {
-    TF_LITE_MICRO_EXPECT_EQ(0, tflite::testing::TestStrcmp("yes", new_command));
-  }
-
-  const int8_t no_data[] = {-128, -128, -128, 127};
-  int no_dims[] = {2, 1, 4};
-  TfLiteTensor no_results = tflite::testing::CreateQuantizedTensor(
-      no_data, tflite::testing::IntArrayFromInts(no_dims), -128.0f, 127.0f);
-  has_found_new_command = false;
-  new_command = "";
-  uint8_t score;
-  for (int i = 0; i < 10; ++i) {
-    const char* found_command;
-    bool is_new_command;
-    int32_t current_time_ms = 1000 + (i * 100);
-    TF_LITE_MICRO_EXPECT_EQ(
-        kTfLiteOk, recognize_commands.ProcessLatestResults(
-                       &no_results, current_time_ms, &found_command, &score,
-                       &is_new_command));
-    if (is_new_command) {
-      TF_LITE_MICRO_EXPECT(!has_found_new_command);
-      has_found_new_command = true;
-      new_command = found_command;
-    }
-  }
-  TF_LITE_MICRO_EXPECT(has_found_new_command);
-  if (has_found_new_command) {
-    TF_LITE_MICRO_EXPECT_EQ(231, score);
-    TF_LITE_MICRO_EXPECT_EQ(0, tflite::testing::TestStrcmp("no", new_command));
-  }
-}
-
-TF_LITE_MICRO_TEST(RecognizeCommandsTestBadInputLength) {
-  RecognizeCommands recognize_commands(1000, 51);
-
-  const int8_t bad_data[] = {-128, -128, 127};
-  int bad_dims[] = {2, 1, 3};
-  TfLiteTensor bad_results = tflite::testing::CreateQuantizedTensor(
-      bad_data, tflite::testing::IntArrayFromInts(bad_dims), -128.0f, 127.0f);
-
-  const char* found_command;
-  uint8_t score;
-  bool is_new_command;
-  TF_LITE_MICRO_EXPECT_NE(
-      kTfLiteOk, recognize_commands.ProcessLatestResults(
-                     &bad_results, 0, &found_command, &score, &is_new_command));
-}
-
-TF_LITE_MICRO_TEST(RecognizeCommandsTestBadInputTimes) {
-  RecognizeCommands recognize_commands(1000, 51);
-
-  const int8_t result_data[] = {-128, -128, 127, -128};
-  int result_dims[] = {2, 1, 4};
-  TfLiteTensor results = tflite::testing::CreateQuantizedTensor(
-      result_data, tflite::testing::IntArrayFromInts(result_dims), -128.0f,
-      127.0f);
-
-  const char* found_command;
-  uint8_t score;
-  bool is_new_command;
-  TF_LITE_MICRO_EXPECT_EQ(
-      kTfLiteOk, recognize_commands.ProcessLatestResults(
-                     &results, 100, &found_command, &score, &is_new_command));
-  TF_LITE_MICRO_EXPECT_NE(
-      kTfLiteOk, recognize_commands.ProcessLatestResults(
-                     &results, 0, &found_command, &score, &is_new_command));
-}
-
-TF_LITE_MICRO_TEST(RecognizeCommandsTestTooFewInputs) {
-  RecognizeCommands recognize_commands(1000, 51);
-
-  const int8_t result_data[] = {-128, -128, 127, -128};
-  int result_dims[] = {2, 1, 4};
-  TfLiteTensor results = tflite::testing::CreateQuantizedTensor(
-      result_data, tflite::testing::IntArrayFromInts(result_dims), -128.0f,
-      127.0f);
-
-  const char* found_command;
-  uint8_t score;
-  bool is_new_command;
-  TF_LITE_MICRO_EXPECT_EQ(
-      kTfLiteOk, recognize_commands.ProcessLatestResults(
-                     &results, 100, &found_command, &score, &is_new_command));
-  TF_LITE_MICRO_EXPECT_EQ(0, score);
-  TF_LITE_MICRO_EXPECT_EQ(false, is_new_command);
-}
-
-TF_LITE_MICRO_TESTS_END
diff --git a/tensorflow/lite/micro/examples/micro_speech/simple_features/CMSIS/simple_features_generator.cc b/tensorflow/lite/micro/examples/micro_speech/simple_features/CMSIS/simple_features_generator.cc
deleted file mode 100644
index 33c1e24..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/simple_features/CMSIS/simple_features_generator.cc
+++ /dev/null
@@ -1,96 +0,0 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/micro/examples/micro_speech/simple_features/simple_features_generator.h"
-
-#include "tensorflow/lite/micro/micro_log.h"
-
-extern "C" {
-#define IFFT_FLAG_R 0
-#define BIT_REVERSE_FLAG 1
-#define FFT_SIZE 512
-#define FFT_SIZE_DIV2 256
-#include <arm_math.h>
-
-#include "arm_cmplx_mag_squared_q10p6.h"
-#include "tensorflow/lite/micro/examples/micro_speech/CMSIS/hanning.h"
-}
-
-void quantize(q15_t* bufA, q15_t* bufB, uint8_t* output);
-
-q15_t bufA[FFT_SIZE];
-q15_t bufB[FFT_SIZE];
-arm_rfft_instance_q15 S_arm_fft;
-arm_status arm_math_status;
-
-namespace {
-// These constants allow us to allocate fixed-sized arrays on the stack for our
-// working memory.
-constexpr int kInputSize = 512;
-constexpr int kAverageWindowSize = 6;
-constexpr int kOutputSize =
-    ((kInputSize / 2) + (kAverageWindowSize - 1)) / kAverageWindowSize;
-}  // namespace
-
-TfLiteStatus GenerateSimpleFeatures(const int16_t* input, int input_size,
-                                    int output_size, uint8_t* output) {
-  if (input_size > kInputSize) {
-    MicroPrintf("Input size %d larger than %d", input_size, kInputSize);
-    return kTfLiteError;
-  }
-  if (output_size != kOutputSize) {
-    MicroPrintf("Requested output size %d doesn't match %d", output_size,
-                kOutputSize);
-    return kTfLiteError;
-  }
-
-  // 30ms at 16 kHz = 480 samples
-  // We want to pad the rest of the 512-sample buffer with zeros
-  arm_mult_q15((q15_t*)input, g_hanning, bufB, 480);
-  int i;
-  for (i = 480; i < 512; i++) {
-    bufB[i] = 0;
-  }
-
-  // Should move init code outside of Preprocess() function
-  arm_math_status =
-      arm_rfft_init_q15(&S_arm_fft, FFT_SIZE, IFFT_FLAG_R, BIT_REVERSE_FLAG);
-  arm_rfft_q15(&S_arm_fft, bufB, bufA);
-
-  // The rfft function packs data as follows:
-  // {real[0], real[N/2], real[1], imag[1], ..., real[N/2-1], imag[N/2-1]}
-  // Below we pack as follows:
-  // {real[0], 0, real[1], imag[1], ..., real[N/2-1], imag[N/2-1, real[N/2], 0}
-  bufA[FFT_SIZE_DIV2] = bufA[1];
-  bufA[FFT_SIZE_DIV2 + 1] = 0;
-  bufA[1] = 0;
-  arm_cmplx_mag_squared_q10p6(bufA, bufB, FFT_SIZE_DIV2 + 1);
-
-  quantize(bufA, bufB, output);
-
-  return kTfLiteOk;
-}
-
-void quantize(q15_t* bufA, q15_t* bufB, uint8_t* output) {
-  int i;
-  for (i = 0; i < 42; i++) {
-    arm_mean_q15(bufB + 6 * i, 6, bufA + i);
-  }
-  arm_mean_q15(bufB + 252, 5, bufA + 42);
-
-  for (i = 0; i < 43; i++) {
-    output[i] = (uint8_t)(bufA[i] >> 5);
-  }
-}
diff --git a/tensorflow/lite/micro/examples/micro_speech/simple_features/fixed_point/simple_features_generator.cc b/tensorflow/lite/micro/examples/micro_speech/simple_features/fixed_point/simple_features_generator.cc
deleted file mode 100644
index 03e8b27..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/simple_features/fixed_point/simple_features_generator.cc
+++ /dev/null
@@ -1,212 +0,0 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-// Reference implementation of the preprocessing pipeline, with the same
-// results as the audio tutorial at
-// https://www.tensorflow.org/tutorials/sequences/audio_recognition
-// This module takes 30ms of PCM-encoded signed 16-bit audio samples (at 16KHz,
-// so 480 values), and extracts a power spectrum of frequencies. There are 43
-// frequency bands in the result, derived from the original 256 output from the
-// discrete Fourier transform, and averaged together in groups of 6.
-// It's expected that most platforms will have optimized versions of the
-// functions used here, for example replacing the DFT with an FFT, so this
-// version shouldn't be used where performance is critical.
-// This implementation uses fixed point for any non-constant calculations,
-// instead of floating point, to help show how this can work on platforms that
-// don't have good float support.
-
-#include "tensorflow/lite/micro/examples/micro_speech/simple_features/simple_features_generator.h"
-
-#include <cmath>
-
-#include "tensorflow/lite/micro/examples/micro_speech/simple_features/simple_model_settings.h"
-#include "tensorflow/lite/micro/micro_log.h"
-
-namespace {
-
-// q format notation: qx.y => 1 sign bit, x-1 integer bits, y fraction bits.
-// Use standard (non-saturating) arithmetic with signed ints of size x+y bits.
-// Sacrifice some precision to avoid use of 64-bit ints.
-
-// q1.15 * q1.15 => q2.30
-inline int32_t Q1_15_FixedMultiply_Q2_30(int16_t a, int16_t b) {
-  int32_t big_a = a;
-  int32_t big_b = b;
-  return big_a * big_b;
-}
-
-// q2.30 * q2.30 => q10.22
-inline int32_t Q2_30_FixedMultiply_Q10_22(int32_t a, int32_t b) {
-  // q2.30 result
-  int32_t tmp = (a >> 15) * (b >> 15);
-  // q10.22 result
-  return tmp >> 8;
-}
-
-// q10.22 * q10.22 => q10.22
-// Will overflow if product is >= 512.
-// Largest product in small test set is 465.25
-inline int32_t Q10_22_FixedMultiply_Q10_22(int32_t a, int32_t b) {
-  // q10.22 result
-  return (a >> 11) * (b >> 11);
-}
-
-// float => q2.30
-// No checking for saturation.  Only used for inputs in range [-1, 1].
-inline int32_t FloatToFixed_Q2_30(float input) {
-  return static_cast<int32_t>(roundf(input * (1 << 30)));
-}
-
-// Performs a discrete Fourier transform on the real inputs. This corresponds to
-// rdft() in the FFT package at http://www.kurims.kyoto-u.ac.jp/~ooura/fft.html,
-// and to kiss_fftr() in KISSFFT at https://github.com/mborgerding/kissfft.
-// It takes in an array of float real values, and returns a result of the same
-// length with q10.22 fixed point real and imaginary components interleaved, so
-// fourier_output[0] is the first real value, fourier_output[1] is the first
-// imaginary, fourier_output[2] is the second real, and so on.
-// The calling function should ensure that the array passed in as fourier_output
-// is at least time_series_size in length. Most optimized FFT implementations
-// require the length to be a power of two as well, but this version doesn't
-// enforce that.
-
-// input: q2.30 fixed point.  output: q10.22 fixed point.
-// Outputs interpreted as q10.22 fixed point are un-scaled.
-void CalculateDiscreteFourierTransform(int32_t* time_series,
-                                       int time_series_size,
-                                       int32_t* fourier_output) {
-  for (int i = 0; i < time_series_size / 2; ++i) {
-    int32_t real = 0;
-    for (int j = 0; j < time_series_size; ++j) {
-      const int32_t real_scale =
-          FloatToFixed_Q2_30(cos(j * i * M_PI * 2 / time_series_size));
-      real += Q2_30_FixedMultiply_Q10_22(time_series[j], real_scale);
-    }
-    int32_t imaginary = 0;
-    for (int j = 0; j < time_series_size; ++j) {
-      const int32_t imaginary_scale =
-          FloatToFixed_Q2_30(sin(j * i * M_PI * 2 / time_series_size));
-      imaginary -= Q2_30_FixedMultiply_Q10_22(time_series[j], imaginary_scale);
-    }
-    fourier_output[(i * 2) + 0] = real;
-    fourier_output[(i * 2) + 1] = imaginary;
-  }
-}
-
-// Produces a simple sine curve that is used to ensure frequencies at the center
-// of the current sample window are weighted more heavily than those at the end.
-// q1.15 output format.
-void CalculatePeriodicHann(int window_length, int16_t* window_function) {
-  for (int i = 0; i < window_length; ++i) {
-    const float real_value = (0.5 - 0.5 * cos((2 * M_PI * i) / window_length));
-    int tmp = static_cast<int32_t>(roundf(real_value * (1 << 15)));
-    // Saturate the 0x8000 value to 0x7fff
-    if (tmp > 0x7fff) tmp = 0x7fff;
-    window_function[i] = tmp;
-  }
-}
-
-}  // namespace
-
-TfLiteStatus GenerateSimpleFeatures(const int16_t* input, int input_size,
-                                    int output_size, uint8_t* output) {
-  // Ensure our input and output data arrays are valid.
-  if (input_size > kMaxAudioSampleSize) {
-    MicroPrintf("Input size %d larger than %d", input_size,
-                kMaxAudioSampleSize);
-    return kTfLiteError;
-  }
-  if (output_size != kFeatureSliceSize) {
-    MicroPrintf("Requested output size %d doesn't match %d", output_size,
-                kFeatureSliceSize);
-    return kTfLiteError;
-  }
-
-  // Pre-calculate the window function we'll be applying to the input data.
-  // In a real application, we'd calculate this table once in an initialization
-  // function and store it for repeated reuse.
-  // q1.15 format.
-  int16_t window_function[kMaxAudioSampleSize];
-  CalculatePeriodicHann(input_size, window_function);
-
-  // Apply the window function to our time series input, and pad it with zeroes
-  // to the next power of two.
-  int32_t fixed_input[kMaxAudioSampleSize];
-  for (int i = 0; i < kMaxAudioSampleSize; ++i) {
-    if (i < input_size) {
-      // input is int16_t.  Treat as q1.15 fixed point value in range [-1,1)
-      // window_function is also q1.15 fixed point number
-      fixed_input[i] = Q1_15_FixedMultiply_Q2_30(input[i], window_function[i]);
-    } else {
-      fixed_input[i] = 0;
-    }
-  }
-
-  // Pull the frequency data from the time series sample.
-  // Calculated in q10.22 format from q2.30 inputs.
-  int32_t fourier_values[kMaxAudioSampleSize];
-  CalculateDiscreteFourierTransform(fixed_input, kMaxAudioSampleSize,
-                                    fourier_values);
-
-  // We have the complex numbers giving us information about each frequency
-  // band, but all we want to know is how strong each frequency is, so calculate
-  // the squared magnitude by adding together the squares of each component.
-  int32_t power_spectrum[kMaxAudioSampleSize / 2];
-  for (int i = 0; i < (kMaxAudioSampleSize / 2); ++i) {
-    const int32_t real = fourier_values[(i * 2) + 0];
-    const int32_t imaginary = fourier_values[(i * 2) + 1];
-    // q10.22 results
-    power_spectrum[i] = Q10_22_FixedMultiply_Q10_22(real, real) +
-                        Q10_22_FixedMultiply_Q10_22(imaginary, imaginary);
-  }
-
-  // Finally, reduce the size of the output by averaging together six adjacent
-  // frequencies into each slot, producing an array of 43 values.
-  // Power_spectrum numbers are q10.22.  Divide by kAverageWindowSize inside
-  // loop to prevent overflow.
-  for (int i = 0; i < kFeatureSliceSize; ++i) {
-    int32_t average = 0;
-    for (int j = 0; j < kAverageWindowSize; ++j) {
-      const int index = (i * kAverageWindowSize) + j;
-      if (index < (kMaxAudioSampleSize / 2)) {
-        average += power_spectrum[index] / kAverageWindowSize;
-      }
-    }
-    // Quantize the result into eight bits, effectively multiplying by two.
-    // The 127.5 constant here has to match the features_max value defined in
-    // tensorflow/examples/speech_commands/input_data.py, and this also assumes
-    // that features_min is zero.
-    //
-    // q10.22 input
-    // integer output
-    //
-    // output = (input - features_min) *
-    //     (output_max - output_min) / (features_max - features_min)
-    // == (input) * (255) / (127.5)
-    // == input * 2
-    // == input << 1
-    // Also want to round to nearest integer and only keep integer bits
-    // => ((input << 1) + 0x200000) >> 22
-    // == (input + 0x100000) >> 21
-    int32_t quantized_average = (average + 0x100000) >> 21;
-    if (quantized_average < 0) {
-      quantized_average = 0;
-    }
-    if (quantized_average > 255) {
-      quantized_average = 255;
-    }
-    output[i] = quantized_average;
-  }
-  return kTfLiteOk;
-}
diff --git a/tensorflow/lite/micro/examples/micro_speech/simple_features/model.cc b/tensorflow/lite/micro/examples/micro_speech/simple_features/model.cc
deleted file mode 100644
index e8fea5b..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/simple_features/model.cc
+++ /dev/null
@@ -1,1674 +0,0 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-// This is a standard TensorFlow Lite FlatBuffer model file that has been
-// converted into a C data array, so it can be easily compiled into a binary
-// for devices that don't have a file system. It was created using the command:
-// xxd -i model.tflite > model.cc
-
-#include "tensorflow/lite/micro/examples/micro_speech/simple_features/model.h"
-
-const unsigned char g_model[] = {
-    0x18, 0x00, 0x00, 0x00, 0x54, 0x46, 0x4c, 0x33, 0x00, 0x00, 0x0e, 0x00,
-    0x18, 0x00, 0x04, 0x00, 0x08, 0x00, 0x0c, 0x00, 0x10, 0x00, 0x14, 0x00,
-    0x0e, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x08, 0x4d, 0x00, 0x00,
-    0x0c, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
-    0x01, 0x00, 0x00, 0x00, 0xf4, 0x47, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00,
-    0x54, 0x4f, 0x43, 0x4f, 0x20, 0x43, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74,
-    0x65, 0x64, 0x2e, 0x00, 0x09, 0x00, 0x00, 0x00, 0xd4, 0x47, 0x00, 0x00,
-    0xb4, 0x47, 0x00, 0x00, 0xe4, 0x02, 0x00, 0x00, 0xb4, 0x02, 0x00, 0x00,
-    0xac, 0x02, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00,
-    0x0c, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xb8, 0xb3, 0xff, 0xff,
-    0xbc, 0xb3, 0xff, 0xff, 0xc0, 0xb3, 0xff, 0xff, 0x1e, 0xb4, 0xff, 0xff,
-    0x04, 0x00, 0x00, 0x00, 0x80, 0x02, 0x00, 0x00, 0x89, 0xa5, 0xe8, 0xc1,
-    0xb1, 0x89, 0x5b, 0xc6, 0x4f, 0x9b, 0xd3, 0x74, 0x93, 0x88, 0xff, 0xaf,
-    0x89, 0xff, 0xf4, 0x70, 0xcc, 0x75, 0x78, 0xbf, 0x92, 0xcd, 0xa9, 0xa8,
-    0xd6, 0x6a, 0x6f, 0x7b, 0x7f, 0xd8, 0xa8, 0xb1, 0xe6, 0x32, 0x21, 0x70,
-    0xa0, 0x9c, 0x6f, 0xc8, 0xc6, 0x59, 0x67, 0x93, 0x97, 0xca, 0x3f, 0xde,
-    0xcb, 0x74, 0x7c, 0xb5, 0xa4, 0xd9, 0x66, 0xc6, 0x87, 0x98, 0xa5, 0xd0,
-    0xbb, 0xb9, 0xc2, 0xb2, 0xaa, 0x79, 0x25, 0xb9, 0x6d, 0x5a, 0xc8, 0x7f,
-    0x70, 0x85, 0x79, 0xbc, 0x6a, 0x9b, 0xd1, 0x9a, 0x9c, 0x51, 0x53, 0x71,
-    0x89, 0xc0, 0xb4, 0xac, 0xae, 0x47, 0x67, 0x70, 0x79, 0xd2, 0x81, 0xa5,
-    0xd2, 0x09, 0x38, 0x82, 0x74, 0xc9, 0x5d, 0xaf, 0xc1, 0x4f, 0x53, 0x99,
-    0xcb, 0xb7, 0x3a, 0xba, 0xe8, 0x7f, 0x76, 0xb9, 0xb3, 0xd3, 0x60, 0xc0,
-    0x93, 0x9f, 0x87, 0xbd, 0xd0, 0xb8, 0xca, 0xc1, 0xb6, 0x6c, 0x01, 0xc1,
-    0x5c, 0x5d, 0xb2, 0x82, 0x76, 0x77, 0x39, 0xbc, 0x72, 0x6a, 0xc3, 0xb4,
-    0x79, 0x21, 0x48, 0x42, 0x86, 0xa6, 0xbd, 0xaf, 0xae, 0x23, 0x9c, 0x69,
-    0x78, 0xc3, 0x6b, 0xb3, 0xab, 0x43, 0xb2, 0x88, 0x71, 0xc6, 0x6b, 0xbe,
-    0xc3, 0x75, 0xc2, 0xc3, 0xa5, 0xcf, 0x32, 0xbe, 0xcb, 0xb0, 0xb8, 0xc1,
-    0x9c, 0xcf, 0x64, 0xc4, 0xb4, 0x96, 0xa8, 0xb9, 0xcb, 0xc0, 0xc0, 0xb8,
-    0xb8, 0x77, 0x65, 0xc0, 0xc4, 0xb3, 0xc5, 0x77, 0x9b, 0x61, 0xd4, 0xac,
-    0x7e, 0x36, 0xb1, 0xae, 0x36, 0x36, 0xb8, 0x39, 0x6b, 0x70, 0x9c, 0xb5,
-    0x88, 0x5c, 0xb3, 0x6a, 0xad, 0xc5, 0x7b, 0xb4, 0xad, 0xaa, 0xc4, 0x84,
-    0x5e, 0xc4, 0x67, 0xc1, 0xde, 0xba, 0xcf, 0xbd, 0xa0, 0xd3, 0x35, 0xb3,
-    0xe7, 0xc8, 0xb8, 0xb8, 0xaf, 0xb4, 0x59, 0xb8, 0xb4, 0xac, 0xac, 0xaa,
-    0xc7, 0xad, 0xc8, 0xb6, 0xac, 0x99, 0xa0, 0xcb, 0xc1, 0xc8, 0xcb, 0x89,
-    0xc3, 0xac, 0xca, 0x8b, 0x97, 0x1f, 0xbd, 0xbf, 0x13, 0xad, 0xc8, 0x41,
-    0x56, 0x3c, 0x86, 0xb2, 0x61, 0xc4, 0xbb, 0x71, 0xba, 0x92, 0x8d, 0xc3,
-    0x86, 0xcb, 0xc5, 0x8d, 0x88, 0xc8, 0x6a, 0xbf, 0x9c, 0xcd, 0xcd, 0xc0,
-    0x81, 0xb1, 0x47, 0xb5, 0xf0, 0xce, 0xb1, 0xc1, 0xaa, 0xa8, 0x54, 0xcb,
-    0xbc, 0xc7, 0xc5, 0x8e, 0xc3, 0xce, 0xc7, 0xb9, 0xb9, 0xa1, 0xc5, 0xbd,
-    0xb8, 0xb8, 0xb7, 0x81, 0xb6, 0xba, 0xd2, 0x90, 0xbc, 0x96, 0xbe, 0xba,
-    0x53, 0xb5, 0xc7, 0x3c, 0x3c, 0x1f, 0x90, 0xaa, 0x5a, 0xb8, 0xba, 0x7e,
-    0xbc, 0x9e, 0xc2, 0xb1, 0x6e, 0xc0, 0xc4, 0x91, 0xf0, 0xb5, 0x60, 0xad,
-    0x73, 0xba, 0xcd, 0xba, 0x6e, 0x94, 0x39, 0xb5, 0xe4, 0xbe, 0xb4, 0xb5,
-    0xa0, 0xa9, 0x51, 0xac, 0xbc, 0xc2, 0xb3, 0x8a, 0xbd, 0x9a, 0xca, 0xb3,
-    0xbf, 0xaf, 0xb5, 0x9a, 0xb9, 0xc3, 0xb6, 0x92, 0xb5, 0xc1, 0xb0, 0x95,
-    0xd6, 0xcc, 0xbb, 0xbb, 0xa9, 0xb9, 0xac, 0x4a, 0x62, 0x27, 0xa7, 0xa7,
-    0x30, 0xbd, 0xb1, 0x73, 0xa1, 0x74, 0xc2, 0xb7, 0x58, 0xc0, 0xae, 0x8f,
-    0xe1, 0xac, 0x4e, 0xb0, 0x55, 0xc9, 0xc8, 0x9f, 0x83, 0x8e, 0x3e, 0xd5,
-    0xb5, 0xbe, 0xcd, 0xb2, 0xa6, 0xc8, 0x64, 0xac, 0xc0, 0xc8, 0xaf, 0x99,
-    0xc5, 0x9e, 0xb8, 0xbd, 0xa9, 0xc2, 0xb3, 0x81, 0xb4, 0xc2, 0xb4, 0x8f,
-    0xbc, 0xb8, 0x9c, 0x88, 0xbe, 0xc6, 0xbf, 0xba, 0xc8, 0xb4, 0xab, 0x5b,
-    0x92, 0x51, 0xb1, 0x9a, 0x44, 0xb9, 0xab, 0x80, 0xa5, 0x3e, 0xc0, 0xa5,
-    0x5c, 0xb6, 0xa8, 0xa2, 0xb3, 0x9a, 0x6b, 0xb3, 0x34, 0xc6, 0x7e, 0x96,
-    0xcb, 0x88, 0x48, 0xc6, 0xa3, 0xbb, 0xd2, 0xa2, 0xaf, 0xd0, 0x6e, 0xae,
-    0xb4, 0xce, 0xc8, 0x8f, 0xd7, 0xad, 0xc8, 0xb0, 0xae, 0xb7, 0xb2, 0x70,
-    0xb9, 0xad, 0xc1, 0xa0, 0xcb, 0xa2, 0xb0, 0x9b, 0xbe, 0xd3, 0xca, 0xb6,
-    0xbd, 0xaf, 0xa9, 0x82, 0xa1, 0xd7, 0xbc, 0x9b, 0x8b, 0xac, 0xaa, 0xac,
-    0xad, 0x37, 0xb7, 0xb6, 0x46, 0xae, 0xa9, 0xbd, 0x6b, 0x90, 0x5e, 0xcd,
-    0x23, 0xa4, 0x76, 0xa1, 0xc4, 0x96, 0x50, 0xcc, 0x95, 0x99, 0x93, 0xa7,
-    0xb2, 0xe1, 0x7c, 0xbd, 0xbd, 0xb5, 0xbf, 0x9a, 0xca, 0x80, 0xd7, 0xae,
-    0x79, 0xa8, 0xaa, 0xb2, 0xbc, 0x51, 0xda, 0xa3, 0x80, 0x8b, 0xa2, 0xc8,
-    0xd1, 0x94, 0xe1, 0xc4, 0xbd, 0xae, 0xae, 0xcc, 0xb3, 0xca, 0xd5, 0xa1,
-    0xd5, 0xa7, 0xaf, 0xd2, 0xb4, 0x8d, 0xcc, 0xc8, 0x63, 0xa3, 0xa4, 0xdf,
-    0x6f, 0x7e, 0x98, 0xdf, 0x1b, 0x7b, 0x43, 0x99, 0xb0, 0x99, 0x71, 0xdb,
-    0x63, 0x7b, 0x69, 0x9c, 0xba, 0xcd, 0x90, 0xd0, 0xb6, 0xa6, 0x9e, 0x95,
-    0x50, 0xb6, 0xff, 0xff, 0xae, 0xb6, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00,
-    0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00, 0xc7, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x00, 0x00,
-    0xda, 0xb6, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0xc0, 0x44, 0x00, 0x00,
-    0x2c, 0x30, 0x38, 0x5a, 0x3d, 0x4c, 0x44, 0x3b, 0x48, 0x48, 0x44, 0x57,
-    0x3f, 0x43, 0x45, 0x3a, 0x24, 0x32, 0x21, 0x5c, 0x3f, 0x3a, 0x38, 0x3a,
-    0x35, 0x35, 0x2f, 0x51, 0x3c, 0x3a, 0x45, 0x3a, 0x3b, 0x41, 0x39, 0x55,
-    0x3c, 0x41, 0x39, 0x44, 0x3a, 0x40, 0x37, 0x48, 0x33, 0x47, 0x36, 0x3e,
-    0x3c, 0x41, 0x3f, 0x3e, 0x3e, 0x47, 0x36, 0x3e, 0x41, 0x33, 0x3e, 0x3b,
-    0x3a, 0x46, 0x45, 0x40, 0x48, 0x3a, 0x35, 0x4b, 0x45, 0x4d, 0x3c, 0x49,
-    0x42, 0x44, 0x3c, 0x4c, 0x3e, 0x3c, 0x44, 0x32, 0x33, 0x41, 0x36, 0x4b,
-    0x38, 0x3b, 0x3c, 0x38, 0x3b, 0x45, 0x34, 0x46, 0x40, 0x4e, 0x44, 0x35,
-    0x43, 0x36, 0x3d, 0x40, 0x3e, 0x48, 0x40, 0x34, 0x3a, 0x46, 0x45, 0x43,
-    0x45, 0x3f, 0x47, 0x37, 0x36, 0x35, 0x44, 0x3a, 0x3e, 0x37, 0x39, 0x40,
-    0x3a, 0x3f, 0x3f, 0x4c, 0x3e, 0x41, 0x43, 0x35, 0x3f, 0x3d, 0x3d, 0x4c,
-    0x3c, 0x4a, 0x46, 0x3c, 0x3a, 0x41, 0x40, 0x4e, 0x36, 0x47, 0x40, 0x3b,
-    0x47, 0x42, 0x38, 0x4d, 0x48, 0x47, 0x3c, 0x3c, 0x33, 0x3b, 0x3e, 0x42,
-    0x3f, 0x3e, 0x3a, 0x3d, 0x32, 0x39, 0x41, 0x46, 0x3a, 0x3a, 0x3e, 0x3e,
-    0x47, 0x48, 0x4e, 0x36, 0x44, 0x40, 0x41, 0x45, 0x3a, 0x3c, 0x38, 0x55,
-    0x2e, 0x26, 0x2f, 0x32, 0x3f, 0x41, 0x3e, 0x4c, 0x45, 0x36, 0x40, 0x31,
-    0x17, 0x2e, 0x14, 0x53, 0x34, 0x30, 0x34, 0x3f, 0x2e, 0x44, 0x2b, 0x4e,
-    0x34, 0x3e, 0x34, 0x43, 0x3d, 0x35, 0x3f, 0x46, 0x39, 0x40, 0x38, 0x3e,
-    0x35, 0x3b, 0x35, 0x45, 0x3d, 0x40, 0x38, 0x37, 0x40, 0x3e, 0x32, 0x3e,
-    0x41, 0x39, 0x30, 0x41, 0x3a, 0x32, 0x3e, 0x3d, 0x39, 0x31, 0x33, 0x3e,
-    0x41, 0x47, 0x40, 0x47, 0x35, 0x33, 0x3c, 0x32, 0x40, 0x3c, 0x42, 0x49,
-    0x34, 0x38, 0x39, 0x37, 0x39, 0x35, 0x40, 0x4d, 0x37, 0x43, 0x42, 0x3e,
-    0x3f, 0x3c, 0x3e, 0x51, 0x36, 0x37, 0x42, 0x41, 0x36, 0x31, 0x43, 0x3d,
-    0x46, 0x43, 0x37, 0x46, 0x32, 0x45, 0x42, 0x36, 0x3f, 0x42, 0x42, 0x41,
-    0x3d, 0x46, 0x39, 0x41, 0x3c, 0x3f, 0x38, 0x3c, 0x43, 0x43, 0x3d, 0x3c,
-    0x3d, 0x41, 0x38, 0x42, 0x3a, 0x3d, 0x43, 0x42, 0x41, 0x40, 0x39, 0x36,
-    0x3a, 0x3c, 0x3c, 0x4f, 0x44, 0x36, 0x39, 0x35, 0x46, 0x46, 0x36, 0x4a,
-    0x3a, 0x42, 0x43, 0x39, 0x3f, 0x3d, 0x3c, 0x47, 0x38, 0x3f, 0x43, 0x40,
-    0x36, 0x3c, 0x45, 0x3b, 0x33, 0x36, 0x3b, 0x39, 0x3c, 0x35, 0x40, 0x38,
-    0x40, 0x3e, 0x3f, 0x48, 0x3f, 0x34, 0x40, 0x53, 0x26, 0x2c, 0x29, 0x39,
-    0x2a, 0x38, 0x3f, 0x45, 0x32, 0x31, 0x4a, 0x37, 0x1c, 0x28, 0x09, 0x43,
-    0x35, 0x3b, 0x33, 0x3c, 0x32, 0x3f, 0x28, 0x41, 0x36, 0x35, 0x3a, 0x37,
-    0x41, 0x39, 0x32, 0x3c, 0x40, 0x3c, 0x3c, 0x32, 0x38, 0x39, 0x37, 0x44,
-    0x3a, 0x33, 0x41, 0x36, 0x37, 0x3c, 0x35, 0x3a, 0x3d, 0x30, 0x3d, 0x41,
-    0x37, 0x3c, 0x45, 0x3a, 0x37, 0x2f, 0x36, 0x3c, 0x3a, 0x3d, 0x39, 0x48,
-    0x46, 0x33, 0x3a, 0x3e, 0x40, 0x3d, 0x3b, 0x52, 0x38, 0x45, 0x34, 0x47,
-    0x39, 0x36, 0x37, 0x56, 0x42, 0x3f, 0x33, 0x36, 0x38, 0x3f, 0x40, 0x53,
-    0x3e, 0x37, 0x3d, 0x3c, 0x48, 0x3a, 0x3d, 0x33, 0x39, 0x40, 0x3e, 0x35,
-    0x3d, 0x46, 0x38, 0x36, 0x37, 0x43, 0x3a, 0x3c, 0x40, 0x38, 0x39, 0x3b,
-    0x39, 0x3a, 0x42, 0x3d, 0x34, 0x3f, 0x35, 0x43, 0x3a, 0x35, 0x46, 0x3a,
-    0x48, 0x38, 0x3b, 0x48, 0x3c, 0x35, 0x42, 0x3d, 0x3a, 0x3d, 0x38, 0x42,
-    0x3e, 0x3c, 0x33, 0x39, 0x34, 0x30, 0x42, 0x44, 0x41, 0x3d, 0x3c, 0x39,
-    0x3c, 0x3a, 0x39, 0x41, 0x3d, 0x44, 0x3c, 0x40, 0x3f, 0x3e, 0x42, 0x3f,
-    0x37, 0x40, 0x39, 0x3b, 0x42, 0x43, 0x49, 0x37, 0x39, 0x46, 0x35, 0x3c,
-    0x3e, 0x39, 0x45, 0x52, 0x24, 0x2d, 0x38, 0x35, 0x3a, 0x3a, 0x3c, 0x44,
-    0x39, 0x32, 0x51, 0x3f, 0x16, 0x34, 0x0a, 0x49, 0x39, 0x38, 0x39, 0x3e,
-    0x2f, 0x36, 0x24, 0x3f, 0x37, 0x34, 0x38, 0x3b, 0x34, 0x34, 0x30, 0x3b,
-    0x3d, 0x36, 0x35, 0x42, 0x33, 0x40, 0x37, 0x35, 0x43, 0x3f, 0x3f, 0x39,
-    0x3a, 0x43, 0x36, 0x3e, 0x39, 0x3d, 0x3f, 0x3d, 0x47, 0x3b, 0x39, 0x37,
-    0x35, 0x42, 0x3f, 0x3b, 0x41, 0x3a, 0x42, 0x4b, 0x3d, 0x3f, 0x3d, 0x3e,
-    0x38, 0x3b, 0x34, 0x4e, 0x3f, 0x39, 0x36, 0x43, 0x39, 0x35, 0x41, 0x4d,
-    0x3c, 0x39, 0x43, 0x33, 0x37, 0x3b, 0x41, 0x48, 0x3c, 0x3f, 0x39, 0x32,
-    0x35, 0x3d, 0x42, 0x35, 0x3d, 0x3e, 0x37, 0x3b, 0x38, 0x3a, 0x44, 0x36,
-    0x42, 0x35, 0x48, 0x40, 0x3a, 0x44, 0x44, 0x39, 0x43, 0x41, 0x3c, 0x37,
-    0x47, 0x3b, 0x42, 0x42, 0x45, 0x3a, 0x40, 0x46, 0x35, 0x3f, 0x3a, 0x48,
-    0x35, 0x44, 0x3f, 0x37, 0x33, 0x3e, 0x45, 0x49, 0x39, 0x43, 0x47, 0x37,
-    0x3f, 0x3f, 0x3b, 0x44, 0x38, 0x3d, 0x39, 0x42, 0x37, 0x3e, 0x40, 0x45,
-    0x3b, 0x3f, 0x40, 0x34, 0x42, 0x3f, 0x43, 0x3c, 0x43, 0x41, 0x38, 0x38,
-    0x38, 0x41, 0x55, 0x33, 0x33, 0x39, 0x39, 0x3c, 0x35, 0x39, 0x38, 0x42,
-    0x27, 0x26, 0x32, 0x41, 0x41, 0x32, 0x3f, 0x47, 0x3a, 0x38, 0x48, 0x37,
-    0x11, 0x27, 0x08, 0x49, 0x35, 0x42, 0x3c, 0x2e, 0x34, 0x43, 0x25, 0x3b,
-    0x3a, 0x33, 0x37, 0x30, 0x3c, 0x36, 0x2d, 0x3c, 0x3b, 0x39, 0x3b, 0x40,
-    0x46, 0x3a, 0x30, 0x42, 0x35, 0x32, 0x36, 0x3a, 0x3a, 0x34, 0x34, 0x33,
-    0x3d, 0x30, 0x3b, 0x42, 0x41, 0x3f, 0x3d, 0x3b, 0x44, 0x3d, 0x41, 0x41,
-    0x3d, 0x3f, 0x40, 0x51, 0x42, 0x42, 0x36, 0x45, 0x30, 0x40, 0x32, 0x4f,
-    0x3a, 0x3c, 0x40, 0x39, 0x3d, 0x3b, 0x3e, 0x4b, 0x3d, 0x37, 0x42, 0x46,
-    0x40, 0x40, 0x47, 0x3d, 0x35, 0x3c, 0x3f, 0x46, 0x37, 0x37, 0x3a, 0x2e,
-    0x3d, 0x3c, 0x3a, 0x46, 0x3a, 0x44, 0x3c, 0x3a, 0x32, 0x44, 0x31, 0x41,
-    0x43, 0x36, 0x49, 0x39, 0x3d, 0x37, 0x3f, 0x41, 0x3b, 0x3b, 0x3c, 0x42,
-    0x3c, 0x34, 0x3f, 0x3b, 0x40, 0x3e, 0x48, 0x47, 0x3e, 0x3c, 0x38, 0x39,
-    0x3f, 0x35, 0x39, 0x3f, 0x3e, 0x3e, 0x3b, 0x43, 0x41, 0x40, 0x43, 0x41,
-    0x3f, 0x37, 0x39, 0x41, 0x46, 0x32, 0x3d, 0x41, 0x36, 0x3f, 0x3e, 0x3f,
-    0x36, 0x48, 0x43, 0x3d, 0x43, 0x3f, 0x34, 0x3d, 0x34, 0x35, 0x4f, 0x32,
-    0x3c, 0x3f, 0x3d, 0x3f, 0x39, 0x3c, 0x3d, 0x47, 0x23, 0x36, 0x33, 0x45,
-    0x37, 0x2e, 0x42, 0x42, 0x39, 0x34, 0x4f, 0x3f, 0x19, 0x2b, 0x01, 0x50,
-    0x35, 0x3f, 0x37, 0x3c, 0x33, 0x35, 0x25, 0x32, 0x38, 0x3e, 0x40, 0x40,
-    0x2f, 0x38, 0x35, 0x3d, 0x31, 0x42, 0x44, 0x3c, 0x3a, 0x3d, 0x2d, 0x3e,
-    0x3b, 0x3e, 0x3d, 0x31, 0x3b, 0x37, 0x35, 0x31, 0x36, 0x35, 0x34, 0x31,
-    0x41, 0x3a, 0x33, 0x32, 0x3c, 0x31, 0x3e, 0x3d, 0x40, 0x3b, 0x34, 0x45,
-    0x36, 0x39, 0x3e, 0x3f, 0x3c, 0x45, 0x37, 0x4b, 0x42, 0x3d, 0x33, 0x43,
-    0x3e, 0x40, 0x35, 0x4e, 0x38, 0x36, 0x3a, 0x33, 0x38, 0x44, 0x3f, 0x3c,
-    0x3f, 0x40, 0x3a, 0x3c, 0x3c, 0x3c, 0x44, 0x29, 0x3a, 0x40, 0x35, 0x3a,
-    0x3d, 0x48, 0x3b, 0x30, 0x45, 0x41, 0x45, 0x40, 0x37, 0x32, 0x3a, 0x35,
-    0x3f, 0x38, 0x3b, 0x43, 0x3b, 0x3f, 0x33, 0x40, 0x3b, 0x40, 0x38, 0x33,
-    0x39, 0x3c, 0x3c, 0x3f, 0x43, 0x33, 0x43, 0x40, 0x43, 0x3d, 0x33, 0x42,
-    0x40, 0x32, 0x3e, 0x36, 0x40, 0x38, 0x43, 0x40, 0x44, 0x38, 0x34, 0x3c,
-    0x3e, 0x39, 0x47, 0x43, 0x40, 0x3b, 0x3f, 0x3f, 0x3c, 0x3b, 0x4b, 0x33,
-    0x36, 0x49, 0x32, 0x41, 0x48, 0x45, 0x57, 0x3a, 0x40, 0x42, 0x40, 0x46,
-    0x36, 0x35, 0x3c, 0x46, 0x22, 0x2e, 0x33, 0x3e, 0x3c, 0x39, 0x44, 0x4d,
-    0x3f, 0x41, 0x51, 0x44, 0x15, 0x2e, 0x02, 0x4e, 0x39, 0x3a, 0x3c, 0x35,
-    0x30, 0x38, 0x1e, 0x31, 0x40, 0x3b, 0x39, 0x3d, 0x3a, 0x37, 0x35, 0x36,
-    0x46, 0x36, 0x3c, 0x3e, 0x39, 0x3e, 0x32, 0x40, 0x3b, 0x35, 0x42, 0x41,
-    0x41, 0x38, 0x41, 0x35, 0x42, 0x36, 0x3c, 0x42, 0x3d, 0x41, 0x35, 0x31,
-    0x3f, 0x44, 0x3e, 0x41, 0x3f, 0x35, 0x42, 0x4b, 0x3e, 0x36, 0x37, 0x34,
-    0x36, 0x3d, 0x40, 0x49, 0x41, 0x3e, 0x3d, 0x3b, 0x38, 0x37, 0x40, 0x47,
-    0x35, 0x32, 0x43, 0x38, 0x36, 0x3b, 0x33, 0x47, 0x33, 0x34, 0x3d, 0x47,
-    0x3c, 0x37, 0x3d, 0x2b, 0x3a, 0x36, 0x3b, 0x3d, 0x43, 0x38, 0x35, 0x32,
-    0x32, 0x37, 0x43, 0x36, 0x3f, 0x48, 0x38, 0x30, 0x3a, 0x3c, 0x42, 0x34,
-    0x37, 0x3c, 0x37, 0x40, 0x48, 0x3e, 0x35, 0x3b, 0x3f, 0x38, 0x39, 0x3e,
-    0x37, 0x35, 0x36, 0x3d, 0x3b, 0x3c, 0x40, 0x3d, 0x34, 0x40, 0x46, 0x42,
-    0x3f, 0x3c, 0x3c, 0x3e, 0x40, 0x40, 0x3d, 0x3f, 0x3f, 0x44, 0x46, 0x41,
-    0x32, 0x43, 0x40, 0x41, 0x3c, 0x42, 0x39, 0x38, 0x48, 0x44, 0x3d, 0x38,
-    0x34, 0x40, 0x4e, 0x31, 0x3c, 0x42, 0x39, 0x48, 0x3c, 0x33, 0x3e, 0x40,
-    0x20, 0x27, 0x39, 0x45, 0x45, 0x36, 0x47, 0x4c, 0x35, 0x3e, 0x4a, 0x36,
-    0x16, 0x2f, 0x04, 0x4f, 0x3a, 0x35, 0x36, 0x3a, 0x2d, 0x36, 0x21, 0x34,
-    0x3b, 0x32, 0x3d, 0x3c, 0x3c, 0x3f, 0x3b, 0x3b, 0x41, 0x46, 0x40, 0x3d,
-    0x3b, 0x44, 0x33, 0x42, 0x34, 0x33, 0x3e, 0x45, 0x3f, 0x46, 0x39, 0x33,
-    0x3b, 0x37, 0x37, 0x37, 0x42, 0x47, 0x3c, 0x35, 0x31, 0x41, 0x44, 0x3a,
-    0x3b, 0x33, 0x39, 0x44, 0x42, 0x33, 0x3d, 0x3f, 0x43, 0x33, 0x41, 0x4a,
-    0x35, 0x46, 0x36, 0x3e, 0x39, 0x41, 0x41, 0x4c, 0x34, 0x3d, 0x38, 0x33,
-    0x3c, 0x3f, 0x43, 0x44, 0x37, 0x35, 0x35, 0x3c, 0x43, 0x34, 0x3e, 0x2d,
-    0x3f, 0x35, 0x38, 0x3c, 0x33, 0x35, 0x43, 0x2a, 0x40, 0x33, 0x34, 0x40,
-    0x3d, 0x38, 0x36, 0x2d, 0x36, 0x3c, 0x43, 0x3d, 0x37, 0x3d, 0x39, 0x38,
-    0x3b, 0x3e, 0x3c, 0x46, 0x35, 0x35, 0x43, 0x44, 0x39, 0x40, 0x34, 0x39,
-    0x3d, 0x34, 0x40, 0x45, 0x38, 0x35, 0x3e, 0x39, 0x3c, 0x44, 0x48, 0x44,
-    0x41, 0x3e, 0x3c, 0x45, 0x3a, 0x3c, 0x3c, 0x46, 0x3a, 0x40, 0x39, 0x43,
-    0x35, 0x35, 0x3e, 0x45, 0x3a, 0x34, 0x3c, 0x39, 0x46, 0x3a, 0x4f, 0x35,
-    0x32, 0x3d, 0x36, 0x41, 0x32, 0x38, 0x3f, 0x45, 0x2d, 0x34, 0x2a, 0x35,
-    0x43, 0x3f, 0x41, 0x49, 0x41, 0x3c, 0x4b, 0x3f, 0x17, 0x31, 0x02, 0x4f,
-    0x30, 0x38, 0x39, 0x40, 0x33, 0x3a, 0x25, 0x38, 0x35, 0x3c, 0x39, 0x35,
-    0x34, 0x41, 0x34, 0x43, 0x40, 0x40, 0x46, 0x3d, 0x40, 0x38, 0x3f, 0x3b,
-    0x35, 0x39, 0x3c, 0x39, 0x34, 0x38, 0x3f, 0x36, 0x3a, 0x38, 0x44, 0x3f,
-    0x3f, 0x38, 0x3c, 0x33, 0x41, 0x42, 0x38, 0x33, 0x3c, 0x3b, 0x3c, 0x46,
-    0x38, 0x3b, 0x3f, 0x33, 0x3f, 0x48, 0x3b, 0x49, 0x3f, 0x3a, 0x3d, 0x3f,
-    0x47, 0x3d, 0x30, 0x45, 0x36, 0x42, 0x3d, 0x36, 0x43, 0x38, 0x3b, 0x3d,
-    0x3c, 0x30, 0x3b, 0x43, 0x3d, 0x41, 0x34, 0x2e, 0x43, 0x3d, 0x43, 0x46,
-    0x43, 0x3c, 0x3c, 0x2e, 0x3c, 0x43, 0x34, 0x43, 0x3e, 0x43, 0x3f, 0x2b,
-    0x45, 0x40, 0x3a, 0x43, 0x36, 0x39, 0x3f, 0x3d, 0x3a, 0x3c, 0x35, 0x3b,
-    0x36, 0x3f, 0x45, 0x3e, 0x45, 0x40, 0x3f, 0x36, 0x45, 0x42, 0x35, 0x3e,
-    0x3a, 0x3a, 0x3f, 0x40, 0x3e, 0x3c, 0x39, 0x46, 0x43, 0x3e, 0x3f, 0x3f,
-    0x40, 0x3c, 0x40, 0x4b, 0x41, 0x35, 0x3b, 0x3e, 0x49, 0x32, 0x3e, 0x41,
-    0x31, 0x37, 0x3d, 0x3b, 0x3f, 0x45, 0x50, 0x3a, 0x3f, 0x3c, 0x44, 0x36,
-    0x43, 0x37, 0x3d, 0x4b, 0x29, 0x39, 0x2f, 0x38, 0x45, 0x36, 0x40, 0x4e,
-    0x39, 0x3f, 0x48, 0x43, 0x23, 0x3c, 0x06, 0x51, 0x37, 0x3b, 0x3e, 0x3b,
-    0x28, 0x45, 0x2b, 0x37, 0x3f, 0x33, 0x3f, 0x41, 0x31, 0x36, 0x33, 0x3a,
-    0x3a, 0x35, 0x3b, 0x33, 0x3e, 0x36, 0x35, 0x40, 0x3a, 0x34, 0x3a, 0x38,
-    0x34, 0x3a, 0x3a, 0x34, 0x42, 0x45, 0x40, 0x3e, 0x40, 0x38, 0x39, 0x34,
-    0x38, 0x37, 0x3f, 0x3e, 0x3c, 0x32, 0x3f, 0x46, 0x3f, 0x44, 0x3b, 0x3e,
-    0x44, 0x45, 0x36, 0x3e, 0x36, 0x3f, 0x3b, 0x40, 0x39, 0x34, 0x38, 0x41,
-    0x42, 0x3e, 0x3d, 0x47, 0x3e, 0x45, 0x33, 0x40, 0x3e, 0x3a, 0x44, 0x3d,
-    0x3c, 0x3a, 0x3a, 0x2c, 0x3a, 0x3d, 0x35, 0x45, 0x3c, 0x41, 0x36, 0x30,
-    0x32, 0x32, 0x3a, 0x3b, 0x35, 0x3c, 0x43, 0x2d, 0x35, 0x3f, 0x41, 0x37,
-    0x3f, 0x46, 0x34, 0x39, 0x3c, 0x43, 0x40, 0x3e, 0x3e, 0x36, 0x3e, 0x3c,
-    0x37, 0x3a, 0x3d, 0x3a, 0x3c, 0x38, 0x44, 0x41, 0x3f, 0x3b, 0x3c, 0x47,
-    0x40, 0x3b, 0x41, 0x47, 0x3e, 0x45, 0x39, 0x3e, 0x37, 0x45, 0x4b, 0x4c,
-    0x37, 0x37, 0x37, 0x3c, 0x3c, 0x3d, 0x40, 0x38, 0x39, 0x3e, 0x43, 0x3f,
-    0x38, 0x45, 0x51, 0x3c, 0x31, 0x34, 0x3b, 0x48, 0x46, 0x41, 0x40, 0x40,
-    0x2c, 0x39, 0x32, 0x42, 0x3c, 0x2e, 0x49, 0x4d, 0x3c, 0x3f, 0x45, 0x38,
-    0x20, 0x38, 0x03, 0x55, 0x33, 0x3e, 0x32, 0x39, 0x32, 0x3b, 0x24, 0x2b,
-    0x42, 0x35, 0x45, 0x32, 0x2e, 0x3b, 0x2f, 0x3f, 0x3c, 0x37, 0x39, 0x3b,
-    0x34, 0x34, 0x3d, 0x36, 0x3d, 0x39, 0x3b, 0x30, 0x3c, 0x3e, 0x40, 0x32,
-    0x3d, 0x3c, 0x3c, 0x3e, 0x33, 0x33, 0x3f, 0x3a, 0x33, 0x3e, 0x46, 0x36,
-    0x3a, 0x3d, 0x40, 0x40, 0x3f, 0x41, 0x3a, 0x42, 0x34, 0x32, 0x34, 0x46,
-    0x3b, 0x31, 0x40, 0x37, 0x37, 0x32, 0x3e, 0x47, 0x3f, 0x3b, 0x3e, 0x43,
-    0x49, 0x45, 0x3a, 0x3d, 0x3e, 0x44, 0x40, 0x31, 0x39, 0x3e, 0x3b, 0x2d,
-    0x3b, 0x3a, 0x33, 0x3d, 0x39, 0x37, 0x3e, 0x32, 0x41, 0x3c, 0x3a, 0x37,
-    0x3b, 0x40, 0x39, 0x2f, 0x3e, 0x3f, 0x47, 0x32, 0x3e, 0x3b, 0x3e, 0x3e,
-    0x40, 0x3e, 0x40, 0x3c, 0x41, 0x39, 0x38, 0x46, 0x45, 0x32, 0x47, 0x31,
-    0x36, 0x47, 0x37, 0x49, 0x3a, 0x3f, 0x47, 0x3a, 0x41, 0x3b, 0x3c, 0x4f,
-    0x3e, 0x36, 0x3b, 0x47, 0x35, 0x39, 0x41, 0x4e, 0x3d, 0x3e, 0x3b, 0x46,
-    0x38, 0x39, 0x3b, 0x45, 0x3e, 0x3f, 0x44, 0x42, 0x44, 0x3f, 0x55, 0x3b,
-    0x41, 0x3d, 0x43, 0x43, 0x37, 0x3f, 0x3d, 0x4c, 0x28, 0x3d, 0x36, 0x3c,
-    0x3e, 0x3e, 0x48, 0x50, 0x3e, 0x39, 0x45, 0x41, 0x22, 0x37, 0x07, 0x4f,
-    0x2e, 0x33, 0x38, 0x3f, 0x31, 0x3a, 0x1b, 0x36, 0x34, 0x38, 0x3c, 0x37,
-    0x37, 0x3e, 0x36, 0x35, 0x36, 0x3b, 0x3d, 0x38, 0x42, 0x48, 0x3d, 0x40,
-    0x40, 0x44, 0x3d, 0x39, 0x37, 0x3b, 0x3d, 0x33, 0x3d, 0x35, 0x42, 0x3c,
-    0x39, 0x3e, 0x43, 0x2d, 0x3c, 0x40, 0x43, 0x43, 0x45, 0x35, 0x3c, 0x44,
-    0x34, 0x3c, 0x3d, 0x31, 0x39, 0x40, 0x39, 0x3d, 0x3e, 0x34, 0x3e, 0x3b,
-    0x40, 0x38, 0x42, 0x4a, 0x40, 0x3b, 0x35, 0x3d, 0x36, 0x38, 0x35, 0x42,
-    0x3c, 0x3c, 0x3d, 0x3b, 0x38, 0x39, 0x45, 0x28, 0x3a, 0x37, 0x37, 0x35,
-    0x3a, 0x3d, 0x35, 0x2a, 0x3c, 0x3f, 0x37, 0x34, 0x37, 0x3f, 0x3e, 0x2b,
-    0x39, 0x43, 0x3b, 0x45, 0x35, 0x36, 0x36, 0x42, 0x33, 0x38, 0x3b, 0x35,
-    0x31, 0x3f, 0x41, 0x41, 0x3c, 0x41, 0x45, 0x42, 0x3b, 0x3c, 0x39, 0x46,
-    0x3c, 0x3e, 0x3a, 0x41, 0x39, 0x3d, 0x41, 0x4b, 0x40, 0x3f, 0x43, 0x3d,
-    0x39, 0x39, 0x44, 0x44, 0x37, 0x42, 0x3f, 0x44, 0x3e, 0x37, 0x42, 0x35,
-    0x44, 0x3f, 0x40, 0x42, 0x3f, 0x3a, 0x47, 0x3d, 0x38, 0x3a, 0x3b, 0x3a,
-    0x42, 0x36, 0x3a, 0x97, 0x32, 0x31, 0x30, 0x36, 0x47, 0x3e, 0x46, 0x51,
-    0x42, 0x34, 0x50, 0x34, 0x26, 0x3b, 0x06, 0x55, 0x3c, 0x3b, 0x2d, 0x3a,
-    0x37, 0x37, 0x1b, 0x32, 0x39, 0x3d, 0x36, 0x40, 0x3b, 0x3f, 0x33, 0x33,
-    0x3d, 0x37, 0x35, 0x37, 0x44, 0x3f, 0x35, 0x39, 0x33, 0x3c, 0x43, 0x39,
-    0x3f, 0x42, 0x3e, 0x34, 0x38, 0x38, 0x39, 0x3c, 0x48, 0x3c, 0x2f, 0x30,
-    0x40, 0x3c, 0x41, 0x3e, 0x3f, 0x3e, 0x36, 0x43, 0x40, 0x3c, 0x36, 0x43,
-    0x43, 0x38, 0x3a, 0x47, 0x3e, 0x37, 0x39, 0x3a, 0x43, 0x45, 0x38, 0x43,
-    0x3b, 0x45, 0x37, 0x44, 0x36, 0x45, 0x3a, 0x3e, 0x3e, 0x3e, 0x3d, 0x33,
-    0x39, 0x36, 0x48, 0x33, 0x30, 0x42, 0x33, 0x39, 0x37, 0x3a, 0x3f, 0x34,
-    0x34, 0x40, 0x40, 0x40, 0x3f, 0x3d, 0x3f, 0x33, 0x41, 0x40, 0x3b, 0x43,
-    0x3b, 0x3a, 0x40, 0x3a, 0x38, 0x3e, 0x38, 0x3b, 0x38, 0x42, 0x40, 0x40,
-    0x41, 0x35, 0x37, 0x38, 0x3b, 0x3c, 0x39, 0x4b, 0x32, 0x39, 0x42, 0x3c,
-    0x36, 0x3d, 0x32, 0x52, 0x3a, 0x31, 0x40, 0x40, 0x3a, 0x43, 0x3d, 0x46,
-    0x3c, 0x3e, 0x3e, 0x33, 0x3f, 0x41, 0x4d, 0x37, 0x39, 0x39, 0x3e, 0x3b,
-    0x40, 0x39, 0x53, 0x2d, 0x46, 0x3c, 0x32, 0x42, 0x3d, 0x40, 0x40, 0x4d,
-    0x2e, 0x34, 0x39, 0x3b, 0x46, 0x3b, 0x42, 0x4f, 0x3d, 0x39, 0x4e, 0x36,
-    0x1a, 0x31, 0x0e, 0x56, 0x36, 0x42, 0x38, 0x44, 0x36, 0x3a, 0x20, 0x30,
-    0x36, 0x34, 0x37, 0x38, 0x40, 0x41, 0x2a, 0x35, 0x3b, 0x3b, 0x3a, 0x38,
-    0x33, 0x39, 0x36, 0x41, 0x43, 0x39, 0x35, 0x3d, 0x37, 0x3d, 0x33, 0x31,
-    0x45, 0x33, 0x3f, 0x3b, 0x44, 0x38, 0x39, 0x34, 0x38, 0x39, 0x38, 0x3d,
-    0x3a, 0x3a, 0x41, 0x40, 0x44, 0x3e, 0x3f, 0x45, 0x34, 0x31, 0x34, 0x43,
-    0x3b, 0x34, 0x42, 0x3c, 0x3c, 0x43, 0x35, 0x45, 0x36, 0x38, 0x3d, 0x3c,
-    0x3f, 0x3d, 0x3e, 0x45, 0x41, 0x43, 0x35, 0x3f, 0x40, 0x3f, 0x3a, 0x34,
-    0x3d, 0x32, 0x41, 0x3d, 0x48, 0x42, 0x37, 0x2a, 0x3c, 0x3a, 0x3e, 0x49,
-    0x38, 0x36, 0x38, 0x2e, 0x36, 0x37, 0x34, 0x3e, 0x3c, 0x43, 0x43, 0x39,
-    0x39, 0x3b, 0x44, 0x46, 0x44, 0x43, 0x37, 0x46, 0x43, 0x34, 0x3b, 0x35,
-    0x42, 0x41, 0x3f, 0x3d, 0x3d, 0x3a, 0x42, 0x3e, 0x38, 0x47, 0x3d, 0x49,
-    0x45, 0x49, 0x3a, 0x3c, 0x3e, 0x37, 0x40, 0x46, 0x41, 0x33, 0x45, 0x36,
-    0x37, 0x44, 0x49, 0x3b, 0x44, 0x40, 0x33, 0x46, 0x37, 0x39, 0x4e, 0x3a,
-    0x43, 0x38, 0x3a, 0x42, 0x3a, 0x3d, 0x45, 0x50, 0x26, 0x34, 0x3b, 0x3c,
-    0x46, 0x46, 0x4c, 0x54, 0x3f, 0x35, 0x4e, 0x47, 0x21, 0x39, 0x0e, 0x54,
-    0x3a, 0x3a, 0x2f, 0x40, 0x2d, 0x3a, 0x1f, 0x31, 0x31, 0x42, 0x34, 0x45,
-    0x37, 0x36, 0x30, 0x3b, 0x3a, 0x3a, 0x36, 0x40, 0x32, 0x36, 0x3c, 0x3c,
-    0x37, 0x42, 0x35, 0x3e, 0x39, 0x47, 0x36, 0x32, 0x41, 0x30, 0x42, 0x39,
-    0x39, 0x44, 0x37, 0x30, 0x41, 0x3b, 0x3d, 0x3d, 0x43, 0x3b, 0x38, 0x45,
-    0x3b, 0x3a, 0x39, 0x3a, 0x31, 0x33, 0x43, 0x46, 0x3f, 0x41, 0x44, 0x3f,
-    0x3b, 0x44, 0x3a, 0x4c, 0x33, 0x33, 0x33, 0x3e, 0x37, 0x3e, 0x45, 0x45,
-    0x36, 0x42, 0x3e, 0x43, 0x40, 0x34, 0x36, 0x31, 0x38, 0x34, 0x41, 0x3b,
-    0x32, 0x38, 0x3e, 0x29, 0x47, 0x33, 0x37, 0x45, 0x3c, 0x3d, 0x43, 0x2c,
-    0x36, 0x3a, 0x3c, 0x40, 0x3d, 0x46, 0x3c, 0x37, 0x40, 0x44, 0x37, 0x38,
-    0x3e, 0x41, 0x3c, 0x40, 0x33, 0x3f, 0x44, 0x32, 0x44, 0x3a, 0x43, 0x42,
-    0x3e, 0x38, 0x44, 0x3b, 0x41, 0x48, 0x3f, 0x4e, 0x3f, 0x44, 0x35, 0x45,
-    0x34, 0x3f, 0x42, 0x4b, 0x37, 0x37, 0x3e, 0x45, 0x46, 0x45, 0x46, 0x3d,
-    0x3e, 0x39, 0x3b, 0x3a, 0x46, 0x3a, 0x56, 0x35, 0x46, 0x3d, 0x40, 0x3b,
-    0x36, 0x39, 0x3f, 0x54, 0x27, 0x2b, 0x34, 0x3c, 0x48, 0x3d, 0x49, 0x4c,
-    0x3e, 0x3d, 0x4e, 0x42, 0x25, 0x3b, 0x10, 0x4d, 0x30, 0x36, 0x3e, 0x36,
-    0x2e, 0x31, 0x1d, 0x37, 0x3a, 0x39, 0x33, 0x3f, 0x39, 0x38, 0x2e, 0x36,
-    0x44, 0x3e, 0x41, 0x37, 0x3b, 0x30, 0x3b, 0x48, 0x31, 0x39, 0x41, 0x3e,
-    0x37, 0x37, 0x34, 0x2f, 0x35, 0x3b, 0x3a, 0x3e, 0x45, 0x3e, 0x3f, 0x35,
-    0x39, 0x39, 0x3b, 0x44, 0x43, 0x3c, 0x3e, 0x46, 0x40, 0x3a, 0x36, 0x45,
-    0x41, 0x40, 0x36, 0x44, 0x3a, 0x37, 0x47, 0x47, 0x3d, 0x36, 0x43, 0x4e,
-    0x3b, 0x38, 0x40, 0x48, 0x44, 0x43, 0x45, 0x3f, 0x43, 0x3c, 0x3b, 0x37,
-    0x43, 0x41, 0x39, 0x2f, 0x3d, 0x45, 0x3e, 0x3e, 0x42, 0x40, 0x41, 0x2f,
-    0x47, 0x38, 0x3a, 0x48, 0x3e, 0x35, 0x37, 0x2a, 0x34, 0x38, 0x41, 0x3b,
-    0x3d, 0x37, 0x3b, 0x35, 0x38, 0x3e, 0x41, 0x3c, 0x41, 0x43, 0x3d, 0x46,
-    0x47, 0x47, 0x3d, 0x35, 0x48, 0x41, 0x3d, 0x3e, 0x34, 0x47, 0x38, 0x38,
-    0x39, 0x3e, 0x38, 0x4d, 0x43, 0x36, 0x42, 0x40, 0x3e, 0x41, 0x3f, 0x4c,
-    0x3e, 0x3e, 0x37, 0x44, 0x3e, 0x3b, 0x47, 0x3e, 0x3f, 0x3b, 0x39, 0x3c,
-    0x3c, 0x3c, 0x53, 0x3b, 0x3b, 0x32, 0x3e, 0x3f, 0x32, 0x3c, 0x37, 0x4b,
-    0x33, 0x30, 0x2f, 0x41, 0x47, 0x42, 0x49, 0x4f, 0x3b, 0x42, 0x4c, 0x44,
-    0x1f, 0x37, 0x16, 0x4e, 0x3b, 0x3f, 0x30, 0x36, 0x35, 0x38, 0x26, 0x36,
-    0x32, 0x3b, 0x38, 0x3c, 0x30, 0x3e, 0x34, 0x3e, 0x3d, 0x34, 0x39, 0x3c,
-    0x36, 0x47, 0x34, 0x41, 0x31, 0x39, 0x44, 0x3e, 0x39, 0x41, 0x32, 0x36,
-    0x3b, 0x3f, 0x32, 0x3d, 0x36, 0x3e, 0x40, 0x3d, 0x45, 0x32, 0x45, 0x42,
-    0x38, 0x43, 0x40, 0x42, 0x34, 0x3a, 0x43, 0x38, 0x47, 0x3f, 0x41, 0x47,
-    0x34, 0x44, 0x41, 0x39, 0x3c, 0x46, 0x36, 0x4f, 0x41, 0x3e, 0x38, 0x38,
-    0x3a, 0x3b, 0x43, 0x44, 0x37, 0x3f, 0x35, 0x43, 0x34, 0x3d, 0x40, 0x32,
-    0x3a, 0x3b, 0x3d, 0x34, 0x35, 0x43, 0x31, 0x2c, 0x3b, 0x36, 0x38, 0x41,
-    0x3c, 0x38, 0x3d, 0x31, 0x45, 0x46, 0x42, 0x41, 0x33, 0x3f, 0x3f, 0x3a,
-    0x36, 0x3f, 0x3c, 0x3c, 0x3c, 0x3e, 0x39, 0x3e, 0x40, 0x37, 0x47, 0x3e,
-    0x35, 0x39, 0x3d, 0x3d, 0x37, 0x36, 0x3e, 0x45, 0x38, 0x3d, 0x45, 0x43,
-    0x3a, 0x32, 0x3b, 0x3a, 0x32, 0x3c, 0x3d, 0x43, 0x3d, 0x33, 0x3b, 0x3d,
-    0x46, 0x3a, 0x44, 0x45, 0x3b, 0x3e, 0x3c, 0x42, 0x37, 0x37, 0x52, 0x2a,
-    0x3a, 0x35, 0x35, 0x3f, 0x40, 0x38, 0x40, 0x5b, 0x35, 0x32, 0x2b, 0x3d,
-    0x4a, 0x3c, 0x46, 0x56, 0x44, 0x30, 0x4d, 0x39, 0x20, 0x32, 0x0f, 0x4f,
-    0x33, 0x3c, 0x35, 0x35, 0x3a, 0x45, 0x29, 0x3b, 0x31, 0x38, 0x34, 0x38,
-    0x42, 0x45, 0x37, 0x3e, 0x37, 0x2e, 0x36, 0x43, 0x3f, 0x38, 0x2f, 0x41,
-    0x3f, 0x41, 0x3c, 0x31, 0x37, 0x36, 0x37, 0x39, 0x41, 0x3a, 0x3a, 0x40,
-    0x3e, 0x47, 0x3d, 0x37, 0x3c, 0x38, 0x35, 0x39, 0x3a, 0x43, 0x3f, 0x42,
-    0x42, 0x38, 0x3e, 0x40, 0x3c, 0x3a, 0x45, 0x48, 0x37, 0x3a, 0x3e, 0x35,
-    0x3a, 0x3d, 0x45, 0x4a, 0x3d, 0x37, 0x38, 0x3a, 0x3d, 0x46, 0x46, 0x41,
-    0x37, 0x41, 0x40, 0x48, 0x37, 0x34, 0x3b, 0x2c, 0x39, 0x34, 0x37, 0x35,
-    0x3a, 0x43, 0x39, 0x2e, 0x39, 0x3f, 0x40, 0x3e, 0x40, 0x40, 0x3c, 0x2d,
-    0x3e, 0x3c, 0x37, 0x39, 0x3c, 0x3b, 0x3d, 0x3f, 0x41, 0x48, 0x3b, 0x3d,
-    0x3b, 0x41, 0x45, 0x3e, 0x3a, 0x38, 0x3f, 0x3c, 0x3d, 0x3e, 0x40, 0x42,
-    0x46, 0x38, 0x43, 0x34, 0x35, 0x47, 0x3d, 0x46, 0x3f, 0x3e, 0x32, 0x3f,
-    0x3e, 0x3d, 0x47, 0x46, 0x38, 0x41, 0x45, 0x3f, 0x34, 0x3f, 0x41, 0x43,
-    0x3e, 0x3e, 0x44, 0x3b, 0x3b, 0x36, 0x51, 0x32, 0x37, 0x3c, 0x42, 0x43,
-    0x33, 0x39, 0x42, 0x61, 0x2c, 0x3b, 0x2e, 0x39, 0x42, 0x39, 0x42, 0x54,
-    0x3c, 0x3a, 0x48, 0x35, 0x26, 0x34, 0x15, 0x51, 0x35, 0x40, 0x36, 0x3c,
-    0x2d, 0x37, 0x25, 0x38, 0x33, 0x3d, 0x3d, 0x39, 0x3e, 0x3b, 0x2e, 0x4b,
-    0x3d, 0x3b, 0x42, 0x37, 0x37, 0x40, 0x37, 0x40, 0x35, 0x45, 0x37, 0x37,
-    0x3f, 0x41, 0x36, 0x39, 0x3c, 0x32, 0x3e, 0x38, 0x41, 0x40, 0x3e, 0x3f,
-    0x3b, 0x3c, 0x43, 0x35, 0x3e, 0x3d, 0x44, 0x44, 0x3a, 0x36, 0x39, 0x3f,
-    0x3a, 0x31, 0x42, 0x4d, 0x40, 0x33, 0x40, 0x45, 0x44, 0x3d, 0x40, 0x49,
-    0x41, 0x3f, 0x42, 0x3a, 0x34, 0x46, 0x38, 0x46, 0x42, 0x34, 0x3a, 0x40,
-    0x40, 0x41, 0x3d, 0x32, 0x35, 0x48, 0x35, 0x3e, 0x44, 0x41, 0x40, 0x2c,
-    0x46, 0x38, 0x38, 0x3f, 0x36, 0x40, 0x38, 0x2a, 0x43, 0x41, 0x3e, 0x35,
-    0x46, 0x3a, 0x45, 0x46, 0x46, 0x42, 0x3a, 0x3b, 0x40, 0x38, 0x35, 0x43,
-    0x38, 0x3d, 0x3b, 0x41, 0x36, 0x44, 0x3f, 0x3f, 0x34, 0x3e, 0x3c, 0x3d,
-    0x49, 0x36, 0x37, 0x4b, 0x38, 0x3c, 0x43, 0x37, 0x3a, 0x3f, 0x31, 0x45,
-    0x3b, 0x39, 0x3f, 0x40, 0x37, 0x3c, 0x42, 0x3f, 0x3c, 0x33, 0x40, 0x3b,
-    0x32, 0x3c, 0x52, 0x31, 0x3d, 0x44, 0x3b, 0x31, 0x46, 0x38, 0x40, 0x60,
-    0x2b, 0x3c, 0x37, 0x34, 0x43, 0x38, 0x45, 0x57, 0x37, 0x39, 0x49, 0x33,
-    0x2d, 0x3f, 0x18, 0x4e, 0x39, 0x39, 0x32, 0x3b, 0x34, 0x3b, 0x2c, 0x45,
-    0x33, 0x37, 0x45, 0x42, 0x3d, 0x37, 0x2a, 0x4c, 0x3d, 0x3f, 0x3c, 0x36,
-    0x37, 0x3c, 0x39, 0x47, 0x3d, 0x44, 0x3d, 0x40, 0x3d, 0x41, 0x34, 0x3e,
-    0x40, 0x34, 0x3b, 0x3a, 0x41, 0x36, 0x37, 0x40, 0x3e, 0x3f, 0x3a, 0x36,
-    0x3e, 0x35, 0x3b, 0x48, 0x41, 0x40, 0x3c, 0x42, 0x34, 0x41, 0x3f, 0x44,
-    0x34, 0x39, 0x33, 0x39, 0x39, 0x47, 0x40, 0x48, 0x38, 0x3a, 0x43, 0x43,
-    0x48, 0x3a, 0x3f, 0x46, 0x35, 0x3a, 0x33, 0x36, 0x32, 0x3c, 0x40, 0x34,
-    0x40, 0x3a, 0x42, 0x3a, 0x39, 0x38, 0x41, 0x35, 0x3a, 0x3f, 0x35, 0x40,
-    0x3f, 0x39, 0x39, 0x36, 0x38, 0x40, 0x3e, 0x3e, 0x3a, 0x31, 0x32, 0x44,
-    0x40, 0x47, 0x3a, 0x3c, 0x43, 0x43, 0x46, 0x48, 0x40, 0x35, 0x3d, 0x37,
-    0x44, 0x37, 0x33, 0x44, 0x3b, 0x3e, 0x3f, 0x37, 0x36, 0x3a, 0x38, 0x47,
-    0x3a, 0x44, 0x36, 0x42, 0x3e, 0x44, 0x34, 0x46, 0x33, 0x43, 0x44, 0x3e,
-    0x30, 0x48, 0x37, 0x38, 0x33, 0x3c, 0x46, 0x42, 0x38, 0x3d, 0x50, 0x39,
-    0x33, 0x38, 0x3e, 0x40, 0x3b, 0x2b, 0x3b, 0x5f, 0x2b, 0x32, 0x2f, 0x37,
-    0x3f, 0x3a, 0x40, 0x4e, 0x34, 0x38, 0x47, 0x37, 0x27, 0x2b, 0x1b, 0x4f,
-    0x36, 0x38, 0x3a, 0x3a, 0x3b, 0x38, 0x2e, 0x3f, 0x3f, 0x42, 0x42, 0x42,
-    0x36, 0x3e, 0x3c, 0x55, 0x39, 0x40, 0x44, 0x43, 0x3e, 0x33, 0x3c, 0x43,
-    0x38, 0x44, 0x3b, 0x46, 0x3f, 0x45, 0x34, 0x38, 0x3c, 0x41, 0x42, 0x3d,
-    0x42, 0x36, 0x43, 0x3f, 0x3c, 0x39, 0x3e, 0x39, 0x39, 0x42, 0x33, 0x47,
-    0x36, 0x3d, 0x3f, 0x3b, 0x40, 0x39, 0x3b, 0x49, 0x36, 0x40, 0x3d, 0x41,
-    0x40, 0x34, 0x3b, 0x4e, 0x3b, 0x36, 0x3b, 0x45, 0x40, 0x32, 0x3b, 0x49,
-    0x37, 0x38, 0x3a, 0x47, 0x37, 0x40, 0x3e, 0x38, 0x40, 0x3f, 0x3c, 0x3a,
-    0x47, 0x41, 0x42, 0x30, 0x40, 0x3c, 0x42, 0x3f, 0x31, 0x44, 0x39, 0x38,
-    0x3b, 0x38, 0x42, 0x43, 0x41, 0x35, 0x3a, 0x39, 0x3e, 0x38, 0x39, 0x3e,
-    0x3c, 0x42, 0x3d, 0x49, 0x47, 0x3c, 0x3f, 0x35, 0x41, 0x3a, 0x36, 0x43,
-    0x43, 0x3b, 0x39, 0x3b, 0x36, 0x43, 0x43, 0x4e, 0x3e, 0x35, 0x37, 0x3b,
-    0x3f, 0x37, 0x41, 0x48, 0x32, 0x44, 0x43, 0x32, 0x38, 0x39, 0x45, 0x39,
-    0x3e, 0x3d, 0x35, 0x39, 0x35, 0x39, 0x50, 0x37, 0x39, 0x40, 0x43, 0x47,
-    0x32, 0x2a, 0x40, 0x62, 0x24, 0x30, 0x36, 0x3e, 0x41, 0x32, 0x47, 0x58,
-    0x39, 0x36, 0x44, 0x34, 0x26, 0x34, 0x1e, 0x50, 0x3c, 0x3b, 0x3f, 0x42,
-    0x35, 0x3d, 0x2a, 0x4e, 0x40, 0x38, 0x36, 0x31, 0x3a, 0x30, 0x37, 0x4b,
-    0x3c, 0x3b, 0x3b, 0x41, 0x3b, 0x3c, 0x2e, 0x45, 0x44, 0x3f, 0x3b, 0x35,
-    0x3e, 0x33, 0x37, 0x3d, 0x40, 0x39, 0x39, 0x37, 0x40, 0x3e, 0x3a, 0x3e,
-    0x3c, 0x3c, 0x45, 0x40, 0x3c, 0x3f, 0x3a, 0x51, 0x47, 0x3a, 0x34, 0x39,
-    0x3b, 0x34, 0x44, 0x4c, 0x36, 0x3d, 0x3a, 0x35, 0x34, 0x36, 0x38, 0x4b,
-    0x3f, 0x40, 0x3f, 0x3e, 0x40, 0x41, 0x47, 0x43, 0x32, 0x38, 0x46, 0x44,
-    0x46, 0x43, 0x43, 0x37, 0x39, 0x49, 0x37, 0x36, 0x3e, 0x3d, 0x37, 0x3c,
-    0x39, 0x37, 0x34, 0x43, 0x45, 0x32, 0x3a, 0x3a, 0x38, 0x43, 0x3b, 0x40,
-    0x3b, 0x3f, 0x3d, 0x41, 0x40, 0x3d, 0x3a, 0x3b, 0x48, 0x37, 0x3d, 0x41,
-    0x40, 0x3e, 0x38, 0x41, 0x3d, 0x3a, 0x38, 0x49, 0x40, 0x3c, 0x42, 0x41,
-    0x3a, 0x38, 0x38, 0x4c, 0x3e, 0x41, 0x40, 0x3b, 0x3d, 0x3e, 0x3c, 0x46,
-    0x3e, 0x42, 0x41, 0x38, 0x42, 0x42, 0x41, 0x3e, 0x3e, 0x37, 0x3c, 0x43,
-    0x43, 0x3b, 0x54, 0x2b, 0x45, 0x3b, 0x43, 0x41, 0x41, 0x26, 0x3f, 0x60,
-    0x25, 0x2b, 0x2e, 0x3a, 0x40, 0x31, 0x40, 0x49, 0x40, 0x31, 0x46, 0x3c,
-    0x1e, 0x2a, 0x1a, 0x47, 0x33, 0x37, 0x37, 0x34, 0x31, 0x36, 0x25, 0x41,
-    0x2e, 0x36, 0x35, 0x33, 0x33, 0x34, 0x31, 0x45, 0x3a, 0x3f, 0x3d, 0x40,
-    0x3c, 0x41, 0x30, 0x3c, 0x3f, 0x46, 0x37, 0x3c, 0x3a, 0x3c, 0x36, 0x3a,
-    0x47, 0x3d, 0x31, 0x3f, 0x40, 0x3e, 0x36, 0x44, 0x41, 0x3d, 0x36, 0x3f,
-    0x37, 0x3f, 0x34, 0x4b, 0x31, 0x47, 0x43, 0x3e, 0x3e, 0x3a, 0x3b, 0x4b,
-    0x37, 0x32, 0x38, 0x3d, 0x37, 0x47, 0x46, 0x4d, 0x36, 0x3c, 0x3f, 0x3a,
-    0x41, 0x31, 0x47, 0x43, 0x3d, 0x3d, 0x3e, 0x35, 0x3d, 0x46, 0x49, 0x2a,
-    0x37, 0x3c, 0x39, 0x3d, 0x47, 0x3c, 0x34, 0x2c, 0x3e, 0x38, 0x47, 0x32,
-    0x36, 0x36, 0x41, 0x38, 0x35, 0x44, 0x48, 0x3b, 0x39, 0x3e, 0x38, 0x3e,
-    0x40, 0x36, 0x37, 0x46, 0x39, 0x3b, 0x34, 0x45, 0x40, 0x3b, 0x48, 0x36,
-    0x34, 0x44, 0x37, 0x46, 0x3f, 0x42, 0x33, 0x36, 0x43, 0x3c, 0x41, 0x46,
-    0x31, 0x42, 0x43, 0x44, 0x44, 0x3e, 0x42, 0x3b, 0x3b, 0x3a, 0x3c, 0x37,
-    0x42, 0x41, 0x46, 0x38, 0x41, 0x3b, 0x40, 0x44, 0x37, 0x3c, 0x4c, 0x2e,
-    0x3a, 0x3e, 0x3b, 0x36, 0x33, 0x27, 0x37, 0x5d, 0x27, 0x34, 0x32, 0x41,
-    0x41, 0x3f, 0x40, 0x5d, 0x40, 0x3d, 0x48, 0x39, 0x2e, 0x30, 0x1f, 0x3f,
-    0x38, 0x3f, 0x40, 0x33, 0x40, 0x38, 0x31, 0x3f, 0x42, 0x3e, 0x3b, 0x3a,
-    0x42, 0x36, 0x3a, 0x42, 0x3c, 0x3b, 0x3d, 0x41, 0x3d, 0x40, 0x40, 0x3e,
-    0x36, 0x41, 0x47, 0x3d, 0x33, 0x32, 0x33, 0x44, 0x3e, 0x3a, 0x3e, 0x3d,
-    0x45, 0x3f, 0x38, 0x3f, 0x40, 0x3a, 0x3c, 0x46, 0x32, 0x42, 0x3c, 0x51,
-    0x33, 0x38, 0x3a, 0x38, 0x41, 0x34, 0x45, 0x4e, 0x35, 0x3c, 0x42, 0x3e,
-    0x3f, 0x45, 0x44, 0x4e, 0x39, 0x47, 0x3a, 0x33, 0x3e, 0x3b, 0x45, 0x42,
-    0x37, 0x3a, 0x3e, 0x33, 0x41, 0x48, 0x32, 0x2a, 0x3b, 0x37, 0x3f, 0x3d,
-    0x3a, 0x42, 0x41, 0x2f, 0x34, 0x3e, 0x49, 0x3b, 0x38, 0x3e, 0x3d, 0x3a,
-    0x37, 0x3c, 0x44, 0x41, 0x39, 0x42, 0x3f, 0x39, 0x40, 0x35, 0x3d, 0x41,
-    0x3b, 0x45, 0x44, 0x48, 0x3d, 0x42, 0x36, 0x33, 0x3e, 0x44, 0x3f, 0x41,
-    0x42, 0x40, 0x49, 0x34, 0x48, 0x41, 0x3f, 0x40, 0x3c, 0x45, 0x47, 0x34,
-    0x41, 0x37, 0x47, 0x3e, 0x41, 0x41, 0x39, 0x42, 0x3f, 0x3a, 0x46, 0x33,
-    0x39, 0x41, 0x38, 0x38, 0x3e, 0x42, 0x41, 0x38, 0x35, 0x32, 0x33, 0x38,
-    0x3a, 0x3f, 0x45, 0x66, 0x33, 0x47, 0x38, 0x3c, 0x41, 0x2f, 0x48, 0x55,
-    0x33, 0x3e, 0x49, 0x3b, 0x3c, 0x30, 0x24, 0x45, 0x3c, 0x44, 0x43, 0x32,
-    0x3d, 0x3f, 0x35, 0x3b, 0x3e, 0x36, 0x38, 0x3a, 0x36, 0x37, 0x3b, 0x41,
-    0x38, 0x42, 0x3e, 0x43, 0x39, 0x3f, 0x3c, 0x40, 0x37, 0x43, 0x3e, 0x3b,
-    0x3d, 0x35, 0x35, 0x3d, 0x43, 0x3f, 0x3a, 0x35, 0x37, 0x3c, 0x31, 0x47,
-    0x44, 0x45, 0x40, 0x32, 0x44, 0x36, 0x38, 0x51, 0x3c, 0x41, 0x45, 0x37,
-    0x39, 0x44, 0x3e, 0x4f, 0x3c, 0x3a, 0x38, 0x40, 0x3f, 0x34, 0x39, 0x4e,
-    0x3d, 0x39, 0x45, 0x3f, 0x3e, 0x3c, 0x3b, 0x42, 0x3b, 0x3b, 0x34, 0x3d,
-    0x41, 0x44, 0x39, 0x2e, 0x37, 0x44, 0x45, 0x37, 0x3d, 0x41, 0x3f, 0x33,
-    0x3f, 0x3e, 0x3e, 0x40, 0x44, 0x3f, 0x37, 0x32, 0x35, 0x3e, 0x43, 0x41,
-    0x39, 0x37, 0x35, 0x3f, 0x48, 0x3d, 0x43, 0x49, 0x38, 0x35, 0x3f, 0x48,
-    0x3b, 0x3a, 0x34, 0x3f, 0x3c, 0x44, 0x3a, 0x40, 0x36, 0x35, 0x44, 0x36,
-    0x44, 0x3b, 0x3d, 0x38, 0x3c, 0x44, 0x47, 0x3a, 0x3b, 0x45, 0x41, 0x3a,
-    0x39, 0x35, 0x44, 0x3a, 0x49, 0x36, 0x48, 0x31, 0x42, 0x43, 0x42, 0x34,
-    0x41, 0x40, 0x4d, 0x36, 0x3e, 0x35, 0x39, 0x3b, 0x3f, 0x41, 0x38, 0x39,
-    0x3c, 0x44, 0x3f, 0x39, 0x3a, 0x36, 0x3d, 0x36, 0x3a, 0x3a, 0x34, 0x3b,
-    0x38, 0x2f, 0x40, 0x34, 0x32, 0x4d, 0x43, 0x45, 0x4e, 0x3f, 0x48, 0x35,
-    0x3b, 0x4d, 0x4f, 0x39, 0x42, 0x36, 0x46, 0x36, 0x4a, 0x3c, 0x37, 0x41,
-    0x40, 0x43, 0x50, 0x36, 0x3e, 0x39, 0x44, 0x40, 0x36, 0x47, 0x3f, 0x36,
-    0x45, 0x40, 0x45, 0x41, 0x3b, 0x37, 0x41, 0x39, 0x3b, 0x48, 0x37, 0x34,
-    0x41, 0x45, 0x49, 0x3f, 0x39, 0x49, 0x3f, 0x3a, 0x42, 0x34, 0x38, 0x37,
-    0x44, 0x34, 0x3c, 0x3d, 0x40, 0x47, 0x3a, 0x36, 0x3f, 0x3c, 0x41, 0x3e,
-    0x47, 0x46, 0x46, 0x43, 0x3f, 0x38, 0x3b, 0x40, 0x3f, 0x48, 0x3b, 0x4c,
-    0x3d, 0x4b, 0x34, 0x3b, 0x44, 0x43, 0x3c, 0x49, 0x38, 0x42, 0x41, 0x36,
-    0x33, 0x36, 0x40, 0x46, 0x40, 0x3a, 0x42, 0x3c, 0x3d, 0x35, 0x3c, 0x52,
-    0x3e, 0x40, 0x43, 0x43, 0x41, 0x3b, 0x3e, 0x44, 0x3f, 0x40, 0x40, 0x43,
-    0x3d, 0x3f, 0x36, 0x42, 0x3f, 0x3c, 0x34, 0x3d, 0x33, 0x41, 0x3c, 0x39,
-    0x34, 0x43, 0x3f, 0x34, 0x3c, 0x3a, 0x3a, 0x37, 0x42, 0x41, 0x40, 0x3e,
-    0x3d, 0x3c, 0x41, 0x3c, 0x38, 0x33, 0x49, 0x46, 0x40, 0x40, 0x3a, 0x46,
-    0x38, 0x3c, 0x37, 0x34, 0x3e, 0x3d, 0x32, 0x38, 0x3c, 0x4c, 0x3a, 0x34,
-    0x35, 0x32, 0x39, 0x40, 0x3a, 0x58, 0x40, 0x46, 0x42, 0x33, 0x45, 0x39,
-    0x34, 0x4f, 0x53, 0x45, 0x43, 0x3e, 0x41, 0x36, 0x3e, 0x3f, 0x40, 0x47,
-    0x4e, 0x3d, 0x53, 0x2b, 0x41, 0x36, 0x3e, 0x38, 0x47, 0x41, 0x3f, 0x34,
-    0x47, 0x40, 0x38, 0x39, 0x3d, 0x42, 0x3f, 0x3c, 0x48, 0x3a, 0x35, 0x3c,
-    0x45, 0x49, 0x3c, 0x33, 0x33, 0x3f, 0x3c, 0x46, 0x43, 0x3f, 0x45, 0x31,
-    0x35, 0x43, 0x46, 0x3a, 0x45, 0x3c, 0x37, 0x3a, 0x37, 0x36, 0x35, 0x3f,
-    0x38, 0x49, 0x34, 0x3f, 0x3c, 0x42, 0x49, 0x3e, 0x3e, 0x3c, 0x39, 0x49,
-    0x3e, 0x3c, 0x3b, 0x43, 0x44, 0x45, 0x39, 0x4b, 0x47, 0x47, 0x3e, 0x33,
-    0x3c, 0x31, 0x34, 0x4f, 0x45, 0x43, 0x40, 0x3d, 0x42, 0x3b, 0x43, 0x50,
-    0x3c, 0x3b, 0x37, 0x42, 0x47, 0x42, 0x3e, 0x4a, 0x3f, 0x3a, 0x48, 0x3d,
-    0x48, 0x45, 0x3e, 0x40, 0x3a, 0x3c, 0x3d, 0x39, 0x41, 0x42, 0x3c, 0x42,
-    0x43, 0x3c, 0x3b, 0x3d, 0x47, 0x49, 0x38, 0x3c, 0x46, 0x3a, 0x3c, 0x3f,
-    0x3a, 0x46, 0x3a, 0x3b, 0x3d, 0x3a, 0x49, 0x46, 0x38, 0x40, 0x3e, 0x38,
-    0x37, 0x32, 0x40, 0x3c, 0x42, 0x3d, 0x3b, 0x40, 0x3a, 0x38, 0x49, 0x33,
-    0x40, 0x38, 0x2b, 0x3a, 0x3c, 0x4f, 0x4d, 0x3e, 0x35, 0x3d, 0x3b, 0x40,
-    0x3a, 0x54, 0x3e, 0x3e, 0x43, 0x30, 0x47, 0x3d, 0x3b, 0x53, 0x52, 0x4a,
-    0x43, 0x41, 0x49, 0x37, 0x3b, 0x35, 0x44, 0x3c, 0x45, 0x40, 0x4f, 0x36,
-    0x4b, 0x42, 0x41, 0x3a, 0x41, 0x44, 0x47, 0x32, 0x43, 0x35, 0x3f, 0x37,
-    0x43, 0x41, 0x43, 0x36, 0x3f, 0x3b, 0x3d, 0x38, 0x3d, 0x40, 0x42, 0x36,
-    0x44, 0x3a, 0x39, 0x47, 0x37, 0x34, 0x42, 0x3a, 0x37, 0x38, 0x37, 0x3f,
-    0x36, 0x3b, 0x45, 0x3f, 0x3f, 0x3d, 0x39, 0x3d, 0x39, 0x41, 0x37, 0x3f,
-    0x3f, 0x3d, 0x3f, 0x41, 0x43, 0x41, 0x45, 0x43, 0x41, 0x3c, 0x3e, 0x40,
-    0x40, 0x39, 0x41, 0x4f, 0x47, 0x42, 0x46, 0x48, 0x3b, 0x3b, 0x3c, 0x46,
-    0x47, 0x3e, 0x46, 0x37, 0x38, 0x3d, 0x38, 0x52, 0x36, 0x46, 0x3c, 0x3a,
-    0x3b, 0x37, 0x48, 0x4b, 0x3f, 0x42, 0x3c, 0x36, 0x40, 0x37, 0x33, 0x4c,
-    0x39, 0x34, 0x41, 0x34, 0x3f, 0x3b, 0x35, 0x4b, 0x3b, 0x45, 0x43, 0x31,
-    0x3e, 0x39, 0x30, 0x3d, 0x32, 0x43, 0x44, 0x3c, 0x3e, 0x38, 0x43, 0x41,
-    0x3e, 0x37, 0x41, 0x39, 0x39, 0x44, 0x43, 0x38, 0x3f, 0x37, 0x48, 0x3f,
-    0x3b, 0x44, 0x37, 0x3f, 0x3a, 0x3f, 0x3b, 0x33, 0x42, 0x3e, 0x2f, 0x42,
-    0x44, 0x4f, 0x52, 0x3c, 0x34, 0x33, 0x39, 0x46, 0x31, 0x55, 0x43, 0x4e,
-    0x49, 0x38, 0x4d, 0x48, 0x34, 0x4d, 0x5c, 0x4d, 0x49, 0x37, 0x4f, 0x40,
-    0x3c, 0x3d, 0x41, 0x42, 0x3f, 0x51, 0x4b, 0x2f, 0x46, 0x35, 0x39, 0x3c,
-    0x49, 0x3d, 0x4e, 0x32, 0x43, 0x47, 0x31, 0x3e, 0x42, 0x4a, 0x4c, 0x39,
-    0x43, 0x46, 0x3e, 0x3f, 0x44, 0x3c, 0x42, 0x30, 0x3e, 0x34, 0x3b, 0x3b,
-    0x3a, 0x3c, 0x42, 0x3d, 0x3d, 0x48, 0x48, 0x36, 0x3a, 0x45, 0x38, 0x40,
-    0x3c, 0x41, 0x3f, 0x49, 0x42, 0x41, 0x38, 0x3d, 0x3d, 0x44, 0x3b, 0x3d,
-    0x35, 0x48, 0x43, 0x3b, 0x32, 0x41, 0x3e, 0x3a, 0x46, 0x41, 0x40, 0x54,
-    0x38, 0x3f, 0x3c, 0x36, 0x3b, 0x36, 0x43, 0x50, 0x38, 0x3c, 0x44, 0x3b,
-    0x43, 0x47, 0x32, 0x50, 0x3d, 0x46, 0x3d, 0x3b, 0x39, 0x37, 0x3b, 0x4a,
-    0x47, 0x43, 0x46, 0x3d, 0x3d, 0x41, 0x43, 0x45, 0x3b, 0x3c, 0x39, 0x47,
-    0x43, 0x42, 0x39, 0x4c, 0x34, 0x41, 0x45, 0x3b, 0x38, 0x3e, 0x37, 0x3f,
-    0x45, 0x43, 0x39, 0x42, 0x3c, 0x3d, 0x3d, 0x3c, 0x48, 0x39, 0x3b, 0x3a,
-    0x46, 0x45, 0x3d, 0x3a, 0x3f, 0x3a, 0x45, 0x36, 0x3d, 0x43, 0x36, 0x43,
-    0x42, 0x3d, 0x41, 0x3f, 0x3a, 0x3f, 0x31, 0x37, 0x48, 0x4f, 0x4e, 0x36,
-    0x30, 0x3a, 0x3e, 0x3e, 0x38, 0x57, 0x40, 0x47, 0x47, 0x38, 0x4f, 0x46,
-    0x3d, 0x4a, 0x50, 0x4c, 0x42, 0x3b, 0x4d, 0x3d, 0x3d, 0x33, 0x40, 0x41,
-    0x48, 0x4b, 0x46, 0x39, 0x4d, 0x30, 0x45, 0x38, 0x48, 0x3c, 0x48, 0x3b,
-    0x4d, 0x40, 0x3b, 0x40, 0x46, 0x41, 0x51, 0x34, 0x40, 0x43, 0x3f, 0x42,
-    0x45, 0x42, 0x3e, 0x35, 0x3d, 0x38, 0x37, 0x3a, 0x42, 0x40, 0x43, 0x3c,
-    0x3c, 0x3d, 0x43, 0x40, 0x45, 0x3a, 0x3e, 0x3a, 0x3e, 0x40, 0x43, 0x35,
-    0x37, 0x3f, 0x3f, 0x3e, 0x39, 0x3f, 0x47, 0x38, 0x3e, 0x44, 0x3b, 0x3c,
-    0x3b, 0x32, 0x40, 0x3e, 0x42, 0x45, 0x3a, 0x52, 0x3a, 0x3e, 0x45, 0x40,
-    0x41, 0x48, 0x3f, 0x4e, 0x3e, 0x42, 0x3d, 0x39, 0x3a, 0x33, 0x3f, 0x4b,
-    0x3e, 0x38, 0x36, 0x3e, 0x31, 0x41, 0x3a, 0x40, 0x3b, 0x37, 0x3f, 0x3e,
-    0x3e, 0x3f, 0x35, 0x44, 0x3d, 0x42, 0x3d, 0x44, 0x42, 0x3f, 0x3e, 0x44,
-    0x3e, 0x45, 0x37, 0x3a, 0x3b, 0x42, 0x3f, 0x41, 0x3b, 0x3f, 0x41, 0x41,
-    0x3e, 0x34, 0x47, 0x39, 0x46, 0x46, 0x37, 0x39, 0x3f, 0x45, 0x39, 0x39,
-    0x3a, 0x40, 0x38, 0x3a, 0x31, 0x34, 0x3a, 0x41, 0x38, 0x41, 0x3a, 0x41,
-    0x44, 0x37, 0x2d, 0x41, 0x43, 0x4d, 0x4b, 0x3b, 0x2c, 0x30, 0x42, 0x3b,
-    0x31, 0x56, 0x43, 0x47, 0x47, 0x38, 0x50, 0x44, 0x40, 0x52, 0x5a, 0x50,
-    0x44, 0x3f, 0x4b, 0x35, 0x3a, 0x36, 0x41, 0x44, 0x47, 0x4e, 0x52, 0x36,
-    0x45, 0x39, 0x38, 0x3c, 0x42, 0x44, 0x40, 0x3b, 0x4b, 0x38, 0x35, 0x35,
-    0x3f, 0x40, 0x4f, 0x39, 0x3d, 0x37, 0x34, 0x3e, 0x41, 0x4c, 0x40, 0x37,
-    0x3d, 0x3b, 0x37, 0x37, 0x40, 0x42, 0x35, 0x39, 0x41, 0x42, 0x3d, 0x34,
-    0x3c, 0x37, 0x3a, 0x3d, 0x46, 0x46, 0x46, 0x3f, 0x44, 0x3d, 0x3c, 0x40,
-    0x3c, 0x3a, 0x3d, 0x3b, 0x3b, 0x41, 0x47, 0x3a, 0x43, 0x43, 0x43, 0x3b,
-    0x3e, 0x3e, 0x42, 0x46, 0x36, 0x37, 0x45, 0x35, 0x3c, 0x3b, 0x31, 0x4b,
-    0x3c, 0x3e, 0x3a, 0x3a, 0x42, 0x42, 0x34, 0x47, 0x37, 0x34, 0x41, 0x3d,
-    0x3e, 0x39, 0x43, 0x47, 0x31, 0x3b, 0x40, 0x3b, 0x42, 0x3d, 0x44, 0x44,
-    0x37, 0x39, 0x44, 0x3b, 0x40, 0x3a, 0x3d, 0x44, 0x3c, 0x40, 0x42, 0x3b,
-    0x40, 0x3e, 0x32, 0x3d, 0x3c, 0x3e, 0x44, 0x3e, 0x47, 0x3d, 0x3f, 0x2e,
-    0x3e, 0x3d, 0x3f, 0x3b, 0x3b, 0x43, 0x43, 0x3c, 0x3a, 0x3c, 0x3a, 0x36,
-    0x38, 0x46, 0x30, 0x3e, 0x3f, 0x35, 0x3e, 0x34, 0x3c, 0x34, 0x32, 0x4a,
-    0x41, 0x48, 0x48, 0x3f, 0x34, 0x37, 0x42, 0x43, 0x36, 0x59, 0x42, 0x3f,
-    0x4b, 0x3d, 0x5d, 0x45, 0x3b, 0x51, 0x51, 0x4c, 0x41, 0x40, 0x4d, 0x36,
-    0x3f, 0x34, 0x39, 0x3d, 0x4a, 0x4b, 0x4f, 0x33, 0x48, 0x32, 0x3c, 0x32,
-    0x48, 0x4c, 0x4d, 0x3a, 0x49, 0x3a, 0x3a, 0x2e, 0x4b, 0x44, 0x4f, 0x33,
-    0x3a, 0x48, 0x34, 0x43, 0x38, 0x45, 0x44, 0x35, 0x3b, 0x3f, 0x40, 0x37,
-    0x35, 0x34, 0x38, 0x3e, 0x41, 0x3e, 0x3b, 0x47, 0x41, 0x47, 0x3c, 0x3c,
-    0x39, 0x40, 0x3e, 0x45, 0x36, 0x41, 0x3f, 0x3f, 0x3c, 0x44, 0x3f, 0x43,
-    0x3d, 0x3c, 0x49, 0x42, 0x3e, 0x3f, 0x48, 0x37, 0x43, 0x37, 0x43, 0x3d,
-    0x32, 0x42, 0x44, 0x39, 0x36, 0x37, 0x40, 0x46, 0x47, 0x3d, 0x3a, 0x42,
-    0x3f, 0x38, 0x37, 0x48, 0x39, 0x40, 0x3c, 0x37, 0x33, 0x38, 0x38, 0x40,
-    0x41, 0x3c, 0x3f, 0x3b, 0x40, 0x3a, 0x47, 0x46, 0x3a, 0x37, 0x42, 0x47,
-    0x3b, 0x3f, 0x3b, 0x40, 0x33, 0x3f, 0x3a, 0x3c, 0x38, 0x3a, 0x36, 0x38,
-    0x36, 0x40, 0x48, 0x42, 0x48, 0x3c, 0x43, 0x36, 0x32, 0x3b, 0x34, 0x39,
-    0x38, 0x46, 0x37, 0x3b, 0x44, 0x34, 0x36, 0x38, 0x3c, 0x43, 0x33, 0x3c,
-    0x3b, 0x45, 0x38, 0x38, 0x44, 0x33, 0x36, 0x4a, 0x46, 0x4c, 0x4a, 0x34,
-    0x36, 0x37, 0x43, 0x42, 0x33, 0x58, 0x43, 0x48, 0x44, 0x38, 0x5f, 0x3f,
-    0x3c, 0x4d, 0x53, 0x52, 0x43, 0x47, 0x52, 0x3e, 0x3b, 0x2d, 0x3b, 0x3a,
-    0x4b, 0x49, 0x53, 0x38, 0x4c, 0x2f, 0x38, 0x31, 0x42, 0x40, 0x48, 0x3f,
-    0x44, 0x3c, 0x3c, 0x34, 0x46, 0x3f, 0x49, 0x3a, 0x43, 0x3d, 0x34, 0x42,
-    0x36, 0x47, 0x51, 0x3c, 0x3d, 0x39, 0x39, 0x3a, 0x3b, 0x35, 0x35, 0x41,
-    0x47, 0x3c, 0x3b, 0x43, 0x3f, 0x45, 0x3e, 0x40, 0x3c, 0x3f, 0x3c, 0x42,
-    0x3b, 0x3e, 0x38, 0x3f, 0x3f, 0x41, 0x39, 0x39, 0x3d, 0x43, 0x4f, 0x3d,
-    0x48, 0x3b, 0x44, 0x45, 0x3d, 0x3b, 0x49, 0x43, 0x44, 0x3d, 0x37, 0x3b,
-    0x3c, 0x45, 0x46, 0x44, 0x35, 0x3e, 0x32, 0x35, 0x34, 0x3b, 0x40, 0x43,
-    0x3e, 0x45, 0x37, 0x3d, 0x3f, 0x43, 0x36, 0x3f, 0x3f, 0x43, 0x39, 0x44,
-    0x3e, 0x3e, 0x45, 0x40, 0x3e, 0x44, 0x3b, 0x3e, 0x42, 0x42, 0x3b, 0x3d,
-    0x3a, 0x40, 0x39, 0x3a, 0x32, 0x36, 0x41, 0x30, 0x39, 0x46, 0x33, 0x3f,
-    0x46, 0x40, 0x3c, 0x31, 0x41, 0x3a, 0x3f, 0x3f, 0x3b, 0x36, 0x3f, 0x38,
-    0x36, 0x3e, 0x35, 0x35, 0x3b, 0x3d, 0x3f, 0x39, 0x46, 0x37, 0x3a, 0x47,
-    0x37, 0x39, 0x2c, 0x55, 0x40, 0x4b, 0x4a, 0x39, 0x35, 0x42, 0x3d, 0x40,
-    0x3a, 0x54, 0x41, 0x48, 0x51, 0x3b, 0x61, 0x3e, 0x3e, 0x4d, 0x51, 0x52,
-    0x3e, 0x43, 0x52, 0x41, 0x48, 0x2d, 0x35, 0x35, 0x4b, 0x44, 0x4d, 0x3c,
-    0x54, 0x33, 0x39, 0x27, 0x4a, 0x44, 0x4a, 0x41, 0x3c, 0x3a, 0x31, 0x2f,
-    0x3d, 0x42, 0x48, 0x3f, 0x42, 0x40, 0x44, 0x3b, 0x40, 0x3e, 0x49, 0x3a,
-    0x3c, 0x35, 0x30, 0x3e, 0x3e, 0x3d, 0x36, 0x3a, 0x3e, 0x3a, 0x4a, 0x3e,
-    0x3d, 0x49, 0x40, 0x43, 0x3e, 0x45, 0x3f, 0x3c, 0x3b, 0x42, 0x3a, 0x39,
-    0x3b, 0x47, 0x3f, 0x39, 0x49, 0x46, 0x3d, 0x34, 0x32, 0x44, 0x46, 0x42,
-    0x47, 0x39, 0x49, 0x48, 0x3b, 0x38, 0x45, 0x45, 0x37, 0x38, 0x46, 0x46,
-    0x37, 0x42, 0x35, 0x34, 0x45, 0x42, 0x35, 0x43, 0x3b, 0x3a, 0x43, 0x43,
-    0x40, 0x42, 0x35, 0x3f, 0x38, 0x3f, 0x3a, 0x3a, 0x3b, 0x3f, 0x3e, 0x36,
-    0x3f, 0x3c, 0x48, 0x3b, 0x3a, 0x41, 0x41, 0x35, 0x33, 0x3f, 0x3b, 0x45,
-    0x48, 0x36, 0x40, 0x38, 0x47, 0x3d, 0x35, 0x40, 0x41, 0x42, 0x41, 0x37,
-    0x41, 0x3e, 0x36, 0x48, 0x3e, 0x3c, 0x32, 0x39, 0x41, 0x40, 0x38, 0x3f,
-    0x46, 0x43, 0x33, 0x40, 0x43, 0x43, 0x3a, 0x49, 0x3f, 0x35, 0x2c, 0x5d,
-    0x43, 0x49, 0x52, 0x3b, 0x3c, 0x41, 0x40, 0x4a, 0x33, 0x50, 0x41, 0x46,
-    0x52, 0x41, 0x68, 0x48, 0x44, 0x53, 0x54, 0x55, 0x42, 0x42, 0x57, 0x44,
-    0x47, 0x35, 0x35, 0x3e, 0x4b, 0x44, 0x4e, 0x38, 0x55, 0x2f, 0x36, 0x2d,
-    0x40, 0x48, 0x4b, 0x41, 0x48, 0x36, 0x32, 0x32, 0x44, 0x42, 0x47, 0x42,
-    0x48, 0x3d, 0x3d, 0x39, 0x3e, 0x35, 0x4b, 0x39, 0x38, 0x3a, 0x39, 0x46,
-    0x38, 0x3f, 0x3a, 0x42, 0x4b, 0x45, 0x3e, 0x32, 0x46, 0x43, 0x3b, 0x40,
-    0x45, 0x41, 0x3e, 0x43, 0x37, 0x3d, 0x43, 0x3b, 0x46, 0x48, 0x42, 0x3b,
-    0x3d, 0x48, 0x4a, 0x3c, 0x3b, 0x42, 0x40, 0x3c, 0x3a, 0x42, 0x38, 0x47,
-    0x3b, 0x3b, 0x3d, 0x41, 0x3f, 0x38, 0x3f, 0x4a, 0x44, 0x3f, 0x47, 0x3a,
-    0x47, 0x44, 0x43, 0x43, 0x34, 0x3d, 0x3a, 0x3c, 0x47, 0x3f, 0x3e, 0x39,
-    0x42, 0x4a, 0x40, 0x36, 0x40, 0x41, 0x42, 0x3f, 0x3f, 0x43, 0x39, 0x38,
-    0x3c, 0x3b, 0x4c, 0x2f, 0x41, 0x39, 0x40, 0x42, 0x3f, 0x42, 0x40, 0x36,
-    0x3b, 0x45, 0x41, 0x41, 0x44, 0x45, 0x42, 0x37, 0x3d, 0x3a, 0x33, 0x3e,
-    0x3b, 0x3b, 0x3c, 0x3d, 0x38, 0x49, 0x44, 0x39, 0x3f, 0x48, 0x3d, 0x41,
-    0x42, 0x43, 0x44, 0x3e, 0x41, 0x3d, 0x32, 0x59, 0x45, 0x4b, 0x4b, 0x38,
-    0x37, 0x3d, 0x48, 0x42, 0x3d, 0x52, 0x43, 0x46, 0x54, 0x48, 0x67, 0x4d,
-    0x45, 0x4e, 0x49, 0x52, 0x45, 0x45, 0x58, 0x3b, 0x41, 0x38, 0x3f, 0x3f,
-    0x49, 0x44, 0x4f, 0x48, 0x57, 0x31, 0x3c, 0x2a, 0x3e, 0x4c, 0x41, 0x40,
-    0x47, 0x3f, 0x33, 0x34, 0x3f, 0x42, 0x48, 0x43, 0x4b, 0x38, 0x39, 0x3d,
-    0x3f, 0x3e, 0x4b, 0x3f, 0x35, 0x36, 0x3c, 0x46, 0x3c, 0x45, 0x37, 0x3b,
-    0x3c, 0x39, 0x41, 0x40, 0x41, 0x43, 0x44, 0x41, 0x45, 0x4f, 0x44, 0x43,
-    0x44, 0x3c, 0x45, 0x34, 0x42, 0x45, 0x3f, 0x46, 0x3f, 0x43, 0x3d, 0x3a,
-    0x39, 0x47, 0x45, 0x3d, 0x3f, 0x3b, 0x3d, 0x42, 0x38, 0x48, 0x48, 0x3b,
-    0x3c, 0x3a, 0x3f, 0x41, 0x44, 0x4b, 0x44, 0x48, 0x41, 0x3c, 0x3d, 0x3c,
-    0x3e, 0x3a, 0x4a, 0x3b, 0x49, 0x35, 0x3a, 0x3d, 0x41, 0x3f, 0x49, 0x39,
-    0x44, 0x37, 0x3f, 0x3c, 0x42, 0x40, 0x4a, 0x46, 0x39, 0x38, 0x46, 0x37,
-    0x41, 0x46, 0x41, 0x45, 0x40, 0x3b, 0x3b, 0x33, 0x3b, 0x39, 0x3c, 0x43,
-    0x37, 0x3c, 0x44, 0x3d, 0x46, 0x39, 0x3c, 0x3c, 0x44, 0x48, 0x41, 0x44,
-    0x41, 0x43, 0x46, 0x3b, 0x47, 0x41, 0x31, 0x41, 0x44, 0x40, 0x43, 0x42,
-    0x3e, 0x43, 0x34, 0x65, 0x4f, 0x50, 0x4d, 0x3a, 0x37, 0x43, 0x4d, 0x4a,
-    0x3d, 0x54, 0x40, 0x42, 0x5b, 0x3b, 0x71, 0x49, 0x44, 0x4f, 0x54, 0x56,
-    0x48, 0x40, 0x52, 0x41, 0x42, 0x38, 0x3c, 0x49, 0x4a, 0x45, 0x51, 0x35,
-    0x54, 0x2f, 0x35, 0x25, 0x4d, 0x3f, 0x4d, 0x43, 0x49, 0x33, 0x32, 0x3a,
-    0x46, 0x48, 0x48, 0x3d, 0x43, 0x3a, 0x3c, 0x3a, 0x48, 0x40, 0x4b, 0x3b,
-    0x45, 0x3b, 0x3f, 0x38, 0x37, 0x41, 0x31, 0x3b, 0x41, 0x43, 0x43, 0x37,
-    0x48, 0x3f, 0x48, 0x37, 0x40, 0x4a, 0x43, 0x45, 0x3d, 0x39, 0x37, 0x37,
-    0x3c, 0x3f, 0x47, 0x48, 0x43, 0x3e, 0x41, 0x3f, 0x3e, 0x38, 0x3e, 0x37,
-    0x45, 0x45, 0x35, 0x44, 0x38, 0x3a, 0x49, 0x43, 0x40, 0x41, 0x40, 0x44,
-    0x3c, 0x3e, 0x40, 0x38, 0x42, 0x41, 0x3c, 0x41, 0x3a, 0x3b, 0x3c, 0x3a,
-    0x49, 0x3c, 0x42, 0x44, 0x3f, 0x39, 0x45, 0x32, 0x45, 0x43, 0x45, 0x39,
-    0x43, 0x41, 0x4b, 0x39, 0x32, 0x3c, 0x3c, 0x36, 0x39, 0x3f, 0x46, 0x32,
-    0x39, 0x35, 0x4f, 0x32, 0x3e, 0x40, 0x3d, 0x3e, 0x3a, 0x39, 0x4c, 0x38,
-    0x43, 0x38, 0x49, 0x3b, 0x33, 0x39, 0x3b, 0x36, 0x36, 0x43, 0x3b, 0x3c,
-    0x32, 0x3c, 0x3a, 0x45, 0x31, 0x3d, 0x37, 0x40, 0x3f, 0x3f, 0x35, 0xff,
-    0x49, 0x4e, 0x4c, 0x3c, 0x36, 0x43, 0x46, 0x45, 0x41, 0x59, 0x44, 0x4a,
-    0x53, 0x44, 0x71, 0x4a, 0x39, 0x4f, 0x50, 0x4b, 0x47, 0x42, 0x5a, 0x3c,
-    0x45, 0x38, 0x3e, 0x42, 0x53, 0x43, 0x52, 0x3a, 0x52, 0x34, 0x31, 0x20,
-    0x49, 0x4e, 0x46, 0x43, 0x4b, 0x3d, 0x2b, 0x27, 0x46, 0x46, 0x47, 0x41,
-    0x42, 0x37, 0x39, 0x38, 0x45, 0x3f, 0x51, 0x3d, 0x48, 0x3f, 0x33, 0x3f,
-    0x38, 0x45, 0x31, 0x38, 0x41, 0x3d, 0x47, 0x39, 0x42, 0x40, 0x4c, 0x3f,
-    0x40, 0x42, 0x41, 0x41, 0x41, 0x42, 0x39, 0x35, 0x3f, 0x46, 0x45, 0x36,
-    0x3f, 0x43, 0x3b, 0x39, 0x41, 0x38, 0x43, 0x37, 0x3d, 0x44, 0x3b, 0x40,
-    0x36, 0x3d, 0x42, 0x41, 0x41, 0x3d, 0x38, 0x4a, 0x40, 0x4a, 0x4c, 0x38,
-    0x3f, 0x40, 0x45, 0x3c, 0x3f, 0x4b, 0x43, 0x41, 0x43, 0x3e, 0x43, 0x3f,
-    0x36, 0x40, 0x40, 0x39, 0x3f, 0x3a, 0x3a, 0x30, 0x41, 0x3c, 0x3c, 0x34,
-    0x46, 0x38, 0x43, 0x34, 0x3a, 0x42, 0x43, 0x42, 0x40, 0x41, 0x49, 0x34,
-    0x35, 0x40, 0x47, 0x3d, 0x3d, 0x3e, 0x4c, 0x33, 0x3c, 0x3b, 0x39, 0x43,
-    0x3a, 0x3e, 0x3b, 0x37, 0x3f, 0x42, 0x31, 0x3d, 0x41, 0x3e, 0x32, 0x47,
-    0x34, 0x41, 0x3d, 0x35, 0x39, 0x40, 0x38, 0x69, 0x4f, 0x4a, 0x49, 0x37,
-    0x37, 0x44, 0x43, 0x46, 0x40, 0x58, 0x43, 0x48, 0x54, 0x46, 0x6c, 0x50,
-    0x3a, 0x50, 0x50, 0x57, 0x47, 0x46, 0x5c, 0x40, 0x40, 0x39, 0x3e, 0x46,
-    0x53, 0x46, 0x5c, 0x36, 0x4f, 0x32, 0x30, 0x2d, 0x4a, 0x48, 0x41, 0x45,
-    0x47, 0x2f, 0x32, 0x2b, 0x43, 0x40, 0x43, 0x3c, 0x40, 0x44, 0x3e, 0x37,
-    0x39, 0x3e, 0x48, 0x42, 0x45, 0x36, 0x47, 0x3f, 0x3b, 0x41, 0x35, 0x35,
-    0x3b, 0x3e, 0x35, 0x43, 0x3e, 0x41, 0x3d, 0x36, 0x41, 0x3c, 0x40, 0x44,
-    0x3d, 0x40, 0x35, 0x32, 0x48, 0x3e, 0x39, 0x42, 0x44, 0x3d, 0x39, 0x3b,
-    0x3b, 0x45, 0x40, 0x4a, 0x3f, 0x41, 0x43, 0x39, 0x42, 0x44, 0x4c, 0x3c,
-    0x3f, 0x3e, 0x3f, 0x43, 0x40, 0x42, 0x4c, 0x3b, 0x3e, 0x3d, 0x49, 0x42,
-    0x40, 0x44, 0x40, 0x34, 0x36, 0x40, 0x45, 0x39, 0x42, 0x40, 0x3e, 0x44,
-    0x45, 0x37, 0x3c, 0x38, 0x3e, 0x49, 0x3e, 0x3c, 0x41, 0x3d, 0x42, 0x32,
-    0x40, 0x45, 0x3e, 0x36, 0x44, 0x3a, 0x4e, 0x38, 0x43, 0x38, 0x40, 0x38,
-    0x49, 0x42, 0x40, 0x3d, 0x42, 0x48, 0x48, 0x3d, 0x41, 0x3a, 0x3f, 0x41,
-    0x38, 0x3c, 0x44, 0x39, 0x3a, 0x32, 0x3a, 0x3e, 0x3d, 0x3b, 0x39, 0x38,
-    0x3a, 0x43, 0x3a, 0x6b, 0x45, 0x50, 0x47, 0x33, 0x38, 0x48, 0x4d, 0x4f,
-    0x39, 0x4b, 0x46, 0x4a, 0x4f, 0x42, 0x6f, 0x4b, 0x40, 0x55, 0x54, 0x50,
-    0x42, 0x47, 0x5e, 0x46, 0x40, 0x34, 0x40, 0x47, 0x52, 0x46, 0x55, 0x3b,
-    0x4f, 0x2b, 0x35, 0x33, 0x4c, 0x44, 0x44, 0x48, 0x47, 0x37, 0x35, 0x27,
-    0x4a, 0x3b, 0x41, 0x40, 0x40, 0x3e, 0x36, 0x39, 0x3e, 0x3c, 0x45, 0x3f,
-    0x4d, 0x41, 0x3d, 0x48, 0x47, 0x46, 0x33, 0x3d, 0x3d, 0x3e, 0x34, 0x3f,
-    0x3e, 0x3a, 0x41, 0x35, 0x3b, 0x3e, 0x42, 0x3c, 0x42, 0x42, 0x40, 0x31,
-    0x37, 0x40, 0x36, 0x42, 0x48, 0x39, 0x3d, 0x3c, 0x3a, 0x43, 0x39, 0x3d,
-    0x47, 0x49, 0x43, 0x3d, 0x45, 0x39, 0x44, 0x37, 0x3e, 0x4d, 0x3d, 0x40,
-    0x3d, 0x4c, 0x4d, 0x44, 0x3c, 0x3d, 0x46, 0x41, 0x41, 0x42, 0x40, 0x40,
-    0x41, 0x3a, 0x3c, 0x3b, 0x3c, 0x44, 0x40, 0x34, 0x44, 0x38, 0x3b, 0x33,
-    0x45, 0x45, 0x44, 0x3f, 0x3e, 0x3a, 0x3b, 0x3b, 0x43, 0x39, 0x3a, 0x45,
-    0x3b, 0x3a, 0x4b, 0x39, 0x3d, 0x38, 0x41, 0x39, 0x42, 0x45, 0x43, 0x40,
-    0x3e, 0x35, 0x44, 0x3f, 0x45, 0x41, 0x40, 0x3e, 0x43, 0x42, 0x37, 0x3a,
-    0x38, 0x35, 0x3a, 0x48, 0x3e, 0x3b, 0x40, 0x38, 0x3c, 0x3c, 0x3b, 0x6a,
-    0x48, 0x4d, 0x4d, 0x34, 0x38, 0x40, 0x4a, 0x45, 0x3c, 0x4f, 0x41, 0x4b,
-    0x58, 0x46, 0x71, 0x49, 0x3d, 0x53, 0x44, 0x52, 0x42, 0x3e, 0x57, 0x4c,
-    0x4c, 0x38, 0x40, 0x3b, 0x5c, 0x4c, 0x52, 0x3e, 0x4c, 0x2d, 0x32, 0x37,
-    0x49, 0x3f, 0x41, 0x47, 0x4a, 0x3b, 0x2f, 0x26, 0x45, 0x40, 0x47, 0x42,
-    0x3d, 0x39, 0x2d, 0x2c, 0x3f, 0x45, 0x46, 0x44, 0x48, 0x43, 0x42, 0x48,
-    0x40, 0x41, 0x3b, 0x3b, 0x41, 0x3b, 0x39, 0x40, 0x3b, 0x47, 0x3f, 0x38,
-    0x3f, 0x49, 0x3b, 0x35, 0x40, 0x45, 0x38, 0x35, 0x36, 0x34, 0x3e, 0x3d,
-    0x46, 0x3e, 0x33, 0x38, 0x43, 0x48, 0x3f, 0x45, 0x31, 0x44, 0x38, 0x35,
-    0x3c, 0x41, 0x4b, 0x44, 0x3d, 0x43, 0x38, 0x48, 0x3c, 0x39, 0x4a, 0x42,
-    0x3d, 0x43, 0x3f, 0x49, 0x3e, 0x47, 0x49, 0x41, 0x3b, 0x3c, 0x47, 0x3a,
-    0x3d, 0x40, 0x4a, 0x38, 0x3d, 0x3b, 0x47, 0x3a, 0x36, 0x47, 0x42, 0x46,
-    0x3c, 0x3d, 0x45, 0x3b, 0x48, 0x3f, 0x38, 0x36, 0x39, 0x46, 0x43, 0x3a,
-    0x41, 0x3d, 0x39, 0x39, 0x46, 0x37, 0x3f, 0x3f, 0x3a, 0x46, 0x3f, 0x39,
-    0x49, 0x44, 0x42, 0x3a, 0x3a, 0x43, 0x3e, 0x42, 0x3d, 0x3d, 0x43, 0x40,
-    0x43, 0x3c, 0x3f, 0x43, 0x40, 0x42, 0x3b, 0x57, 0x4a, 0x4f, 0x4a, 0x2d,
-    0x3b, 0x48, 0x45, 0x42, 0x34, 0x4c, 0x3e, 0x4f, 0x4d, 0x40, 0x6c, 0x4b,
-    0x3b, 0x4d, 0x4c, 0x57, 0x49, 0x3d, 0x5d, 0x44, 0x43, 0x29, 0x42, 0x3f,
-    0x5b, 0x47, 0x4f, 0x3e, 0x54, 0x2e, 0x34, 0x34, 0x4b, 0x47, 0x46, 0x46,
-    0x4b, 0x34, 0x36, 0x28, 0x3e, 0x3f, 0x42, 0x40, 0x3b, 0x38, 0x39, 0x42,
-    0x49, 0x3d, 0x49, 0x47, 0x47, 0x3b, 0x43, 0x34, 0x39, 0x36, 0x42, 0x3d,
-    0x37, 0x40, 0x37, 0x38, 0x46, 0x42, 0x49, 0x37, 0x44, 0x3f, 0x38, 0x3e,
-    0x36, 0x32, 0x33, 0x38, 0x40, 0x46, 0x42, 0x34, 0x41, 0x42, 0x3e, 0x38,
-    0x44, 0x3e, 0x3f, 0x43, 0x3f, 0x43, 0x35, 0x3f, 0x4d, 0x3b, 0x43, 0x39,
-    0x40, 0x47, 0x3f, 0x4a, 0x3a, 0x3f, 0x45, 0x45, 0x48, 0x42, 0x3b, 0x47,
-    0x42, 0x4b, 0x47, 0x3e, 0x3c, 0x42, 0x46, 0x39, 0x41, 0x3f, 0x48, 0x33,
-    0x45, 0x34, 0x3d, 0x30, 0x40, 0x4c, 0x40, 0x40, 0x39, 0x37, 0x40, 0x33,
-    0x49, 0x42, 0x45, 0x38, 0x3c, 0x43, 0x45, 0x35, 0x37, 0x33, 0x34, 0x3b,
-    0x3b, 0x38, 0x39, 0x41, 0x42, 0x40, 0x3e, 0x3e, 0x41, 0x33, 0x3a, 0x36,
-    0x40, 0x3a, 0x3c, 0x45, 0x43, 0x3c, 0x40, 0x41, 0x49, 0x47, 0x35, 0x34,
-    0x3a, 0x3d, 0x3a, 0x68, 0x4f, 0x48, 0x43, 0x36, 0x37, 0x3e, 0x45, 0x49,
-    0x3a, 0x4d, 0x41, 0x3d, 0x46, 0x45, 0x65, 0x46, 0x38, 0x4d, 0x4a, 0x53,
-    0x43, 0x41, 0x5d, 0x47, 0x41, 0x34, 0x39, 0x43, 0x4e, 0x48, 0x50, 0x38,
-    0x53, 0x32, 0x30, 0x2e, 0x49, 0x4c, 0x4d, 0x3f, 0x46, 0x38, 0x34, 0x2b,
-    0x44, 0x44, 0x41, 0x41, 0x36, 0x40, 0x3f, 0x32, 0x46, 0x38, 0x50, 0x45,
-    0x3f, 0x3d, 0x3b, 0x36, 0x3b, 0x43, 0x3a, 0x34, 0x36, 0x3f, 0x39, 0x35,
-    0x3c, 0x40, 0x40, 0x37, 0x3c, 0x39, 0x3d, 0x36, 0x48, 0x3d, 0x43, 0x34,
-    0x3b, 0x46, 0x43, 0x41, 0x33, 0x3e, 0x44, 0x3d, 0x44, 0x44, 0x4c, 0x3c,
-    0x37, 0x49, 0x42, 0x35, 0x45, 0x3a, 0x3c, 0x41, 0x3a, 0x45, 0x46, 0x41,
-    0x3c, 0x48, 0x46, 0x36, 0x36, 0x42, 0x3b, 0x46, 0x42, 0x45, 0x44, 0x47,
-    0x3f, 0x44, 0x3a, 0x35, 0x37, 0x46, 0x40, 0x38, 0x40, 0x3d, 0x36, 0x2c,
-    0x34, 0x47, 0x40, 0x38, 0x3f, 0x3f, 0x44, 0x2d, 0x3b, 0x3d, 0x3e, 0x44,
-    0x3c, 0x40, 0x3e, 0x33, 0x3c, 0x3a, 0x49, 0x40, 0x42, 0x42, 0x3a, 0x3b,
-    0x33, 0x3d, 0x3c, 0x43, 0x3e, 0x3d, 0x3a, 0x3a, 0x48, 0x3e, 0x3c, 0x39,
-    0x3f, 0x44, 0x37, 0x40, 0x3f, 0x3c, 0x3e, 0x3d, 0x38, 0x42, 0x34, 0x62,
-    0x51, 0x47, 0x44, 0x3f, 0x32, 0x3c, 0x3f, 0x46, 0x3d, 0x46, 0x3e, 0x45,
-    0x4a, 0x3e, 0x5d, 0x43, 0x45, 0x49, 0x4a, 0x55, 0x41, 0x3c, 0x5a, 0x44,
-    0x43, 0x3b, 0x3c, 0x3a, 0x4b, 0x4e, 0x4d, 0x42, 0x49, 0x30, 0x3b, 0x38,
-    0x42, 0x44, 0x51, 0x40, 0x48, 0x33, 0x3f, 0x2b, 0x3c, 0x41, 0x3c, 0x45,
-    0x35, 0x39, 0x42, 0x37, 0x40, 0x46, 0x46, 0x3f, 0x41, 0x45, 0x42, 0x3d,
-    0x43, 0x38, 0x3e, 0x38, 0x3c, 0x39, 0x40, 0x38, 0x37, 0x36, 0x3d, 0x3d,
-    0x38, 0x47, 0x45, 0x3b, 0x45, 0x44, 0x42, 0x2e, 0x37, 0x40, 0x42, 0x42,
-    0x3c, 0x36, 0x3b, 0x39, 0x44, 0x4d, 0x42, 0x3f, 0x3a, 0x3e, 0x45, 0x34,
-    0x3c, 0x43, 0x47, 0x43, 0x3f, 0x48, 0x3b, 0x44, 0x3d, 0x44, 0x43, 0x3e,
-    0x40, 0x4a, 0x31, 0x42, 0x42, 0x43, 0x48, 0x45, 0x3a, 0x42, 0x36, 0x2f,
-    0x3c, 0x3e, 0x3b, 0x3b, 0x44, 0x3f, 0x3a, 0x2c, 0x47, 0x3f, 0x4a, 0x40,
-    0x40, 0x40, 0x3c, 0x2a, 0x3e, 0x44, 0x40, 0x43, 0x3a, 0x42, 0x39, 0x34,
-    0x49, 0x3e, 0x36, 0x42, 0x3f, 0x42, 0x33, 0x3b, 0x3c, 0x45, 0x39, 0x3f,
-    0x3e, 0x3f, 0x41, 0x3d, 0x32, 0x3b, 0x31, 0x40, 0x3f, 0x44, 0x3c, 0x3f,
-    0x40, 0x46, 0x45, 0x36, 0x36, 0x42, 0x30, 0x57, 0x47, 0x44, 0x48, 0x3f,
-    0x35, 0x37, 0x3f, 0x3f, 0x38, 0x4a, 0x41, 0x46, 0x50, 0x3d, 0x5b, 0x41,
-    0x3e, 0x3c, 0x4a, 0x54, 0x45, 0x41, 0x5b, 0x46, 0x3d, 0x3b, 0x43, 0x33,
-    0x45, 0x4e, 0x43, 0x3b, 0x44, 0x37, 0x37, 0x32, 0x4c, 0x3d, 0x4c, 0x3f,
-    0x49, 0x3b, 0x37, 0x3a, 0x33, 0x43, 0x3f, 0x40, 0x44, 0x36, 0x3b, 0x44,
-    0x45, 0x40, 0x3c, 0x3c, 0x41, 0x44, 0x3b, 0x3d, 0x33, 0x37, 0x3c, 0x35,
-    0x3d, 0x3f, 0x39, 0x38, 0x33, 0x43, 0x3e, 0x39, 0x3b, 0x3e, 0x41, 0x35,
-    0x40, 0x46, 0x43, 0x35, 0x41, 0x3d, 0x32, 0x39, 0x3c, 0x40, 0x3e, 0x3f,
-    0x42, 0x38, 0x3b, 0x45, 0x3a, 0x3d, 0x40, 0x36, 0x3a, 0x40, 0x46, 0x44,
-    0x48, 0x45, 0x3f, 0x3a, 0x45, 0x45, 0x3c, 0x3b, 0x40, 0x4c, 0x39, 0x3a,
-    0x38, 0x39, 0x46, 0x3a, 0x3e, 0x4b, 0x34, 0x39, 0x3d, 0x3f, 0x40, 0x39,
-    0x45, 0x31, 0x45, 0x29, 0x3f, 0x38, 0x3a, 0x3f, 0x38, 0x3b, 0x36, 0x2d,
-    0x43, 0x3d, 0x45, 0x3c, 0x46, 0x3f, 0x40, 0x3c, 0x3a, 0x3e, 0x3d, 0x38,
-    0x3f, 0x3c, 0x3f, 0x42, 0x35, 0x3f, 0x3a, 0x43, 0x3d, 0x43, 0x3d, 0x33,
-    0x3d, 0x48, 0x42, 0x3d, 0x45, 0x46, 0x3d, 0x35, 0x32, 0x44, 0x42, 0x37,
-    0x3d, 0x40, 0x3c, 0x47, 0x4a, 0x45, 0x47, 0x2f, 0x33, 0x36, 0x3f, 0x42,
-    0x38, 0x43, 0x3e, 0x3a, 0x41, 0x3f, 0x5f, 0x3f, 0x48, 0x3a, 0x44, 0x47,
-    0x41, 0x3e, 0x57, 0x42, 0x41, 0x33, 0x34, 0x39, 0x42, 0x44, 0x42, 0x3c,
-    0x49, 0x34, 0x37, 0x33, 0x47, 0x38, 0x43, 0x3d, 0x43, 0x3e, 0x3e, 0x36,
-    0x41, 0x41, 0x37, 0x40, 0x39, 0x3e, 0x3b, 0x3b, 0x3e, 0x41, 0x3d, 0x3b,
-    0x43, 0x3e, 0x39, 0x43, 0x2f, 0x3e, 0x33, 0x40, 0x45, 0x47, 0x30, 0x46,
-    0x3f, 0x3f, 0x37, 0x42, 0x3d, 0x42, 0x43, 0x37, 0x38, 0x3c, 0x35, 0x34,
-    0x41, 0x43, 0x3e, 0x3e, 0x3f, 0x49, 0x35, 0x35, 0x38, 0x36, 0x3a, 0x43,
-    0x38, 0x46, 0x48, 0x36, 0x3f, 0x39, 0x3b, 0x3e, 0x48, 0x47, 0x41, 0x34,
-    0x3b, 0x3c, 0x37, 0x3e, 0x40, 0x41, 0x3b, 0x3d, 0x43, 0x42, 0x3a, 0x39,
-    0x3b, 0x43, 0x38, 0x2b, 0x43, 0x41, 0x48, 0x35, 0x44, 0x44, 0x3e, 0x2c,
-    0x46, 0x40, 0x3e, 0x41, 0x38, 0x34, 0x35, 0x37, 0x34, 0x3f, 0x3d, 0x46,
-    0x33, 0x3c, 0x3c, 0x2e, 0x3b, 0x45, 0x3d, 0x3e, 0x3a, 0x42, 0x3c, 0x36,
-    0x3a, 0x42, 0x39, 0x43, 0x35, 0x39, 0x40, 0x44, 0x47, 0x41, 0x44, 0x3d,
-    0x41, 0x3e, 0x38, 0x39, 0x45, 0x3a, 0x35, 0x43, 0x3f, 0x44, 0x41, 0x49,
-    0x47, 0x3f, 0x44, 0x40, 0x38, 0x43, 0x40, 0x3e, 0x39, 0x42, 0x32, 0x3b,
-    0x42, 0x47, 0x57, 0x37, 0x36, 0x38, 0x43, 0x49, 0x3b, 0x34, 0x54, 0x42,
-    0x3d, 0x3f, 0x3e, 0x3b, 0x38, 0x41, 0x43, 0x3a, 0x44, 0x39, 0x34, 0x2c,
-    0x38, 0x43, 0x4b, 0x3f, 0x40, 0x3e, 0x32, 0x33, 0x3d, 0x44, 0x45, 0x44,
-    0x3e, 0x35, 0x37, 0x39, 0x40, 0x3e, 0x40, 0x3c, 0x34, 0x43, 0x37, 0x40,
-    0x39, 0x3e, 0x3d, 0x43, 0x3a, 0x44, 0x43, 0x44, 0x3d, 0x3b, 0x45, 0x3b,
-    0x3a, 0x3a, 0x3f, 0x37, 0x43, 0x3b, 0x33, 0x35, 0x40, 0x47, 0x3e, 0x3c,
-    0x39, 0x3c, 0x34, 0x29, 0x3c, 0x3e, 0x46, 0x3e, 0x3c, 0x38, 0x3f, 0x2d,
-    0x3d, 0x3d, 0x3f, 0x3f, 0x3d, 0x45, 0x3b, 0x32, 0x39, 0x3f, 0x41, 0x38,
-    0x36, 0x3e, 0x3a, 0x35, 0x40, 0x3f, 0x3b, 0x32, 0x3c, 0x39, 0x3e, 0x35,
-    0x3e, 0x45, 0x34, 0x38, 0x44, 0x39, 0x3f, 0x31, 0x34, 0x39, 0x3f, 0x38,
-    0x44, 0x42, 0x3f, 0x3b, 0x39, 0x3d, 0x39, 0x3b, 0x44, 0x46, 0x38, 0x3d,
-    0x45, 0x37, 0x40, 0x3a, 0x3a, 0x39, 0x35, 0x3c, 0x39, 0x40, 0x47, 0x3e,
-    0x38, 0x42, 0x41, 0x3b, 0x48, 0x3f, 0x3a, 0x3e, 0x3d, 0x3f, 0x32, 0x3b,
-    0x3f, 0x3d, 0x3e, 0x44, 0x43, 0x41, 0x44, 0x47, 0x48, 0x41, 0x41, 0x36,
-    0x3a, 0x33, 0x3c, 0x3c, 0x37, 0x3e, 0x40, 0x34, 0x3f, 0x42, 0x53, 0x40,
-    0x3f, 0x35, 0x3e, 0x46, 0x3a, 0x3e, 0x4b, 0x41, 0x46, 0x32, 0x39, 0x36,
-    0x3b, 0x4f, 0x36, 0x3c, 0x40, 0x3a, 0x40, 0x40, 0x47, 0x3e, 0x49, 0x37,
-    0x3f, 0x31, 0x3e, 0x40, 0x3b, 0x3f, 0x43, 0x44, 0x3a, 0x3d, 0x31, 0x41,
-    0x41, 0x33, 0x43, 0x40, 0x3c, 0x3a, 0x41, 0x40, 0x37, 0x3f, 0x34, 0x3e,
-    0x44, 0x42, 0x3d, 0x3f, 0x3f, 0x34, 0x36, 0x34, 0x31, 0x41, 0x32, 0x39,
-    0x3e, 0x3d, 0x42, 0x35, 0x3e, 0x3a, 0x41, 0x47, 0x3d, 0x42, 0x33, 0x32,
-    0x43, 0x42, 0x36, 0x41, 0x3e, 0x39, 0x46, 0x39, 0x35, 0x3d, 0x3d, 0x40,
-    0x38, 0x44, 0x3d, 0x31, 0x44, 0x39, 0x3a, 0x45, 0x42, 0x41, 0x3d, 0x36,
-    0x3f, 0x3c, 0x39, 0x3d, 0x32, 0x39, 0x42, 0x34, 0x3f, 0x38, 0x44, 0x3c,
-    0x43, 0x45, 0x41, 0x2d, 0x44, 0x42, 0x3d, 0x3f, 0x44, 0x38, 0x3d, 0x35,
-    0x3a, 0x48, 0x40, 0x3b, 0x3d, 0x36, 0x3b, 0x40, 0x3f, 0x3a, 0x3a, 0x3f,
-    0x3c, 0x33, 0x39, 0x3c, 0x3c, 0x38, 0x47, 0x36, 0x3d, 0x41, 0x46, 0x41,
-    0x34, 0x46, 0x48, 0x46, 0x3d, 0x3c, 0x40, 0x43, 0x3d, 0x41, 0x37, 0x3e,
-    0x39, 0x47, 0x3f, 0x39, 0x46, 0x43, 0x3f, 0x41, 0x45, 0x37, 0x40, 0x3a,
-    0x3d, 0x44, 0x3f, 0x3b, 0x3b, 0x40, 0x4f, 0x3d, 0x3d, 0x41, 0x3c, 0x43,
-    0x3e, 0x46, 0x4e, 0x40, 0x3f, 0x34, 0x48, 0x29, 0x45, 0x44, 0x46, 0x41,
-    0x45, 0x32, 0x3e, 0x38, 0x39, 0x3a, 0x3e, 0x3e, 0x4c, 0x34, 0x3c, 0x40,
-    0x4a, 0x44, 0x3d, 0x46, 0x3b, 0x3e, 0x42, 0x42, 0x3a, 0x41, 0x43, 0x41,
-    0x39, 0x3f, 0x3e, 0x3c, 0x36, 0x48, 0x3f, 0x3e, 0x3e, 0x37, 0x3f, 0x3f,
-    0x3b, 0x40, 0x3e, 0x35, 0x32, 0x35, 0x3f, 0x33, 0x3f, 0x38, 0x43, 0x37,
-    0x49, 0x38, 0x37, 0x3c, 0x3c, 0x40, 0x40, 0x3a, 0x3a, 0x46, 0x37, 0x34,
-    0x34, 0x3b, 0x3d, 0x2f, 0x3a, 0x38, 0x3d, 0x46, 0x3d, 0x3b, 0x3d, 0x38,
-    0x35, 0x37, 0x44, 0x3c, 0x3d, 0x3e, 0x40, 0x3a, 0x40, 0x33, 0x3e, 0x38,
-    0x40, 0x3e, 0x45, 0x37, 0x3f, 0x3b, 0x3c, 0x40, 0x3b, 0x3c, 0x3b, 0x33,
-    0x41, 0x3f, 0x3b, 0x42, 0x31, 0x3b, 0x3a, 0x39, 0x3d, 0x41, 0x39, 0x40,
-    0x43, 0x45, 0x39, 0x3b, 0x3a, 0x42, 0x43, 0x3d, 0x3f, 0x40, 0x47, 0x39,
-    0x37, 0x3f, 0x47, 0x3f, 0x45, 0x41, 0x39, 0x3a, 0x41, 0x38, 0x3c, 0x3c,
-    0x39, 0x40, 0x39, 0x3b, 0x3b, 0x3e, 0x38, 0x3b, 0x37, 0x48, 0x41, 0x3f,
-    0x3e, 0x37, 0x3d, 0x44, 0x3c, 0x3e, 0x40, 0x39, 0x41, 0x42, 0x3d, 0x45,
-    0x3b, 0x3e, 0x4c, 0x3b, 0x3a, 0x3a, 0x3e, 0x47, 0x3c, 0x3f, 0x48, 0x3f,
-    0x46, 0x3f, 0x39, 0x25, 0x44, 0x3a, 0x3b, 0x40, 0x41, 0x39, 0x39, 0x47,
-    0x3b, 0x32, 0x49, 0x42, 0x41, 0x3a, 0x43, 0x41, 0x3e, 0x35, 0x37, 0x3d,
-    0x49, 0x40, 0x45, 0x3b, 0x3c, 0x38, 0x48, 0x3c, 0x3c, 0x35, 0x3f, 0x41,
-    0x41, 0x4c, 0x36, 0x39, 0x37, 0x3d, 0x3b, 0x3e, 0x44, 0x32, 0x3d, 0x3f,
-    0x3a, 0x3b, 0x3a, 0x47, 0x38, 0x42, 0x36, 0x34, 0x43, 0x3f, 0x3e, 0x40,
-    0x34, 0x31, 0x36, 0x33, 0x42, 0x37, 0x41, 0x41, 0x40, 0x3d, 0x3d, 0x37,
-    0x43, 0x3a, 0x3e, 0x44, 0x43, 0x3c, 0x35, 0x38, 0x38, 0x3c, 0x43, 0x36,
-    0x3a, 0x38, 0x40, 0x3f, 0x3d, 0x3e, 0x37, 0x3b, 0x41, 0x3a, 0x3b, 0x3d,
-    0x3c, 0x41, 0x3c, 0x41, 0x47, 0x3f, 0x3f, 0x3b, 0x3d, 0x3f, 0x3b, 0x45,
-    0x38, 0x38, 0x40, 0x38, 0x46, 0x42, 0x39, 0x3d, 0x3d, 0x3b, 0x42, 0x36,
-    0x42, 0x41, 0x3e, 0x3e, 0x36, 0x3f, 0x37, 0x3f, 0x36, 0x48, 0x3b, 0x39,
-    0x3d, 0x3f, 0x43, 0x3e, 0x3c, 0x40, 0x48, 0x46, 0x43, 0x36, 0x42, 0x39,
-    0x46, 0x3c, 0x37, 0x38, 0x49, 0x37, 0x36, 0x39, 0x3e, 0x42, 0x48, 0x3a,
-    0x3c, 0x3e, 0x42, 0x30, 0x3e, 0x34, 0x39, 0x3b, 0x46, 0x61, 0x46, 0x1e,
-    0x4c, 0x3b, 0x40, 0x2d, 0x3c, 0x42, 0x32, 0x30, 0x49, 0x3e, 0x39, 0x34,
-    0x30, 0x40, 0x31, 0x38, 0x40, 0x3d, 0x3c, 0x35, 0x3a, 0x36, 0x40, 0x3b,
-    0x41, 0x40, 0x3b, 0x39, 0x37, 0x37, 0x3f, 0x3b, 0x3c, 0x3a, 0x40, 0x3a,
-    0x36, 0x3c, 0x42, 0x39, 0x3e, 0x36, 0x40, 0x42, 0x39, 0x40, 0x3b, 0x34,
-    0x37, 0x33, 0x36, 0x3f, 0x43, 0x33, 0x33, 0x27, 0x3d, 0x46, 0x40, 0x31,
-    0x38, 0x3e, 0x41, 0x20, 0x3f, 0x39, 0x42, 0x35, 0x35, 0x45, 0x40, 0x1e,
-    0x32, 0x35, 0x32, 0x3c, 0x35, 0x44, 0x46, 0x29, 0x3a, 0x3d, 0x37, 0x42,
-    0x3b, 0x45, 0x3a, 0x26, 0x38, 0x40, 0x30, 0x37, 0x41, 0x40, 0x39, 0x2b,
-    0x49, 0x3f, 0x43, 0x43, 0x40, 0x3a, 0x38, 0x29, 0x43, 0x3a, 0x37, 0x40,
-    0x3f, 0x35, 0x3a, 0x28, 0x36, 0x3e, 0x3f, 0x43, 0x3c, 0x39, 0x42, 0x2c,
-    0x38, 0x42, 0x38, 0x3d, 0x42, 0x38, 0x35, 0x2d, 0x34, 0x38, 0x3d, 0x43,
-    0x46, 0x3e, 0x3c, 0x27, 0x3e, 0x40, 0x46, 0x39, 0x35, 0x3d, 0x42, 0x35,
-    0x42, 0x36, 0x40, 0x3e, 0x3a, 0x3e, 0x3c, 0x37, 0x3a, 0x3c, 0x48, 0x48,
-    0x48, 0x37, 0x3d, 0x38, 0x4b, 0x40, 0x43, 0x3b, 0x41, 0x46, 0x3c, 0x34,
-    0x46, 0x3c, 0x3c, 0x3c, 0x4b, 0x64, 0x4a, 0x22, 0x52, 0x41, 0x42, 0x3b,
-    0x42, 0x4a, 0x34, 0x37, 0x4b, 0x44, 0x3b, 0x4a, 0x38, 0x3f, 0x38, 0x3a,
-    0x40, 0x41, 0x42, 0x3c, 0x33, 0x3e, 0x3c, 0x42, 0x2c, 0x4e, 0x47, 0x3f,
-    0x38, 0x33, 0x39, 0x3f, 0x3b, 0x45, 0x37, 0x3a, 0x42, 0x42, 0x44, 0x3f,
-    0x3c, 0x3c, 0x3e, 0x3d, 0x3c, 0x3c, 0x40, 0x2c, 0x3c, 0x3d, 0x42, 0x39,
-    0x3a, 0x37, 0x43, 0x2a, 0x3d, 0x40, 0x41, 0x41, 0x46, 0x46, 0x42, 0x28,
-    0x39, 0x3c, 0x37, 0x44, 0x46, 0x41, 0x47, 0x2b, 0x44, 0x33, 0x39, 0x3f,
-    0x3f, 0x43, 0x3d, 0x23, 0x3a, 0x43, 0x41, 0x3b, 0x41, 0x42, 0x33, 0x1f,
-    0x43, 0x3e, 0x3d, 0x40, 0x37, 0x33, 0x42, 0x28, 0x3b, 0x38, 0x37, 0x3c,
-    0x34, 0x40, 0x44, 0x2a, 0x3c, 0x3a, 0x41, 0x37, 0x45, 0x3f, 0x3e, 0x26,
-    0x41, 0x40, 0x35, 0x3d, 0x45, 0x3e, 0x3d, 0x29, 0x3c, 0x39, 0x3f, 0x3c,
-    0x3d, 0x39, 0x38, 0x2d, 0x39, 0x38, 0x38, 0x44, 0x3c, 0x3e, 0x38, 0x26,
-    0x40, 0x36, 0x39, 0x38, 0x3f, 0x32, 0x39, 0x35, 0x3d, 0x3e, 0x35, 0x3a,
-    0x3f, 0x3f, 0x31, 0x35, 0x34, 0x45, 0x3e, 0x43, 0x48, 0x3b, 0x37, 0x39,
-    0x4d, 0x46, 0x54, 0x40, 0x41, 0x4e, 0x3d, 0x38, 0x4d, 0x38, 0x3a, 0x3b,
-    0x49, 0x5a, 0x4a, 0x1e, 0x5e, 0x39, 0x38, 0x37, 0x3a, 0x51, 0x3a, 0x3c,
-    0x50, 0x3f, 0x40, 0x42, 0x33, 0x3b, 0x2e, 0x4a, 0x3f, 0x4a, 0x3b, 0x43,
-    0x36, 0x3e, 0x3d, 0x42, 0x39, 0x46, 0x4b, 0x3c, 0x3b, 0x3b, 0x35, 0x3e,
-    0x3d, 0x4b, 0x3f, 0x41, 0x3f, 0x3b, 0x42, 0x42, 0x38, 0x3a, 0x41, 0x3d,
-    0x36, 0x41, 0x37, 0x2f, 0x38, 0x37, 0x3f, 0x34, 0x35, 0x35, 0x45, 0x30,
-    0x31, 0x42, 0x31, 0x3a, 0x3a, 0x3e, 0x3d, 0x23, 0x3f, 0x43, 0x3b, 0x41,
-    0x35, 0x3b, 0x40, 0x25, 0x45, 0x3e, 0x42, 0x3b, 0x31, 0x40, 0x36, 0x28,
-    0x43, 0x42, 0x30, 0x42, 0x32, 0x32, 0x36, 0x2c, 0x35, 0x3a, 0x3d, 0x3a,
-    0x3c, 0x36, 0x3e, 0x30, 0x41, 0x42, 0x38, 0x41, 0x41, 0x3e, 0x3c, 0x23,
-    0x37, 0x40, 0x3c, 0x3e, 0x3e, 0x3a, 0x37, 0x2b, 0x36, 0x40, 0x41, 0x42,
-    0x3e, 0x38, 0x44, 0x22, 0x46, 0x38, 0x33, 0x3b, 0x3a, 0x3a, 0x3a, 0x24,
-    0x36, 0x3b, 0x38, 0x44, 0x34, 0x38, 0x40, 0x28, 0x38, 0x3d, 0x36, 0x44,
-    0x31, 0x3e, 0x37, 0x37, 0x36, 0x3f, 0x47, 0x38, 0x3b, 0x3e, 0x2c, 0x4c,
-    0x36, 0x3c, 0x3b, 0x41, 0x4c, 0x3d, 0x3d, 0x40, 0x49, 0x44, 0x52, 0x3f,
-    0x3b, 0x4d, 0x3c, 0x3a, 0x4f, 0x3b, 0x36, 0x3b, 0x4a, 0x5f, 0x4e, 0x1f,
-    0x57, 0x3c, 0x3d, 0x3d, 0x46, 0x59, 0x42, 0x45, 0x52, 0x3d, 0x3a, 0x41,
-    0x31, 0x39, 0x39, 0x4f, 0x43, 0x4e, 0x3e, 0x37, 0x3a, 0x37, 0x33, 0x47,
-    0x32, 0x45, 0x47, 0x43, 0x31, 0x33, 0x38, 0x43, 0x3e, 0x47, 0x3d, 0x32,
-    0x3b, 0x39, 0x3c, 0x42, 0x3d, 0x47, 0x42, 0x40, 0x3d, 0x3f, 0x3c, 0x34,
-    0x3b, 0x3e, 0x42, 0x3d, 0x43, 0x35, 0x42, 0x2c, 0x35, 0x3d, 0x3c, 0x3d,
-    0x3a, 0x3c, 0x46, 0x25, 0x43, 0x35, 0x3d, 0x39, 0x3a, 0x3c, 0x40, 0x2b,
-    0x33, 0x40, 0x3d, 0x46, 0x45, 0x37, 0x3c, 0x36, 0x43, 0x37, 0x3e, 0x3a,
-    0x3c, 0x47, 0x3f, 0x38, 0x36, 0x3e, 0x3a, 0x42, 0x3c, 0x42, 0x33, 0x39,
-    0x3c, 0x3a, 0x3c, 0x40, 0x48, 0x3b, 0x40, 0x32, 0x37, 0x47, 0x34, 0x38,
-    0x33, 0x3d, 0x49, 0x2d, 0x36, 0x42, 0x3d, 0x3e, 0x47, 0x3c, 0x42, 0x2c,
-    0x3b, 0x31, 0x3f, 0x3c, 0x3d, 0x3c, 0x3f, 0x2b, 0x41, 0x35, 0x33, 0x43,
-    0x47, 0x39, 0x34, 0x2a, 0x3a, 0x3a, 0x40, 0x3d, 0x44, 0x3c, 0x39, 0x34,
-    0x43, 0x40, 0x33, 0x3a, 0x3b, 0x42, 0x38, 0x3b, 0x34, 0x35, 0x40, 0x43,
-    0x4b, 0x41, 0x3d, 0x38, 0x49, 0x44, 0x4d, 0x37, 0x3a, 0x4b, 0x40, 0x39,
-    0x4e, 0x3b, 0x30, 0x38, 0x47, 0x5d, 0x50, 0x1f, 0x54, 0x35, 0x3a, 0x39,
-    0x40, 0x4c, 0x46, 0x42, 0x52, 0x39, 0x39, 0x45, 0x41, 0x3c, 0x30, 0x5b,
-    0x43, 0x4d, 0x4a, 0x3e, 0x31, 0x39, 0x41, 0x4c, 0x36, 0x44, 0x4c, 0x39,
-    0x32, 0x41, 0x47, 0x3e, 0x34, 0x49, 0x45, 0x3b, 0x34, 0x3a, 0x3b, 0x47,
-    0x43, 0x3e, 0x43, 0x32, 0x40, 0x3e, 0x3e, 0x38, 0x37, 0x3e, 0x37, 0x3a,
-    0x3a, 0x40, 0x48, 0x2f, 0x3e, 0x3e, 0x46, 0x3a, 0x3e, 0x35, 0x49, 0x30,
-    0x3a, 0x41, 0x3e, 0x39, 0x34, 0x45, 0x3d, 0x34, 0x48, 0x43, 0x43, 0x42,
-    0x33, 0x39, 0x3b, 0x3f, 0x30, 0x46, 0x41, 0x39, 0x48, 0x3a, 0x3c, 0x3e,
-    0x3f, 0x36, 0x40, 0x3d, 0x43, 0x40, 0x3e, 0x39, 0x44, 0x40, 0x44, 0x3b,
-    0x43, 0x42, 0x39, 0x38, 0x3a, 0x3f, 0x3b, 0x3f, 0x38, 0x3d, 0x34, 0x30,
-    0x34, 0x3d, 0x3f, 0x42, 0x44, 0x3e, 0x34, 0x32, 0x37, 0x46, 0x44, 0x38,
-    0x3c, 0x45, 0x39, 0x2b, 0x41, 0x3c, 0x40, 0x40, 0x3a, 0x3a, 0x3c, 0x32,
-    0x45, 0x42, 0x3d, 0x46, 0x38, 0x3b, 0x34, 0x35, 0x38, 0x43, 0x3d, 0x34,
-    0x42, 0x3b, 0x38, 0x3d, 0x37, 0x43, 0x3f, 0x39, 0x4e, 0x39, 0x40, 0x3f,
-    0x4d, 0x43, 0x49, 0x3f, 0x36, 0x41, 0x44, 0x39, 0x48, 0x3a, 0x35, 0x39,
-    0x48, 0x59, 0x4e, 0x25, 0x58, 0x39, 0x42, 0x35, 0x43, 0x4e, 0x42, 0x3f,
-    0x4a, 0x43, 0x3b, 0x3f, 0x3b, 0x37, 0x2b, 0x5a, 0x3d, 0x44, 0x3b, 0x40,
-    0x31, 0x38, 0x37, 0x44, 0x32, 0x3e, 0x41, 0x3d, 0x2c, 0x42, 0x42, 0x3c,
-    0x37, 0x45, 0x41, 0x41, 0x3d, 0x39, 0x41, 0x40, 0x3a, 0x46, 0x41, 0x40,
-    0x40, 0x3d, 0x38, 0x31, 0x37, 0x3f, 0x42, 0x38, 0x3f, 0x3c, 0x48, 0x30,
-    0x3e, 0x39, 0x3f, 0x3d, 0x3d, 0x44, 0x52, 0x35, 0x3b, 0x32, 0x42, 0x32,
-    0x3a, 0x43, 0x39, 0x3b, 0x31, 0x43, 0x36, 0x3c, 0x3c, 0x3c, 0x41, 0x45,
-    0x42, 0x49, 0x41, 0x3b, 0x42, 0x3e, 0x41, 0x44, 0x36, 0x41, 0x3f, 0x3c,
-    0x3e, 0x47, 0x45, 0x41, 0x38, 0x41, 0x3f, 0x43, 0x35, 0x32, 0x41, 0x39,
-    0x36, 0x47, 0x35, 0x42, 0x44, 0x3b, 0x3f, 0x34, 0x48, 0x41, 0x43, 0x42,
-    0x36, 0x3e, 0x3c, 0x3d, 0x3d, 0x3b, 0x42, 0x44, 0x3a, 0x44, 0x36, 0x2a,
-    0x41, 0x39, 0x3a, 0x41, 0x46, 0x3c, 0x44, 0x2f, 0x36, 0x39, 0x3b, 0x3f,
-    0x38, 0x45, 0x3c, 0x3c, 0x3e, 0x41, 0x3c, 0x39, 0x3e, 0x40, 0x2f, 0x45,
-    0x3b, 0x41, 0x40, 0x3c, 0x4e, 0x38, 0x3e, 0x48, 0x46, 0x40, 0x48, 0x44,
-    0x40, 0x4a, 0x45, 0x3c, 0x4f, 0x39, 0x37, 0x3a, 0x4e, 0x59, 0x5c, 0x22,
-    0x58, 0x32, 0x38, 0x34, 0x40, 0x4b, 0x43, 0x43, 0x4f, 0x3e, 0x39, 0x40,
-    0x37, 0x3e, 0x2f, 0x55, 0x3f, 0x40, 0x38, 0x3f, 0x3a, 0x33, 0x37, 0x3d,
-    0x34, 0x4c, 0x37, 0x3f, 0x32, 0x39, 0x45, 0x34, 0x44, 0x4c, 0x3f, 0x3b,
-    0x3c, 0x36, 0x36, 0x43, 0x36, 0x47, 0x41, 0x46, 0x41, 0x3e, 0x41, 0x3a,
-    0x43, 0x3a, 0x48, 0x42, 0x42, 0x3e, 0x4c, 0x36, 0x3d, 0x39, 0x43, 0x46,
-    0x3d, 0x42, 0x42, 0x3b, 0x45, 0x43, 0x3c, 0x40, 0x39, 0x37, 0x34, 0x45,
-    0x3f, 0x40, 0x34, 0x38, 0x43, 0x3f, 0x36, 0x47, 0x3f, 0x3b, 0x49, 0x3c,
-    0x3a, 0x3a, 0x42, 0x4c, 0x37, 0x3e, 0x3b, 0x32, 0x47, 0x40, 0x45, 0x4d,
-    0x39, 0x3b, 0x39, 0x40, 0x3e, 0x3c, 0x3d, 0x3a, 0x3d, 0x3b, 0x3e, 0x43,
-    0x3e, 0x3f, 0x3a, 0x3c, 0x41, 0x40, 0x39, 0x3c, 0x3a, 0x38, 0x39, 0x37,
-    0x36, 0x33, 0x43, 0x45, 0x3f, 0x45, 0x41, 0x30, 0x3b, 0x34, 0x3c, 0x39,
-    0x3b, 0x45, 0x37, 0x2e, 0x36, 0x34, 0x36, 0x44, 0x3d, 0x40, 0x3a, 0x3c,
-    0x3d, 0x3b, 0x38, 0x41, 0x42, 0x3a, 0x32, 0x4b, 0x38, 0x3e, 0x41, 0x46,
-    0x57, 0x3a, 0x44, 0x48, 0x47, 0x45, 0x47, 0x3e, 0x43, 0x42, 0x45, 0x3b,
-    0x50, 0x39, 0x37, 0x3f, 0x47, 0x51, 0x5e, 0x22, 0x59, 0x33, 0x3c, 0x37,
-    0x43, 0x50, 0x49, 0x47, 0x46, 0x42, 0x39, 0x44, 0x44, 0x3d, 0x2f, 0x53,
-    0x35, 0x41, 0x40, 0x3d, 0x2d, 0x35, 0x2f, 0x3e, 0x3f, 0x37, 0x38, 0x3e,
-    0x30, 0x45, 0x46, 0x38, 0x33, 0x3c, 0x3e, 0x3b, 0x44, 0x42, 0x47, 0x49,
-    0x43, 0x40, 0x3d, 0x3c, 0x38, 0x43, 0x3e, 0x38, 0x3d, 0x40, 0x36, 0x43,
-    0x43, 0x3e, 0x40, 0x3c, 0x44, 0x47, 0x43, 0x3d, 0x41, 0x39, 0x3e, 0x45,
-    0x39, 0x3d, 0x39, 0x40, 0x42, 0x40, 0x3b, 0x4a, 0x40, 0x41, 0x3f, 0x37,
-    0x43, 0x41, 0x37, 0x4c, 0x3f, 0x3d, 0x38, 0x3a, 0x42, 0x46, 0x43, 0x4d,
-    0x3c, 0x3a, 0x43, 0x3e, 0x3b, 0x3d, 0x46, 0x4a, 0x38, 0x3d, 0x3d, 0x39,
-    0x3e, 0x3c, 0x3b, 0x3e, 0x3a, 0x40, 0x40, 0x34, 0x41, 0x3f, 0x3e, 0x3f,
-    0x47, 0x3c, 0x32, 0x3a, 0x3c, 0x44, 0x3f, 0x42, 0x41, 0x43, 0x3e, 0x3a,
-    0x3b, 0x42, 0x41, 0x39, 0x39, 0x37, 0x39, 0x3e, 0x3d, 0x33, 0x3e, 0x35,
-    0x44, 0x37, 0x40, 0x35, 0x3f, 0x47, 0x37, 0x41, 0x35, 0x38, 0x47, 0x40,
-    0x43, 0x44, 0x2e, 0x48, 0x35, 0x44, 0x41, 0x3c, 0x47, 0x3d, 0x3d, 0x52,
-    0x48, 0x41, 0x44, 0x41, 0x42, 0x4b, 0x3e, 0x3d, 0x4e, 0x32, 0x34, 0x47,
-    0x55, 0x57, 0x5f, 0x22, 0x57, 0x33, 0x40, 0x37, 0x40, 0x4a, 0x4d, 0x47,
-    0x48, 0x38, 0x3e, 0x46, 0x37, 0x42, 0x28, 0x57, 0x38, 0x42, 0x36, 0x43,
-    0x35, 0x37, 0x39, 0x39, 0x42, 0x39, 0x38, 0x3c, 0x35, 0x3c, 0x3c, 0x3a,
-    0x3c, 0x4c, 0x45, 0x3f, 0x43, 0x3d, 0x45, 0x45, 0x40, 0x47, 0x3e, 0x3e,
-    0x3d, 0x4b, 0x49, 0x35, 0x43, 0x3c, 0x36, 0x46, 0x3c, 0x46, 0x42, 0x44,
-    0x3c, 0x42, 0x3d, 0x42, 0x44, 0x3c, 0x4a, 0x40, 0x40, 0x3c, 0x3b, 0x3c,
-    0x35, 0x34, 0x2e, 0x46, 0x38, 0x3d, 0x38, 0x44, 0x41, 0x40, 0x3c, 0x52,
-    0x3b, 0x3d, 0x3b, 0x3f, 0x42, 0x47, 0x44, 0x52, 0x44, 0x44, 0x39, 0x3f,
-    0x43, 0x35, 0x3c, 0x4d, 0x39, 0x3d, 0x3b, 0x37, 0x3e, 0x38, 0x3e, 0x49,
-    0x3a, 0x37, 0x3c, 0x49, 0x40, 0x41, 0x3c, 0x40, 0x3d, 0x38, 0x39, 0x3f,
-    0x44, 0x3e, 0x42, 0x3e, 0x47, 0x40, 0x34, 0x46, 0x48, 0x37, 0x45, 0x3e,
-    0x46, 0x3f, 0x35, 0x39, 0x38, 0x3f, 0x36, 0x2c, 0x40, 0x38, 0x3e, 0x3c,
-    0x32, 0x3c, 0x46, 0x3a, 0x3f, 0x41, 0x36, 0x49, 0x42, 0x38, 0x36, 0x43,
-    0x3d, 0x41, 0x46, 0x35, 0x4f, 0x3a, 0x41, 0x5c, 0x4a, 0x42, 0x4e, 0x42,
-    0x46, 0x54, 0x3f, 0x45, 0x4c, 0x30, 0x33, 0x44, 0x56, 0x5d, 0x68, 0x26,
-    0x60, 0x33, 0x3e, 0x3a, 0x42, 0x49, 0x52, 0x47, 0x51, 0x46, 0x40, 0x47,
-    0x41, 0x3b, 0x1b, 0x4f, 0x3c, 0x45, 0x3d, 0x3d, 0x32, 0x2f, 0x3e, 0x3c,
-    0x3c, 0x3f, 0x3b, 0x3c, 0x2c, 0x3a, 0x41, 0x3c, 0x35, 0x3e, 0x3e, 0x3c,
-    0x3d, 0x3f, 0x3e, 0x40, 0x40, 0x44, 0x42, 0x3c, 0x3c, 0x3c, 0x41, 0x3c,
-    0x3c, 0x3d, 0x3e, 0x3d, 0x3c, 0x3d, 0x4a, 0x46, 0x3f, 0x35, 0x33, 0x43,
-    0x42, 0x41, 0x4d, 0x48, 0x48, 0x44, 0x3e, 0x41, 0x41, 0x36, 0x3c, 0x4c,
-    0x34, 0x47, 0x42, 0x39, 0x3e, 0x43, 0x3a, 0x53, 0x3b, 0x3b, 0x42, 0x3d,
-    0x41, 0x3c, 0x3e, 0x52, 0x3a, 0x44, 0x34, 0x43, 0x3d, 0x3d, 0x3a, 0x50,
-    0x3e, 0x33, 0x41, 0x40, 0x3f, 0x38, 0x43, 0x42, 0x3b, 0x37, 0x3e, 0x43,
-    0x3f, 0x3c, 0x41, 0x49, 0x40, 0x32, 0x40, 0x3e, 0x3b, 0x3e, 0x44, 0x3c,
-    0x35, 0x37, 0x3d, 0x41, 0x34, 0x3f, 0x3a, 0x3c, 0x47, 0x32, 0x41, 0x3d,
-    0x3c, 0x3a, 0x4a, 0x31, 0x43, 0x38, 0x45, 0x37, 0x49, 0x3c, 0x34, 0x3f,
-    0x3d, 0x3d, 0x3d, 0x45, 0x47, 0x3e, 0x37, 0x48, 0x40, 0x3b, 0x45, 0x3d,
-    0x4e, 0x42, 0x3f, 0x57, 0x4b, 0x43, 0x4b, 0x3d, 0x3f, 0x47, 0x4a, 0x43,
-    0x4e, 0x30, 0x38, 0x45, 0x59, 0x60, 0x64, 0x2d, 0x5a, 0x2d, 0x34, 0x35,
-    0x47, 0x54, 0x4e, 0x3f, 0x44, 0x45, 0x3c, 0x43, 0x3d, 0x40, 0x1c, 0x5a,
-    0x36, 0x3f, 0x3a, 0x39, 0x37, 0x3c, 0x32, 0x3b, 0x2d, 0x4a, 0x42, 0x35,
-    0x30, 0x41, 0x43, 0x3d, 0x3d, 0x45, 0x38, 0x36, 0x3e, 0x40, 0x3a, 0x4a,
-    0x34, 0x3d, 0x44, 0x3c, 0x39, 0x3b, 0x52, 0x38, 0x40, 0x3b, 0x3f, 0x3f,
-    0x35, 0x37, 0x46, 0x48, 0x38, 0x3b, 0x40, 0x36, 0x3d, 0x3a, 0x4f, 0x45,
-    0x35, 0x3a, 0x35, 0x33, 0x37, 0x43, 0x42, 0x52, 0x37, 0x3b, 0x3d, 0x42,
-    0x44, 0x3d, 0x48, 0x58, 0x33, 0x3f, 0x41, 0x44, 0x44, 0x3f, 0x3b, 0x52,
-    0x47, 0x39, 0x32, 0x3b, 0x38, 0x35, 0x48, 0x50, 0x34, 0x30, 0x39, 0x43,
-    0x42, 0x40, 0x3b, 0x4b, 0x43, 0x3d, 0x34, 0x44, 0x33, 0x39, 0x44, 0x4b,
-    0x45, 0x3e, 0x3c, 0x3f, 0x3a, 0x3e, 0x3c, 0x45, 0x36, 0x3e, 0x3d, 0x40,
-    0x43, 0x46, 0x37, 0x3d, 0x3b, 0x42, 0x43, 0x3f, 0x3a, 0x41, 0x48, 0x2f,
-    0x3e, 0x39, 0x3a, 0x39, 0x3f, 0x3a, 0x41, 0x40, 0x40, 0x3c, 0x3b, 0x3b,
-    0x3f, 0x40, 0x3e, 0x42, 0x38, 0x3f, 0x38, 0x3c, 0x49, 0x45, 0x3f, 0x62,
-    0x55, 0x47, 0x4c, 0x3c, 0x3c, 0x4a, 0x4c, 0x46, 0x4f, 0x39, 0x3a, 0x3b,
-    0x5e, 0x58, 0x6f, 0x2b, 0x5a, 0x2f, 0x3a, 0x35, 0x4b, 0x47, 0x4a, 0x46,
-    0x45, 0x3e, 0x38, 0x4f, 0x3b, 0x3d, 0x21, 0x4b, 0x3d, 0x40, 0x37, 0x40,
-    0x2d, 0x2c, 0x43, 0x3f, 0x2b, 0x3e, 0x3d, 0x39, 0x2f, 0x39, 0x44, 0x3c,
-    0x39, 0x39, 0x43, 0x3b, 0x3d, 0x3b, 0x44, 0x39, 0x42, 0x42, 0x3e, 0x40,
-    0x3b, 0x42, 0x53, 0x40, 0x32, 0x3d, 0x35, 0x3f, 0x3d, 0x45, 0x48, 0x46,
-    0x3d, 0x43, 0x3c, 0x36, 0x35, 0x39, 0x3d, 0x4a, 0x39, 0x39, 0x3e, 0x41,
-    0x38, 0x36, 0x3b, 0x53, 0x3c, 0x36, 0x32, 0x3b, 0x43, 0x3d, 0x42, 0x57,
-    0x35, 0x2f, 0x38, 0x40, 0x2f, 0x3d, 0x3c, 0x4c, 0x40, 0x2f, 0x3a, 0x36,
-    0x39, 0x3c, 0x3a, 0x51, 0x3d, 0x37, 0x39, 0x3c, 0x42, 0x40, 0x43, 0x52,
-    0x3e, 0x42, 0x3e, 0x45, 0x36, 0x34, 0x42, 0x4b, 0x3a, 0x38, 0x37, 0x3f,
-    0x36, 0x41, 0x3a, 0x45, 0x3e, 0x38, 0x35, 0x41, 0x35, 0x34, 0x37, 0x3c,
-    0x3f, 0x31, 0x3c, 0x35, 0x33, 0x43, 0x36, 0x28, 0x44, 0x42, 0x3e, 0x42,
-    0x3a, 0x41, 0x43, 0x35, 0x3d, 0x3f, 0x40, 0x3e, 0x3d, 0x33, 0x31, 0x41,
-    0x3d, 0x40, 0x3b, 0x40, 0x51, 0x40, 0x3f, 0xfb, 0x51, 0x49, 0x4c, 0x3d,
-    0x44, 0x4e, 0x47, 0x42, 0x50, 0x39, 0x39, 0x40, 0x59, 0x5d, 0x70, 0x2c,
-    0x59, 0x39, 0x38, 0x2f, 0x46, 0x50, 0x51, 0x47, 0x4c, 0x3c, 0x39, 0x48,
-    0x44, 0x3a, 0x1a, 0x51, 0x35, 0x3e, 0x34, 0x3a, 0x3d, 0x2b, 0x41, 0x39,
-    0x37, 0x4d, 0x3e, 0x43, 0x38, 0x3b, 0x3a, 0x35, 0x36, 0x3a, 0x43, 0x39,
-    0x39, 0x3a, 0x46, 0x3b, 0x39, 0x3c, 0x46, 0x36, 0x3e, 0x3d, 0x4b, 0x3d,
-    0x3b, 0x46, 0x3a, 0x41, 0x31, 0x3c, 0x44, 0x4a, 0x37, 0x42, 0x39, 0x43,
-    0x43, 0x3e, 0x40, 0x47, 0x3c, 0x3e, 0x3b, 0x43, 0x34, 0x3a, 0x43, 0x53,
-    0x3f, 0x37, 0x39, 0x37, 0x3e, 0x3b, 0x46, 0x59, 0x37, 0x37, 0x33, 0x3d,
-    0x38, 0x42, 0x36, 0x58, 0x2e, 0x32, 0x2b, 0x45, 0x32, 0x33, 0x36, 0x50,
-    0x41, 0x3f, 0x37, 0x3d, 0x3f, 0x3d, 0x46, 0x49, 0x41, 0x38, 0x33, 0x3d,
-    0x33, 0x32, 0x3a, 0x49, 0x41, 0x41, 0x3d, 0x33, 0x3b, 0x3b, 0x3a, 0x46,
-    0x34, 0x44, 0x3f, 0x3b, 0x2f, 0x3f, 0x32, 0x3c, 0x3f, 0x43, 0x3e, 0x45,
-    0x3a, 0x3c, 0x43, 0x26, 0x46, 0x37, 0x38, 0x3e, 0x36, 0x31, 0x3e, 0x34,
-    0x39, 0x3a, 0x38, 0x42, 0x38, 0x3e, 0x32, 0x42, 0x37, 0x37, 0x3c, 0x3a,
-    0x48, 0x44, 0x3a, 0x68, 0x56, 0x46, 0x4d, 0x47, 0x40, 0x4e, 0x42, 0x46,
-    0x51, 0x40, 0x38, 0x43, 0x58, 0x5d, 0x6a, 0x31, 0x57, 0x32, 0x3c, 0x36,
-    0x49, 0x56, 0x52, 0x48, 0x4b, 0x41, 0x2f, 0x4d, 0x31, 0x43, 0x1b, 0x4c,
-    0x30, 0x44, 0x33, 0x36, 0x2c, 0x3d, 0x45, 0x3a, 0x35, 0x46, 0x3d, 0x39,
-    0x2e, 0x38, 0x3f, 0x37, 0x41, 0x44, 0x46, 0x31, 0x33, 0x46, 0x37, 0x37,
-    0x3f, 0x41, 0x45, 0x30, 0x46, 0x3b, 0x50, 0x3b, 0x40, 0x39, 0x42, 0x43,
-    0x35, 0x37, 0x40, 0x44, 0x3b, 0x41, 0x3d, 0x37, 0x3a, 0x41, 0x3d, 0x46,
-    0x36, 0x41, 0x38, 0x41, 0x38, 0x3d, 0x45, 0x58, 0x3d, 0x3a, 0x3d, 0x44,
-    0x45, 0x38, 0x48, 0x5c, 0x3d, 0x39, 0x43, 0x45, 0x41, 0x3e, 0x4a, 0x56,
-    0x40, 0x33, 0x30, 0x31, 0x42, 0x39, 0x38, 0x56, 0x30, 0x3a, 0x35, 0x3e,
-    0x3f, 0x38, 0x36, 0x47, 0x3c, 0x3a, 0x3d, 0x3f, 0x37, 0x35, 0x3b, 0x4d,
-    0x43, 0x36, 0x39, 0x37, 0x3e, 0x42, 0x3d, 0x3f, 0x40, 0x3f, 0x34, 0x3b,
-    0x3f, 0x3e, 0x3b, 0x39, 0x3b, 0x3a, 0x3a, 0x3c, 0x34, 0x3f, 0x3c, 0x2a,
-    0x49, 0x3b, 0x36, 0x3c, 0x35, 0x46, 0x38, 0x3b, 0x3c, 0x39, 0x38, 0x42,
-    0x39, 0x36, 0x2e, 0x4a, 0x3d, 0x39, 0x3f, 0x3f, 0x4b, 0x45, 0x3e, 0x67,
-    0x4b, 0x4b, 0x49, 0x3e, 0x3f, 0x53, 0x4c, 0x55, 0x47, 0x32, 0x3b, 0x39,
-    0x54, 0x5b, 0x6f, 0x29, 0x5a, 0x34, 0x3e, 0x26, 0x45, 0x52, 0x59, 0x44,
-    0x59, 0x39, 0x3c, 0x47, 0x36, 0x46, 0x16, 0x50, 0x32, 0x46, 0x34, 0x35,
-    0x35, 0x2d, 0x39, 0x38, 0x2c, 0x42, 0x43, 0x3b, 0x32, 0x3f, 0x37, 0x2f,
-    0x34, 0x43, 0x46, 0x3b, 0x3b, 0x41, 0x3c, 0x37, 0x3e, 0x43, 0x4b, 0x36,
-    0x3e, 0x3c, 0x4c, 0x42, 0x40, 0x3f, 0x49, 0x40, 0x3c, 0x40, 0x3c, 0x48,
-    0x35, 0x42, 0x3f, 0x42, 0x44, 0x40, 0x45, 0x4f, 0x3f, 0x3f, 0x40, 0x42,
-    0x3b, 0x3d, 0x49, 0x55, 0x42, 0x39, 0x41, 0x3b, 0x3f, 0x38, 0x44, 0x60,
-    0x34, 0x40, 0x3b, 0x3b, 0x35, 0x3d, 0x41, 0x4e, 0x35, 0x33, 0x30, 0x3a,
-    0x3a, 0x32, 0x42, 0x4f, 0x33, 0x34, 0x2f, 0x38, 0x49, 0x38, 0x40, 0x4c,
-    0x35, 0x38, 0x3e, 0x46, 0x3f, 0x3a, 0x3a, 0x45, 0x3b, 0x34, 0x2e, 0x39,
-    0x32, 0x3e, 0x40, 0x48, 0x35, 0x44, 0x3a, 0x34, 0x3f, 0x35, 0x3b, 0x32,
-    0x40, 0x43, 0x3e, 0x38, 0x3b, 0x43, 0x3c, 0x2b, 0x46, 0x43, 0x40, 0x32,
-    0x42, 0x3b, 0x49, 0x2e, 0x3b, 0x3a, 0x3e, 0x41, 0x3c, 0x3f, 0x31, 0x3b,
-    0x41, 0x33, 0x41, 0x3c, 0x4d, 0x40, 0x38, 0x68, 0x4c, 0x4c, 0x4e, 0x3f,
-    0x3f, 0x54, 0x4a, 0x3d, 0x4c, 0x33, 0x3b, 0x3a, 0x5d, 0x60, 0x71, 0x2b,
-    0x59, 0x33, 0x3c, 0x2c, 0x47, 0x52, 0x4f, 0x51, 0x56, 0x3d, 0x39, 0x44,
-    0x35, 0x41, 0x1b, 0x4a, 0x35, 0x41, 0x37, 0x35, 0x2c, 0x35, 0x37, 0x35,
-    0x38, 0x41, 0x38, 0x3e, 0x3c, 0x40, 0x3c, 0x2f, 0x38, 0x3e, 0x3f, 0x45,
-    0x40, 0x3d, 0x3c, 0x35, 0x3c, 0x46, 0x43, 0x39, 0x37, 0x42, 0x4e, 0x3c,
-    0x42, 0x46, 0x37, 0x33, 0x43, 0x3f, 0x47, 0x4a, 0x3d, 0x3e, 0x40, 0x40,
-    0x40, 0x3f, 0x4b, 0x54, 0x36, 0x3f, 0x37, 0x40, 0x39, 0x39, 0x47, 0x51,
-    0x3d, 0x39, 0x36, 0x36, 0x40, 0x40, 0x41, 0x5a, 0x38, 0x39, 0x42, 0x38,
-    0x40, 0x39, 0x43, 0x50, 0x3a, 0x3a, 0x32, 0x3c, 0x3c, 0x35, 0x44, 0x4a,
-    0x37, 0x35, 0x36, 0x3c, 0x35, 0x30, 0x48, 0x4b, 0x3c, 0x33, 0x37, 0x3e,
-    0x42, 0x3c, 0x42, 0x4e, 0x41, 0x32, 0x3e, 0x33, 0x49, 0x39, 0x3e, 0x42,
-    0x3d, 0x39, 0x37, 0x36, 0x35, 0x41, 0x3e, 0x37, 0x37, 0x3e, 0x3d, 0x38,
-    0x3a, 0x3c, 0x41, 0x29, 0x3c, 0x3b, 0x39, 0x40, 0x43, 0x3d, 0x3e, 0x33,
-    0x3f, 0x3f, 0x3e, 0x43, 0x43, 0x38, 0x38, 0x41, 0x3b, 0x38, 0x35, 0x3a,
-    0x4b, 0x44, 0x44, 0x55, 0x4e, 0x44, 0x4d, 0x49, 0x3e, 0x53, 0x45, 0x3f,
-    0x45, 0x3d, 0x36, 0x36, 0x4f, 0x5b, 0x6b, 0x28, 0x59, 0x34, 0x39, 0x34,
-    0x4f, 0x4d, 0x52, 0x3e, 0x51, 0x34, 0x35, 0x4a, 0x3b, 0x3f, 0x21, 0x45,
-    0x36, 0x3f, 0x38, 0x33, 0x2c, 0x37, 0x32, 0x2f, 0x2b, 0x44, 0x47, 0x3f,
-    0x38, 0x3a, 0x3f, 0x2e, 0x41, 0x3f, 0x3d, 0x41, 0x35, 0x48, 0x43, 0x40,
-    0x33, 0x44, 0x40, 0x38, 0x47, 0x44, 0x4c, 0x3d, 0x41, 0x3b, 0x39, 0x36,
-    0x3e, 0x44, 0x49, 0x48, 0x3c, 0x3b, 0x34, 0x34, 0x3f, 0x3c, 0x42, 0x52,
-    0x43, 0x41, 0x3c, 0x3c, 0x3d, 0x43, 0x48, 0x54, 0x39, 0x35, 0x39, 0x3c,
-    0x43, 0x3c, 0x44, 0x5f, 0x39, 0x3d, 0x38, 0x3f, 0x36, 0x3d, 0x43, 0x58,
-    0x33, 0x3d, 0x43, 0x33, 0x3f, 0x36, 0x39, 0x54, 0x3a, 0x37, 0x2d, 0x46,
-    0x43, 0x41, 0x47, 0x46, 0x3e, 0x42, 0x34, 0x49, 0x3a, 0x3f, 0x38, 0x50,
-    0x3a, 0x3b, 0x42, 0x3a, 0x3e, 0x3c, 0x3b, 0x40, 0x42, 0x45, 0x37, 0x3b,
-    0x2f, 0x3b, 0x46, 0x30, 0x42, 0x3b, 0x3b, 0x44, 0x3b, 0x3e, 0x40, 0x1e,
-    0x33, 0x40, 0x40, 0x3d, 0x39, 0x3a, 0x41, 0x33, 0x45, 0x3e, 0x3c, 0x3f,
-    0x3f, 0x38, 0x31, 0x46, 0x3b, 0x35, 0x42, 0x39, 0x49, 0x3e, 0x3d, 0x66,
-    0x53, 0x3f, 0x44, 0x40, 0x43, 0x45, 0x48, 0x45, 0x49, 0x2d, 0x3e, 0x3a,
-    0x4f, 0x5a, 0x62, 0x27, 0x54, 0x37, 0x35, 0x34, 0x42, 0x50, 0x54, 0x43,
-    0x4d, 0x38, 0x39, 0x48, 0x38, 0x4c, 0x21, 0x3f, 0x40, 0x3a, 0x3a, 0x2f,
-    0x37, 0x2f, 0x29, 0x2c, 0x36, 0x47, 0x3f, 0x41, 0x31, 0x33, 0x3e, 0x32,
-    0x3e, 0x40, 0x42, 0x40, 0x42, 0x3a, 0x46, 0x33, 0x44, 0x40, 0x3c, 0x43,
-    0x3d, 0x41, 0x4d, 0x3d, 0x3c, 0x47, 0x46, 0x43, 0x42, 0x3e, 0x44, 0x4e,
-    0x41, 0x3a, 0x44, 0x38, 0x45, 0x3b, 0x49, 0x4c, 0x40, 0x3f, 0x37, 0x3e,
-    0x3e, 0x46, 0x41, 0x51, 0x3f, 0x39, 0x30, 0x40, 0x3e, 0x38, 0x43, 0x5b,
-    0x33, 0x3e, 0x31, 0x42, 0x3d, 0x2f, 0x49, 0x57, 0x37, 0x31, 0x46, 0x44,
-    0x3e, 0x35, 0x40, 0x55, 0x36, 0x35, 0x3d, 0x3c, 0x38, 0x33, 0x42, 0x52,
-    0x3b, 0x39, 0x34, 0x31, 0x45, 0x34, 0x3c, 0x51, 0x33, 0x39, 0x3c, 0x40,
-    0x36, 0x36, 0x42, 0x3e, 0x37, 0x3e, 0x3b, 0x40, 0x3d, 0x36, 0x41, 0x30,
-    0x42, 0x45, 0x40, 0x49, 0x3d, 0x32, 0x46, 0x26, 0x40, 0x44, 0x3a, 0x3f,
-    0x3d, 0x46, 0x45, 0x31, 0x33, 0x34, 0x3e, 0x37, 0x46, 0x3b, 0x32, 0x3a,
-    0x3d, 0x31, 0x3c, 0x36, 0x50, 0x41, 0x3b, 0x5d, 0x53, 0x42, 0x44, 0x37,
-    0x3e, 0x4d, 0x41, 0x4b, 0x49, 0x2f, 0x35, 0x3a, 0x4e, 0x59, 0x5d, 0x27,
-    0x5c, 0x30, 0x3d, 0x3a, 0x46, 0x50, 0x57, 0x4a, 0x4c, 0x36, 0x37, 0x46,
-    0x48, 0x41, 0x24, 0x49, 0x36, 0x3e, 0x41, 0x45, 0x37, 0x38, 0x2e, 0x2e,
-    0x34, 0x3c, 0x38, 0x41, 0x36, 0x3d, 0x43, 0x36, 0x3e, 0x3e, 0x41, 0x3b,
-    0x42, 0x3c, 0x43, 0x38, 0x3e, 0x3d, 0x41, 0x48, 0x47, 0x4c, 0x45, 0x3b,
-    0x37, 0x41, 0x38, 0x41, 0x3d, 0x41, 0x46, 0x4e, 0x36, 0x45, 0x38, 0x39,
-    0x42, 0x42, 0x37, 0x4c, 0x34, 0x46, 0x3c, 0x44, 0x4a, 0x39, 0x45, 0x53,
-    0x3c, 0x3f, 0x41, 0x35, 0x3c, 0x45, 0x4c, 0x5a, 0x44, 0x41, 0x30, 0x35,
-    0x40, 0x39, 0x42, 0x5a, 0x36, 0x36, 0x3a, 0x3b, 0x43, 0x35, 0x3c, 0x56,
-    0x35, 0x38, 0x2b, 0x4a, 0x3c, 0x40, 0x45, 0x54, 0x37, 0x37, 0x3a, 0x44,
-    0x42, 0x3b, 0x3d, 0x4a, 0x3f, 0x37, 0x3b, 0x35, 0x34, 0x3f, 0x40, 0x48,
-    0x45, 0x3e, 0x37, 0x38, 0x41, 0x41, 0x3d, 0x37, 0x43, 0x3d, 0x3d, 0x45,
-    0x3a, 0x38, 0x3f, 0x23, 0x4a, 0x37, 0x42, 0x3c, 0x3f, 0x43, 0x42, 0x33,
-    0x37, 0x39, 0x35, 0x3b, 0x41, 0x36, 0x2f, 0x3b, 0x41, 0x3a, 0x44, 0x3d,
-    0x3e, 0x45, 0x44, 0x50, 0x47, 0x47, 0x48, 0x3c, 0x3f, 0x45, 0x43, 0x3f,
-    0x4a, 0x33, 0x3c, 0x3a, 0x52, 0x52, 0x5a, 0x23, 0x58, 0x31, 0x3b, 0x3b,
-    0x47, 0x44, 0x54, 0x45, 0x42, 0x38, 0x38, 0x40, 0x43, 0x3f, 0x2a, 0x46,
-    0x3b, 0x46, 0x3b, 0x46, 0x35, 0x37, 0x29, 0x35, 0x38, 0x41, 0x3a, 0x31,
-    0x44, 0x41, 0x39, 0x36, 0x45, 0x41, 0x40, 0x3e, 0x40, 0x44, 0x47, 0x37,
-    0x3f, 0x42, 0x49, 0x34, 0x46, 0x3d, 0x4b, 0x3d, 0x42, 0x3b, 0x42, 0x3e,
-    0x41, 0x3b, 0x3f, 0x43, 0x47, 0x45, 0x47, 0x41, 0x40, 0x3a, 0x3d, 0x45,
-    0x40, 0x36, 0x3b, 0x3b, 0x44, 0x37, 0x46, 0x55, 0x35, 0x42, 0x3f, 0x3a,
-    0x41, 0x41, 0x44, 0x5c, 0x31, 0x44, 0x3d, 0x46, 0x39, 0x38, 0x46, 0x59,
-    0x41, 0x3b, 0x3d, 0x39, 0x33, 0x3e, 0x41, 0x58, 0x33, 0x44, 0x34, 0x31,
-    0x48, 0x3e, 0x4d, 0x56, 0x36, 0x3c, 0x37, 0x46, 0x46, 0x38, 0x45, 0x53,
-    0x35, 0x3d, 0x3a, 0x31, 0x42, 0x48, 0x45, 0x44, 0x3b, 0x3b, 0x3c, 0x41,
-    0x3d, 0x42, 0x3f, 0x2f, 0x38, 0x3c, 0x3e, 0x41, 0x44, 0x3a, 0x4a, 0x24,
-    0x37, 0x3e, 0x37, 0x48, 0x40, 0x3f, 0x46, 0x3c, 0x47, 0x4a, 0x38, 0x47,
-    0x34, 0x45, 0x31, 0x42, 0x43, 0x44, 0x3f, 0x3f, 0x49, 0x40, 0x3c, 0x41,
-    0x4d, 0x43, 0x42, 0x39, 0x39, 0x48, 0x41, 0x38, 0x47, 0x3c, 0x3c, 0x42,
-    0x44, 0x55, 0x62, 0x2a, 0x5c, 0x32, 0x3a, 0x37, 0x4c, 0x44, 0x4f, 0x3e,
-    0x4e, 0x42, 0x3a, 0x42, 0x41, 0x4a, 0x35, 0x44, 0x45, 0x3b, 0x43, 0x41,
-    0x33, 0x38, 0x28, 0x36, 0x40, 0x47, 0x3e, 0x3e, 0x3e, 0x39, 0x3a, 0x37,
-    0x44, 0x44, 0x3f, 0x3b, 0x41, 0x3c, 0x45, 0x36, 0x38, 0x3a, 0x3c, 0x42,
-    0x42, 0x3f, 0x59, 0x3c, 0x47, 0x3d, 0x38, 0x3a, 0x42, 0x44, 0x41, 0x46,
-    0x3f, 0x43, 0x48, 0x42, 0x44, 0x35, 0x3f, 0x45, 0x36, 0x3f, 0x38, 0x3a,
-    0x44, 0x3d, 0x3d, 0x4e, 0x3e, 0x45, 0x40, 0x42, 0x3c, 0x33, 0x43, 0x5a,
-    0x38, 0x3e, 0x45, 0x3a, 0x3e, 0x42, 0x45, 0x52, 0x3c, 0x42, 0x3a, 0x38,
-    0x3d, 0x3b, 0x4a, 0x57, 0x38, 0x37, 0x47, 0x44, 0x3e, 0x3c, 0x38, 0x48,
-    0x36, 0x41, 0x3f, 0x41, 0x3a, 0x3a, 0x46, 0x47, 0x42, 0x40, 0x32, 0x33,
-    0x43, 0x37, 0x41, 0x43, 0x3e, 0x40, 0x3d, 0x3a, 0x3e, 0x38, 0x42, 0x30,
-    0x3e, 0x40, 0x46, 0x42, 0x40, 0x44, 0x42, 0x23, 0x31, 0x40, 0x3f, 0x3d,
-    0x3b, 0x33, 0x40, 0x33, 0x41, 0x33, 0x43, 0x41, 0x3a, 0x3e, 0x36, 0x40,
-    0x40, 0x45, 0x37, 0x42, 0x46, 0x42, 0x39, 0x48, 0x44, 0x40, 0x40, 0x45,
-    0x3c, 0x49, 0x41, 0x3f, 0x4c, 0x3d, 0x2f, 0x3f, 0x47, 0x52, 0x54, 0x2c,
-    0x55, 0x42, 0x44, 0x3b, 0x46, 0x4f, 0x48, 0x3c, 0x45, 0x39, 0x3f, 0x4b,
-    0x3f, 0x3f, 0x36, 0x42, 0x41, 0x48, 0x44, 0x44, 0x36, 0x3b, 0x37, 0x40,
-    0x39, 0x49, 0x3a, 0x35, 0x3e, 0x48, 0x31, 0x30, 0x44, 0x38, 0x4c, 0x3c,
-    0x41, 0x3e, 0x46, 0x32, 0x44, 0x3b, 0x42, 0x3c, 0x38, 0x3a, 0x47, 0x3f,
-    0x3a, 0x42, 0x3a, 0x43, 0x40, 0x4b, 0x47, 0x3c, 0x42, 0x46, 0x45, 0x42,
-    0x3c, 0x46, 0x3d, 0x3f, 0x3e, 0x36, 0x38, 0x3e, 0x46, 0x3c, 0x4d, 0x43,
-    0x49, 0x41, 0x48, 0x3c, 0x3d, 0x39, 0x43, 0x58, 0x3a, 0x41, 0x3f, 0x38,
-    0x37, 0x3f, 0x46, 0x5d, 0x3c, 0x3c, 0x39, 0x36, 0x3d, 0x46, 0x43, 0x50,
-    0x3a, 0x47, 0x39, 0x36, 0x41, 0x3f, 0x3e, 0x51, 0x31, 0x36, 0x3e, 0x3c,
-    0x3c, 0x3a, 0x48, 0x41, 0x3a, 0x43, 0x49, 0x3e, 0x42, 0x46, 0x3f, 0x41,
-    0x49, 0x33, 0x42, 0x41, 0x45, 0x40, 0x3d, 0x2b, 0x3d, 0x38, 0x40, 0x37,
-    0x3a, 0x31, 0x45, 0x26, 0x33, 0x3d, 0x3f, 0x39, 0x36, 0x3c, 0x38, 0x33,
-    0x34, 0x3f, 0x35, 0x44, 0x3a, 0x39, 0x32, 0x41, 0x35, 0x40, 0x3c, 0x3b,
-    0x4a, 0x3f, 0x3e, 0x3e, 0x4a, 0x3e, 0x42, 0x35, 0x38, 0x43, 0x3c, 0x37,
-    0x3d, 0x3c, 0x39, 0x43, 0x3f, 0x4e, 0x54, 0x33, 0x4b, 0x37, 0x43, 0x3b,
-    0x43, 0x48, 0x43, 0x42, 0x3d, 0x46, 0x45, 0x49, 0x3a, 0x39, 0x36, 0x4a,
-    0x48, 0x48, 0x37, 0x4b, 0x42, 0x47, 0x34, 0x34, 0x43, 0x42, 0x3a, 0x3d,
-    0x3c, 0x46, 0x34, 0x39, 0x40, 0x3b, 0x3e, 0x3e, 0x37, 0x3d, 0x53, 0x3b,
-    0x48, 0x3c, 0x43, 0x44, 0x3b, 0x3f, 0x57, 0x3d, 0x39, 0x3c, 0x39, 0x3a,
-    0x3e, 0x3f, 0x43, 0x3e, 0x41, 0x47, 0x3c, 0x41, 0x40, 0x41, 0x37, 0x3f,
-    0x3b, 0x43, 0x35, 0x3e, 0x45, 0x40, 0x47, 0x59, 0x41, 0x49, 0x3b, 0x3f,
-    0x47, 0x49, 0x4b, 0x61, 0x39, 0x48, 0x39, 0x3e, 0x44, 0x34, 0x3b, 0x59,
-    0x3c, 0x42, 0x45, 0x35, 0x42, 0x41, 0x39, 0x52, 0x42, 0x3c, 0x3d, 0x3e,
-    0x3d, 0x4a, 0x4a, 0x4d, 0x3c, 0x34, 0x44, 0x3c, 0x41, 0x34, 0x38, 0x46,
-    0x38, 0x45, 0x40, 0x45, 0x40, 0x3a, 0x3d, 0x44, 0x3a, 0x37, 0x3a, 0x3a,
-    0x3b, 0x42, 0x40, 0x34, 0x3b, 0x3c, 0x42, 0x40, 0x3d, 0x32, 0x40, 0x27,
-    0x37, 0x39, 0x37, 0x46, 0x48, 0x31, 0x40, 0x30, 0x42, 0x42, 0x3a, 0x40,
-    0x3d, 0x37, 0x2a, 0x40, 0x41, 0x37, 0x3c, 0x4a, 0x46, 0x45, 0x3d, 0x34,
-    0x48, 0x41, 0x42, 0x3e, 0x3f, 0x39, 0x3c, 0x3a, 0x4f, 0x3b, 0x32, 0x3e,
-    0x43, 0x51, 0x4f, 0x2a, 0x46, 0x3a, 0x3d, 0x3b, 0x40, 0x3d, 0x4c, 0x3c,
-    0x48, 0x40, 0x36, 0x4a, 0x3a, 0x38, 0x42, 0x43, 0x4c, 0x3d, 0x47, 0x47,
-    0x33, 0x3f, 0x2d, 0x37, 0x4a, 0x43, 0x38, 0x3e, 0x49, 0x42, 0x42, 0x3d,
-    0x43, 0x47, 0x41, 0x38, 0x46, 0x37, 0x46, 0x38, 0x47, 0x42, 0x49, 0x3d,
-    0x3b, 0x37, 0x4c, 0x3c, 0x3a, 0x45, 0x3f, 0x37, 0x36, 0x3d, 0x3c, 0x40,
-    0x3e, 0x45, 0x46, 0x41, 0x41, 0x40, 0x3c, 0x44, 0x47, 0x43, 0x37, 0x3f,
-    0x3e, 0x3a, 0x3a, 0x4b, 0x3a, 0x36, 0x3d, 0x3f, 0x38, 0x3f, 0x3c, 0x58,
-    0x40, 0x49, 0x3d, 0x42, 0x38, 0x3a, 0x47, 0x50, 0x3b, 0x49, 0x40, 0x44,
-    0x3e, 0x3c, 0x38, 0x52, 0x3a, 0x3e, 0x44, 0x3c, 0x35, 0x44, 0x3a, 0x47,
-    0x3e, 0x49, 0x3f, 0x47, 0x45, 0x39, 0x3b, 0x46, 0x44, 0x3e, 0x41, 0x46,
-    0x40, 0x41, 0x40, 0x40, 0x3a, 0x35, 0x3e, 0x36, 0x3e, 0x3e, 0x3d, 0x35,
-    0x3b, 0x3c, 0x38, 0x46, 0x3b, 0x3c, 0x41, 0x2c, 0x3f, 0x42, 0x38, 0x3b,
-    0x36, 0x3b, 0x39, 0x40, 0x40, 0x38, 0x36, 0x33, 0x34, 0x42, 0x2f, 0x44,
-    0x41, 0x40, 0x39, 0x35, 0x3b, 0x44, 0x42, 0x2c, 0x41, 0x3b, 0x44, 0x41,
-    0x35, 0x44, 0x3b, 0x34, 0x44, 0x49, 0x36, 0x39, 0x3a, 0x52, 0x4d, 0x2b,
-    0x40, 0x40, 0x3e, 0x39, 0x48, 0x42, 0x3c, 0x44, 0x46, 0x49, 0x3f, 0x54,
-    0x43, 0x40, 0x2e, 0x40, 0x4f, 0x36, 0x3e, 0x3f, 0x38, 0x48, 0x44, 0x3c,
-    0x44, 0x43, 0x41, 0x47, 0x40, 0x46, 0x40, 0x37, 0x41, 0x34, 0x3a, 0x41,
-    0x41, 0x3b, 0x49, 0x39, 0x42, 0x38, 0x3d, 0x39, 0x34, 0x35, 0x43, 0x36,
-    0x3e, 0x44, 0x3f, 0x40, 0x43, 0x40, 0x40, 0x3a, 0x47, 0x42, 0x3e, 0x42,
-    0x46, 0x35, 0x3a, 0x46, 0x3c, 0x3c, 0x3c, 0x3d, 0x3f, 0x40, 0x43, 0x4c,
-    0x3a, 0x37, 0x3f, 0x43, 0x47, 0x38, 0x42, 0x58, 0x42, 0x3b, 0x34, 0x37,
-    0x3e, 0x48, 0x3c, 0x57, 0x44, 0x3c, 0x3d, 0x3a, 0x36, 0x48, 0x3c, 0x51,
-    0x3d, 0x48, 0x45, 0x45, 0x38, 0x45, 0x40, 0x3f, 0x3b, 0x35, 0x3d, 0x3f,
-    0x38, 0x47, 0x39, 0x3b, 0x36, 0x49, 0x43, 0x40, 0x3f, 0x46, 0x38, 0x40,
-    0x3f, 0x3e, 0x39, 0x32, 0x47, 0x42, 0x35, 0x33, 0x39, 0x47, 0x3c, 0x36,
-    0x3b, 0x37, 0x43, 0x35, 0x3b, 0x3b, 0x34, 0x3b, 0x38, 0x3d, 0x3e, 0x3a,
-    0x35, 0x49, 0x38, 0x40, 0x3f, 0x3f, 0x3e, 0x37, 0x43, 0x3b, 0x3e, 0x3e,
-    0x3b, 0x40, 0x44, 0x39, 0x3d, 0x3f, 0x31, 0x42, 0x42, 0x3b, 0x41, 0x3d,
-    0x3e, 0x3c, 0x37, 0x34, 0x48, 0x3d, 0x49, 0x4a, 0x47, 0x36, 0x3a, 0x34,
-    0x37, 0x36, 0x3e, 0x38, 0x33, 0x45, 0x39, 0x44, 0x34, 0x49, 0x3a, 0x3d,
-    0x34, 0x31, 0x31, 0x3d, 0x34, 0x3d, 0x41, 0x3e, 0x49, 0x41, 0x34, 0x3f,
-    0x3a, 0x42, 0x3e, 0x40, 0x3f, 0x33, 0x46, 0x3f, 0x34, 0x39, 0x37, 0x46,
-    0x3e, 0x32, 0x3f, 0x45, 0x45, 0x41, 0x3b, 0x4b, 0x35, 0x35, 0x3b, 0x4a,
-    0x3d, 0x43, 0x3b, 0x44, 0x3c, 0x38, 0x31, 0x43, 0x39, 0x35, 0x41, 0x45,
-    0x37, 0x3e, 0x43, 0x47, 0x39, 0x40, 0x41, 0x41, 0x40, 0x32, 0x37, 0x3e,
-    0x3d, 0x39, 0x3b, 0x49, 0x33, 0x35, 0x38, 0x41, 0x45, 0x37, 0x3c, 0x49,
-    0x3b, 0x34, 0x34, 0x41, 0x3a, 0x3f, 0x3e, 0x47, 0x39, 0x3c, 0x34, 0x3a,
-    0x38, 0x44, 0x40, 0x51, 0x3a, 0x37, 0x3b, 0x3f, 0x3d, 0x3a, 0x45, 0x48,
-    0x3f, 0x46, 0x35, 0x43, 0x38, 0x43, 0x35, 0x4c, 0x42, 0x47, 0x44, 0x3d,
-    0x40, 0x3a, 0x39, 0x4e, 0x3d, 0x37, 0x3c, 0x42, 0x40, 0x48, 0x44, 0x4c,
-    0x31, 0x40, 0x42, 0x3b, 0x45, 0x45, 0x3f, 0x3e, 0x3d, 0x44, 0x3f, 0x31,
-    0x3f, 0x44, 0x45, 0x37, 0x3e, 0x3d, 0x35, 0x3b, 0x2d, 0x44, 0x4a, 0x3a,
-    0x2b, 0x37, 0x38, 0x46, 0x41, 0x39, 0x3c, 0x3c, 0x46, 0x33, 0x36, 0x3c,
-    0x4b, 0x34, 0x49, 0x50, 0x30, 0x3c, 0x33, 0x41, 0x44, 0x33, 0x43, 0x39,
-    0x36, 0x45, 0x33, 0x3b, 0x3d, 0x36, 0x47, 0x30, 0x42, 0x37, 0x49, 0x3e,
-    0x3b, 0x49, 0x3d, 0x3b, 0x3a, 0x41, 0x38, 0x44, 0x42, 0x3b, 0x3f, 0x40,
-    0x46, 0x35, 0x38, 0x3c, 0x48, 0x3a, 0x46, 0x41, 0x36, 0x36, 0x41, 0x3e,
-    0x43, 0x3e, 0x32, 0x39, 0x3a, 0x41, 0x30, 0x3e, 0x40, 0x3e, 0x36, 0x3a,
-    0x45, 0x45, 0x3a, 0x3c, 0x31, 0x3b, 0x47, 0x3f, 0x36, 0x3a, 0x3c, 0x41,
-    0x3b, 0x41, 0x39, 0x46, 0x3f, 0x3c, 0x34, 0x3e, 0x41, 0x45, 0x41, 0x42,
-    0x39, 0x40, 0x40, 0x44, 0x45, 0x42, 0x34, 0x3f, 0x3e, 0x31, 0x3b, 0x41,
-    0x33, 0x43, 0x37, 0x44, 0x44, 0x3a, 0x36, 0x36, 0x48, 0x3c, 0x37, 0x47,
-    0x39, 0x3e, 0x3e, 0x3c, 0x3c, 0x41, 0x3c, 0x44, 0x3b, 0x42, 0x3f, 0x3a,
-    0x43, 0x3b, 0x3e, 0x48, 0x36, 0x3f, 0x3d, 0x34, 0x40, 0x43, 0x35, 0x4f,
-    0x34, 0x39, 0x3b, 0x41, 0x40, 0x39, 0x37, 0x4c, 0x39, 0x36, 0x39, 0x39,
-    0x47, 0x41, 0x43, 0x3f, 0x3f, 0x33, 0x42, 0x3f, 0x42, 0x40, 0x37, 0x40,
-    0x3f, 0x34, 0x45, 0x3d, 0x2d, 0x3c, 0x44, 0x3b, 0x43, 0x37, 0x26, 0x50,
-    0x43, 0x44, 0x3d, 0x43, 0x42, 0x2d, 0x3c, 0x33, 0x4a, 0x32, 0x4a, 0x53,
-    0x33, 0x38, 0x27, 0x36, 0x42, 0x30, 0x47, 0x3d, 0x36, 0x45, 0x46, 0x36,
-    0x3b, 0x3b, 0x40, 0x33, 0x37, 0x36, 0x44, 0x46, 0x3d, 0x35, 0x40, 0x38,
-    0x3b, 0x40, 0x36, 0x3c, 0x3d, 0x37, 0x31, 0x41, 0x33, 0x3c, 0x38, 0x3f,
-    0x43, 0x3a, 0x40, 0x49, 0x38, 0x39, 0x38, 0x3d, 0x43, 0x3d, 0x39, 0x3b,
-    0x3d, 0x3f, 0x38, 0x42, 0x34, 0x43, 0x33, 0x3e, 0x43, 0x3e, 0x40, 0x42,
-    0x3b, 0x45, 0x37, 0x44, 0x43, 0x39, 0x3c, 0x3d, 0x37, 0x44, 0x3a, 0x3b,
-    0x47, 0x3f, 0x3a, 0x3c, 0x3a, 0x3b, 0x3f, 0x43, 0x3e, 0x3d, 0x46, 0x3e,
-    0x37, 0x36, 0x3f, 0x40, 0x42, 0x42, 0x37, 0x36, 0x48, 0x35, 0x44, 0x44,
-    0x39, 0x3c, 0x3b, 0x41, 0x44, 0x49, 0x3a, 0x40, 0x41, 0x36, 0x33, 0x3a,
-    0x3c, 0x3d, 0x40, 0x3f, 0x43, 0x36, 0x3c, 0x3a, 0x3f, 0x4b, 0x32, 0x49,
-    0x49, 0x3e, 0x3a, 0x3e, 0x3f, 0x41, 0x3c, 0x47, 0x40, 0x41, 0x45, 0x3e,
-    0x47, 0x47, 0x3f, 0x47, 0x45, 0x3e, 0x31, 0x43, 0x4a, 0x44, 0x36, 0x40,
-    0x41, 0x47, 0x3e, 0x42, 0x37, 0x40, 0x3b, 0x46, 0x37, 0x41, 0x3e, 0x3c,
-    0x27, 0x40, 0x49, 0x42, 0x42, 0x39, 0x30, 0x49, 0x43, 0x38, 0x3d, 0x42,
-    0x43, 0x2f, 0x3b, 0x37, 0x4b, 0x2d, 0x4f, 0x52, 0x30, 0x31, 0x2f, 0x3a,
-    0x49, 0x38, 0x4f, 0x45, 0x2e, 0x47, 0x3a, 0x32, 0x33, 0x3f, 0x4a, 0x2e,
-    0x33, 0x3b, 0x3e, 0x3e, 0x49, 0x45, 0x44, 0x38, 0x3c, 0x35, 0x45, 0x47,
-    0x41, 0x3b, 0x3c, 0x48, 0x46, 0x39, 0x39, 0x3b, 0x3f, 0x41, 0x38, 0x42,
-    0x3d, 0x46, 0x33, 0x41, 0x36, 0x3f, 0x3f, 0x3c, 0x33, 0x3e, 0x3e, 0x40,
-    0x44, 0x40, 0x3c, 0x38, 0x46, 0x3a, 0x40, 0x36, 0x42, 0x35, 0x3f, 0x3b,
-    0x3b, 0x43, 0x3c, 0x40, 0x40, 0x49, 0x2e, 0x39, 0x40, 0x3f, 0x45, 0x41,
-    0x3f, 0x30, 0x42, 0x3d, 0x40, 0x3c, 0x3a, 0x3b, 0x3b, 0x40, 0x39, 0x42,
-    0x3a, 0x3f, 0x3f, 0x3e, 0x35, 0x3b, 0x38, 0x45, 0x47, 0x35, 0x44, 0x3e,
-    0x3b, 0x3f, 0x3f, 0x40, 0x3a, 0x35, 0x30, 0x49, 0x45, 0x35, 0x3b, 0x39,
-    0x3b, 0x48, 0x3f, 0x37, 0x39, 0x40, 0x43, 0x45, 0x3d, 0x40, 0x41, 0x3a,
-    0x33, 0x3d, 0x3a, 0x4b, 0x40, 0x42, 0x40, 0x42, 0x43, 0x39, 0x3c, 0x49,
-    0x3e, 0x47, 0x3e, 0x44, 0x3f, 0x3a, 0x40, 0x41, 0x3f, 0x42, 0x42, 0x37,
-    0x3e, 0x3b, 0x36, 0x3e, 0x3b, 0x3c, 0x48, 0x43, 0x2d, 0x46, 0x4a, 0x38,
-    0x45, 0x3a, 0x29, 0x46, 0x40, 0x3c, 0x40, 0x44, 0x40, 0x33, 0x2f, 0x33,
-    0x48, 0x2e, 0x51, 0x4f, 0x2b, 0x32, 0x2e, 0x2d, 0x45, 0x33, 0x4d, 0x41,
-    0x29, 0x4b, 0x41, 0x39, 0x2f, 0x3a, 0x49, 0x31, 0x37, 0x40, 0x47, 0x4c,
-    0x3e, 0x31, 0x41, 0x3f, 0x43, 0x37, 0x45, 0x4f, 0x41, 0x3c, 0x30, 0x4a,
-    0x37, 0x37, 0x36, 0x39, 0x31, 0x3d, 0x36, 0x4b, 0x37, 0x44, 0x3c, 0x43,
-    0x44, 0x36, 0x3f, 0x3b, 0x34, 0x3e, 0x3a, 0x35, 0x38, 0x3f, 0x33, 0x37,
-    0x3b, 0x3d, 0x46, 0x38, 0x3b, 0x37, 0x38, 0x3b, 0x31, 0x3e, 0x3d, 0x3b,
-    0x3d, 0x39, 0x35, 0x33, 0x33, 0x3c, 0x39, 0x39, 0x48, 0x39, 0x39, 0x3f,
-    0x3e, 0x36, 0x47, 0x3a, 0x44, 0x40, 0x32, 0x3c, 0x37, 0x35, 0x40, 0x3f,
-    0x3a, 0x38, 0x3b, 0x3d, 0x46, 0x45, 0x36, 0x43, 0x40, 0x3d, 0x41, 0x41,
-    0x47, 0x3a, 0x3d, 0x3e, 0x43, 0x42, 0x32, 0x36, 0x41, 0x37, 0x3b, 0x35,
-    0x36, 0x44, 0x36, 0x3c, 0x43, 0x32, 0x3e, 0x3e, 0x42, 0x45, 0x32, 0x3c,
-    0x3a, 0x3b, 0x35, 0x43, 0x41, 0x3d, 0x44, 0x50, 0x43, 0x31, 0x3e, 0x44,
-    0x44, 0x41, 0x3a, 0x44, 0x36, 0x39, 0x3b, 0x3c, 0x32, 0x38, 0x3b, 0x45,
-    0x38, 0x43, 0x40, 0x42, 0x33, 0x3e, 0x4a, 0x42, 0x45, 0x39, 0x2f, 0x42,
-    0x39, 0x35, 0x44, 0x3e, 0x39, 0x2f, 0x34, 0x33, 0x49, 0x29, 0x50, 0x4f,
-    0x2b, 0x36, 0x34, 0x2d, 0x47, 0x33, 0x49, 0x3c, 0x33, 0x51, 0x49, 0x3f,
-    0x34, 0x39, 0x4a, 0x2c, 0x34, 0x45, 0x4f, 0x47, 0x34, 0x42, 0x3a, 0x3d,
-    0x36, 0x4a, 0x3b, 0x43, 0x36, 0x3f, 0x39, 0x4b, 0x38, 0x3a, 0x31, 0x3d,
-    0x32, 0x42, 0x3a, 0x47, 0x48, 0x3e, 0x44, 0x3f, 0x39, 0x3e, 0x44, 0x35,
-    0x41, 0x3c, 0x45, 0x3a, 0x3e, 0x3b, 0x3d, 0x2f, 0x37, 0x40, 0x3e, 0x43,
-    0x39, 0x39, 0x33, 0x3b, 0x37, 0x3b, 0x37, 0x37, 0x37, 0x39, 0x36, 0x31,
-    0x39, 0x3b, 0x41, 0x39, 0x3b, 0x40, 0x36, 0x37, 0x42, 0x39, 0x3a, 0x46,
-    0x3f, 0x30, 0x38, 0x39, 0x35, 0x32, 0x3e, 0x3a, 0x43, 0x43, 0x3e, 0x33,
-    0x42, 0x3f, 0x41, 0x3c, 0x46, 0x34, 0x34, 0x40, 0x43, 0x37, 0x32, 0x43,
-    0x3c, 0x37, 0x36, 0x33, 0x3d, 0x36, 0x3a, 0x40, 0x39, 0x38, 0x32, 0x3e,
-    0x32, 0x3d, 0x37, 0x49, 0x42, 0x47, 0x41, 0x3b, 0x3d, 0x3c, 0x3a, 0x37,
-    0x3c, 0x45, 0x3a, 0x45, 0x36, 0x44, 0x3a, 0x3a, 0x3a, 0x3c, 0x43, 0x3b,
-    0x3b, 0x35, 0x38, 0x47, 0x36, 0x40, 0x32, 0x43, 0x3e, 0x39, 0x42, 0x40,
-    0x2c, 0x3c, 0x4c, 0x4c, 0x43, 0x3b, 0x37, 0x4a, 0x3f, 0x3c, 0x45, 0x44,
-    0x3f, 0x30, 0x36, 0x31, 0x4f, 0x2f, 0x5d, 0x4b, 0x34, 0x34, 0x2d, 0x2b,
-    0x44, 0x31, 0x4e, 0x40, 0x2e, 0x4d, 0x48, 0x3e, 0x37, 0x2b, 0x49, 0x25,
-    0x31, 0x49, 0x44, 0x49, 0x39, 0x39, 0x4b, 0x3a, 0x3a, 0x41, 0x3e, 0x42,
-    0x3c, 0x36, 0x36, 0x4a, 0x32, 0x44, 0x3e, 0x48, 0x3e, 0x3c, 0x37, 0x49,
-    0x3d, 0x34, 0x3f, 0x37, 0x33, 0x36, 0x46, 0x3a, 0x3a, 0x31, 0x45, 0x3f,
-    0x3a, 0x31, 0x3b, 0x33, 0x41, 0x42, 0x35, 0x39, 0x38, 0x44, 0x36, 0x3a,
-    0x3f, 0x3b, 0x37, 0x3e, 0x3b, 0x38, 0x2f, 0x32, 0x44, 0x3d, 0x44, 0x41,
-    0x39, 0x36, 0x3a, 0x34, 0x39, 0x38, 0x34, 0x3f, 0x3b, 0x37, 0x34, 0x34,
-    0x40, 0x3d, 0x34, 0x3a, 0x46, 0x42, 0x3f, 0x34, 0x38, 0x33, 0x39, 0x44,
-    0x3f, 0x41, 0x3c, 0x31, 0x40, 0x32, 0x3f, 0x37, 0x37, 0x41, 0x3e, 0x35,
-    0x37, 0x48, 0x3b, 0x41, 0x3d, 0x3a, 0x3f, 0x35, 0x33, 0x3c, 0x36, 0x3b,
-    0x3a, 0x48, 0x33, 0x42, 0x37, 0x33, 0x39, 0x41, 0x3c, 0x3d, 0x3b, 0x4d,
-    0x39, 0x3a, 0x3e, 0x44, 0x3d, 0x41, 0x3b, 0x38, 0x49, 0x41, 0x3a, 0x38,
-    0x34, 0x38, 0x38, 0x3c, 0x45, 0x3c, 0x37, 0x3b, 0x36, 0x3e, 0x4a, 0x4b,
-    0x42, 0x3f, 0x32, 0x45, 0x46, 0x35, 0x46, 0x41, 0x38, 0x33, 0x39, 0x37,
-    0x44, 0x2b, 0x60, 0x4a, 0x2a, 0x2e, 0x35, 0x2d, 0x43, 0x37, 0x51, 0x47,
-    0x2f, 0x4d, 0x50, 0x3e, 0x3a, 0x33, 0x4f, 0x2a, 0x35, 0x45, 0x4a, 0x4c,
-    0x3b, 0x3d, 0x43, 0x44, 0x3d, 0x3f, 0x4a, 0x3e, 0x49, 0x37, 0x2e, 0x4f,
-    0x39, 0x3f, 0x32, 0x3c, 0x37, 0x3b, 0x39, 0x4d, 0x34, 0x3f, 0x46, 0x44,
-    0x3d, 0x40, 0x3f, 0x40, 0x39, 0x33, 0x39, 0x3e, 0x3d, 0x40, 0x31, 0x30,
-    0x35, 0x3d, 0x3e, 0x3a, 0x3e, 0x32, 0x31, 0x3e, 0x48, 0x3c, 0x40, 0x43,
-    0x3f, 0x3f, 0x34, 0x2e, 0x3a, 0x3e, 0x3b, 0x43, 0x45, 0x32, 0x3a, 0x31,
-    0x37, 0x38, 0x31, 0x35, 0x34, 0x3d, 0x42, 0x36, 0x46, 0x37, 0x32, 0x47,
-    0x41, 0x3c, 0x35, 0x35, 0x36, 0x41, 0x3a, 0x3b, 0x42, 0x44, 0x36, 0x31,
-    0x3c, 0x3d, 0x34, 0x34, 0x3b, 0x40, 0x40, 0x2e, 0x40, 0x46, 0x3b, 0x43,
-    0x3f, 0x40, 0x3b, 0x3a, 0x32, 0x40, 0x46, 0x39, 0x3c, 0x49, 0x2f, 0x3d,
-    0x49, 0x3e, 0x44, 0x3c, 0x3e, 0x35, 0x3f, 0x44, 0x41, 0x40, 0x3e, 0x47,
-    0x3d, 0x40, 0x3f, 0x41, 0x3b, 0x41, 0x41, 0x3f, 0x40, 0x3f, 0x3e, 0x3e,
-    0x3f, 0x43, 0x35, 0x40, 0x2b, 0x42, 0x45, 0x56, 0x40, 0x3c, 0x2f, 0x44,
-    0x44, 0x3d, 0x3e, 0x3d, 0x40, 0x2d, 0x39, 0x31, 0x54, 0x2f, 0x61, 0x48,
-    0x2e, 0x37, 0x37, 0x32, 0x3e, 0x2d, 0x52, 0x4d, 0x2d, 0x4d, 0x4c, 0x3a,
-    0x3a, 0x31, 0x4e, 0x2d, 0x31, 0x48, 0x47, 0x54, 0x45, 0x38, 0x3b, 0x3d,
-    0x42, 0x41, 0x44, 0x4a, 0x48, 0x42, 0x2f, 0x4d, 0x31, 0x34, 0x3a, 0x46,
-    0x37, 0x44, 0x2c, 0x45, 0x46, 0x43, 0x40, 0x3f, 0x34, 0x33, 0x40, 0x39,
-    0x32, 0x35, 0x3a, 0x40, 0x3f, 0x3f, 0x36, 0x32, 0x3f, 0x3d, 0x35, 0x48,
-    0x3c, 0x48, 0x37, 0x39, 0x35, 0x3f, 0x37, 0x3d, 0x44, 0x46, 0x2d, 0x2a,
-    0x47, 0x38, 0x3a, 0x39, 0x45, 0x3b, 0x40, 0x2d, 0x37, 0x33, 0x41, 0x3c,
-    0x40, 0x35, 0x3f, 0x32, 0x3a, 0x36, 0x40, 0x41, 0x3a, 0x3c, 0x33, 0x31,
-    0x42, 0x3f, 0x41, 0x3a, 0x41, 0x46, 0x38, 0x2f, 0x3c, 0x3d, 0x3d, 0x39,
-    0x3b, 0x46, 0x41, 0x31, 0x46, 0x36, 0x40, 0x48, 0x3c, 0x33, 0x42, 0x32,
-    0x3b, 0x40, 0x3f, 0x36, 0x37, 0x44, 0x34, 0x35, 0x32, 0x32, 0x37, 0x38,
-    0x33, 0x3b, 0x37, 0x4a, 0x3f, 0x46, 0x3a, 0x41, 0x32, 0x37, 0x30, 0x3e,
-    0x40, 0x35, 0x41, 0x40, 0x37, 0x41, 0x2b, 0x40, 0x3d, 0x3d, 0x32, 0x38,
-    0x34, 0x3e, 0x47, 0x61, 0x43, 0x3b, 0x3c, 0x42, 0x46, 0x3d, 0x40, 0x4a,
-    0x3c, 0x2d, 0x33, 0x35, 0x55, 0x38, 0x69, 0x4f, 0x33, 0x37, 0x30, 0x39,
-    0x44, 0x2e, 0x58, 0x4b, 0x2a, 0x51, 0x4b, 0x3c, 0x39, 0x2e, 0x51, 0x2d,
-    0x30, 0x4a, 0x42, 0x53, 0x3f, 0x39, 0x3e, 0x44, 0x3b, 0x40, 0x47, 0x44,
-    0x47, 0x3e, 0x39, 0x4b, 0x40, 0x3d, 0x42, 0x39, 0x3b, 0x39, 0x32, 0x42,
-    0x36, 0x36, 0x36, 0x42, 0x44, 0x34, 0x33, 0x40, 0x40, 0x40, 0x3a, 0x3a,
-    0x41, 0x3f, 0x31, 0x30, 0x3f, 0x31, 0x30, 0x39, 0x46, 0x36, 0x35, 0x34,
-    0x40, 0x43, 0x3c, 0x41, 0x31, 0x46, 0x35, 0x26, 0x44, 0x32, 0x3d, 0x35,
-    0x3d, 0x3c, 0x36, 0x32, 0x39, 0x3a, 0x30, 0x40, 0x48, 0x3e, 0x38, 0x37,
-    0x44, 0x3b, 0x3d, 0x42, 0x3d, 0x3c, 0x32, 0x2b, 0x3f, 0x41, 0x39, 0x3d,
-    0x3e, 0x3f, 0x35, 0x2f, 0x46, 0x3d, 0x3d, 0x3b, 0x45, 0x37, 0x31, 0x35,
-    0x44, 0x40, 0x3a, 0x45, 0x3a, 0x3c, 0x39, 0x31, 0x3b, 0x3d, 0x3b, 0x3a,
-    0x43, 0x44, 0x39, 0x47, 0x44, 0x36, 0x3e, 0x39, 0x48, 0x3f, 0x39, 0x4b,
-    0x3c, 0x36, 0x3d, 0x44, 0x44, 0x3f, 0x39, 0x43, 0x3f, 0x37, 0x3f, 0x37,
-    0x3b, 0x3b, 0x38, 0x3b, 0x3f, 0x40, 0x31, 0x44, 0x30, 0x44, 0x46, 0x5b,
-    0x46, 0x3f, 0x39, 0x40, 0x40, 0x37, 0x4a, 0x46, 0x3f, 0x36, 0x40, 0x39,
-    0x59, 0x3e, 0x66, 0x57, 0x32, 0x34, 0x2e, 0x33, 0x46, 0x31, 0x58, 0x44,
-    0x26, 0x4c, 0x4b, 0x3c, 0x39, 0x2e, 0x4d, 0x35, 0x32, 0x46, 0x52, 0x52,
-    0x3e, 0x40, 0x39, 0x3c, 0x39, 0x3d, 0x53, 0x48, 0x41, 0x3c, 0x3b, 0x4d,
-    0x3c, 0x3e, 0x38, 0x44, 0x3a, 0x3a, 0x29, 0x4a, 0x3c, 0x37, 0x36, 0x38,
-    0x3a, 0x31, 0x37, 0x39, 0x3a, 0x40, 0x46, 0x32, 0x42, 0x38, 0x32, 0x2e,
-    0x3a, 0x45, 0x44, 0x34, 0x34, 0x38, 0x32, 0x2e, 0x35, 0x40, 0x3a, 0x41,
-    0x42, 0x3d, 0x37, 0x2c, 0x3f, 0x37, 0x3c, 0x3d, 0x3a, 0x36, 0x33, 0x35,
-    0x3c, 0x34, 0x3c, 0x39, 0x3c, 0x3a, 0x37, 0x30, 0x30, 0x3e, 0x3d, 0x3a,
-    0x44, 0x37, 0x36, 0x32, 0x36, 0x37, 0x36, 0x3a, 0x3c, 0x41, 0x3a, 0x35,
-    0x36, 0x3a, 0x34, 0x40, 0x39, 0x40, 0x3e, 0x32, 0x34, 0x46, 0x33, 0x3f,
-    0x36, 0x45, 0x3e, 0x35, 0x3f, 0x38, 0x3f, 0x3e, 0x3b, 0x3a, 0x36, 0x3b,
-    0x36, 0x38, 0x32, 0x3f, 0x44, 0x3c, 0x35, 0x48, 0x38, 0x39, 0x31, 0x49,
-    0x3d, 0x43, 0x36, 0x3f, 0x31, 0x43, 0x36, 0x3e, 0x3e, 0x41, 0x39, 0x3b,
-    0x40, 0x42, 0x3c, 0x43, 0x36, 0x4a, 0x48, 0x67, 0x4e, 0x43, 0x36, 0x46,
-    0x44, 0x3f, 0x4b, 0x4b, 0x3f, 0x38, 0x3c, 0x3c, 0x5e, 0x38, 0x70, 0x52,
-    0x38, 0x32, 0x3b, 0x36, 0x4a, 0x2c, 0x52, 0x46, 0x29, 0x4f, 0x48, 0x42,
-    0x2d, 0x2e, 0x4f, 0x28, 0x28, 0x45, 0x4d, 0x52, 0x42, 0x3e, 0x3f, 0x41,
-    0x3c, 0x3a, 0x47, 0x50, 0x44, 0x45, 0x33, 0x4b, 0x3e, 0x3f, 0x42, 0x3d,
-    0x43, 0x34, 0x27, 0x3f, 0x42, 0x3e, 0x43, 0x3e, 0x3a, 0x3c, 0x37, 0x3b,
-    0x3f, 0x30, 0x3a, 0x3e, 0x3c, 0x34, 0x37, 0x24, 0x3d, 0x43, 0x40, 0x44,
-    0x40, 0x46, 0x31, 0x2f, 0x43, 0x38, 0x38, 0x39, 0x3c, 0x34, 0x2d, 0x2a,
-    0x38, 0x31, 0x43, 0x3b, 0x39, 0x3b, 0x32, 0x34, 0x3e, 0x39, 0x41, 0x3b,
-    0x3e, 0x33, 0x3a, 0x2a, 0x41, 0x3f, 0x3c, 0x43, 0x3b, 0x3e, 0x35, 0x2c,
-    0x38, 0x41, 0x33, 0x31, 0x3e, 0x3f, 0x3a, 0x3c, 0x3b, 0x35, 0x3f, 0x3d,
-    0x42, 0x3a, 0x3c, 0x35, 0x3f, 0x40, 0x3c, 0x3e, 0x37, 0x41, 0x3d, 0x38,
-    0x34, 0x31, 0x36, 0x3d, 0x3d, 0x47, 0x36, 0x44, 0x3f, 0x45, 0x3c, 0x3c,
-    0x35, 0x36, 0x31, 0x4f, 0x46, 0x3a, 0x41, 0x42, 0x40, 0x32, 0x33, 0x41,
-    0x34, 0x40, 0x3d, 0x43, 0x3b, 0x3a, 0x32, 0x3c, 0x42, 0x42, 0x3d, 0x43,
-    0x37, 0x45, 0x45, 0xff, 0x4b, 0x45, 0x3b, 0x40, 0x43, 0x3e, 0x47, 0x49,
-    0x3d, 0x3b, 0x3e, 0x33, 0x58, 0x35, 0x71, 0x54, 0x2f, 0x38, 0x38, 0x33,
-    0x47, 0x35, 0x5b, 0x46, 0x2c, 0x4c, 0x43, 0x37, 0x36, 0x39, 0x4f, 0x30,
-    0x26, 0x48, 0x51, 0x48, 0x46, 0x45, 0x3b, 0x39, 0x42, 0x50, 0x47, 0x4c,
-    0x4b, 0x3b, 0x3d, 0x4d, 0x41, 0x34, 0x40, 0x44, 0x38, 0x32, 0x2d, 0x43,
-    0x39, 0x36, 0x3b, 0x3b, 0x40, 0x3d, 0x37, 0x3c, 0x44, 0x39, 0x42, 0x37,
-    0x38, 0x38, 0x32, 0x2f, 0x41, 0x40, 0x3f, 0x3a, 0x37, 0x35, 0x3b, 0x2a,
-    0x37, 0x30, 0x3c, 0x37, 0x40, 0x38, 0x3a, 0x27, 0x44, 0x3d, 0x43, 0x40,
-    0x35, 0x3f, 0x3e, 0x32, 0x3e, 0x3c, 0x40, 0x39, 0x39, 0x3a, 0x41, 0x31,
-    0x3b, 0x3f, 0x34, 0x43, 0x3a, 0x38, 0x42, 0x2a, 0x47, 0x46, 0x3b, 0x38,
-    0x47, 0x45, 0x39, 0x31, 0x43, 0x40, 0x37, 0x3a, 0x3d, 0x3e, 0x39, 0x30,
-    0x36, 0x37, 0x3a, 0x43, 0x3f, 0x32, 0x31, 0x41, 0x45, 0x3e, 0x43, 0x38,
-    0x3f, 0x37, 0x3c, 0x49, 0x3b, 0x33, 0x3d, 0x3a, 0x37, 0x44, 0x32, 0x50,
-    0x39, 0x44, 0x3e, 0x3f, 0x3d, 0x41, 0x3e, 0x3e, 0x42, 0x44, 0x45, 0x3f,
-    0x36, 0x3f, 0x37, 0x39, 0x3b, 0x3d, 0x3b, 0x3b, 0x2f, 0x46, 0x40, 0x6d,
-    0x50, 0x45, 0x3b, 0x45, 0x46, 0x3b, 0x42, 0x48, 0x42, 0x3c, 0x39, 0x37,
-    0x57, 0x3b, 0x6c, 0x5b, 0x32, 0x35, 0x3d, 0x39, 0x48, 0x31, 0x5c, 0x46,
-    0x29, 0x4c, 0x3f, 0x3e, 0x37, 0x33, 0x58, 0x32, 0x2a, 0x43, 0x4c, 0x50,
-    0x3b, 0x44, 0x3c, 0x41, 0x39, 0x48, 0x55, 0x4c, 0x42, 0x38, 0x3b, 0x51,
-    0x3f, 0x38, 0x44, 0x46, 0x36, 0x3b, 0x38, 0x4a, 0x3f, 0x37, 0x36, 0x3c,
-    0x31, 0x3d, 0x32, 0x39, 0x3b, 0x3f, 0x3e, 0x35, 0x38, 0x3f, 0x34, 0x2b,
-    0x37, 0x36, 0x39, 0x40, 0x37, 0x41, 0x32, 0x27, 0x36, 0x33, 0x40, 0x3a,
-    0x3f, 0x44, 0x3f, 0x25, 0x38, 0x34, 0x42, 0x3c, 0x3a, 0x40, 0x38, 0x31,
-    0x49, 0x3e, 0x33, 0x3d, 0x31, 0x36, 0x39, 0x2b, 0x44, 0x2f, 0x43, 0x34,
-    0x34, 0x37, 0x39, 0x33, 0x3b, 0x34, 0x42, 0x3c, 0x40, 0x45, 0x36, 0x31,
-    0x43, 0x47, 0x3e, 0x3f, 0x40, 0x3a, 0x33, 0x34, 0x41, 0x44, 0x3a, 0x43,
-    0x3e, 0x38, 0x36, 0x31, 0x42, 0x44, 0x40, 0x41, 0x44, 0x43, 0x33, 0x42,
-    0x3d, 0x41, 0x3d, 0x3e, 0x3c, 0x39, 0x3e, 0x4f, 0x3f, 0x37, 0x31, 0x40,
-    0x3b, 0x38, 0x35, 0x3b, 0x44, 0x41, 0x41, 0x37, 0x40, 0x42, 0x2d, 0x3d,
-    0x39, 0x48, 0x44, 0x3e, 0x34, 0x48, 0x49, 0x6d, 0x45, 0x4b, 0x3a, 0x44,
-    0x49, 0x40, 0x4d, 0x51, 0x3f, 0x34, 0x3b, 0x40, 0x52, 0x34, 0x6f, 0x56,
-    0x33, 0x3e, 0x40, 0x39, 0x41, 0x32, 0x5d, 0x45, 0x2e, 0x51, 0x48, 0x3c,
-    0x2e, 0x2e, 0x51, 0x39, 0x32, 0x45, 0x4a, 0x4c, 0x3b, 0x40, 0x40, 0x3b,
-    0x36, 0x41, 0x54, 0x4e, 0x4a, 0x49, 0x3b, 0x4d, 0x3c, 0x41, 0x38, 0x47,
-    0x3d, 0x3c, 0x37, 0x48, 0x3f, 0x42, 0x3e, 0x36, 0x39, 0x46, 0x37, 0x3e,
-    0x3b, 0x38, 0x40, 0x3b, 0x39, 0x32, 0x3e, 0x29, 0x37, 0x35, 0x3c, 0x3d,
-    0x37, 0x3b, 0x35, 0x2f, 0x32, 0x3b, 0x37, 0x3c, 0x40, 0x3e, 0x39, 0x27,
-    0x3b, 0x38, 0x37, 0x36, 0x39, 0x37, 0x37, 0x35, 0x42, 0x3e, 0x3b, 0x43,
-    0x41, 0x3c, 0x37, 0x2a, 0x3a, 0x3e, 0x38, 0x40, 0x36, 0x3e, 0x44, 0x2e,
-    0x3e, 0x3a, 0x37, 0x3b, 0x3e, 0x41, 0x3d, 0x30, 0x3b, 0x3f, 0x41, 0x45,
-    0x3a, 0x48, 0x37, 0x2f, 0x3a, 0x37, 0x34, 0x43, 0x42, 0x3d, 0x38, 0x41,
-    0x3b, 0x3c, 0x39, 0x3c, 0x39, 0x47, 0x2e, 0x41, 0x42, 0x40, 0x32, 0x36,
-    0x43, 0x40, 0x3d, 0x4c, 0x38, 0x3e, 0x3b, 0x41, 0x3d, 0x3b, 0x34, 0x43,
-    0x43, 0x3f, 0x44, 0x3c, 0x3a, 0x33, 0x39, 0x42, 0x43, 0x3f, 0x33, 0x3d,
-    0x33, 0x3e, 0x48, 0x6b, 0x48, 0x43, 0x36, 0x47, 0x49, 0x44, 0x4a, 0x49,
-    0x3c, 0x31, 0x35, 0x3e, 0x5c, 0x34, 0x73, 0x53, 0x33, 0x3c, 0x32, 0x3b,
-    0x43, 0x27, 0x59, 0x4e, 0x2b, 0x51, 0x4f, 0x37, 0x36, 0x34, 0x56, 0x34,
-    0x32, 0x4f, 0x46, 0x50, 0x40, 0x40, 0x3c, 0x3e, 0x34, 0x37, 0x50, 0x49,
-    0x43, 0x47, 0x3e, 0x52, 0x44, 0x38, 0x3b, 0x4f, 0x3a, 0x3d, 0x2b, 0x4c,
-    0x40, 0x38, 0x3a, 0x35, 0x3a, 0x3a, 0x3d, 0x38, 0x3d, 0x3b, 0x37, 0x48,
-    0x3d, 0x3d, 0x32, 0x30, 0x3a, 0x34, 0x3f, 0x3a, 0x3b, 0x3e, 0x35, 0x2f,
-    0x3b, 0x3a, 0x45, 0x3d, 0x42, 0x33, 0x33, 0x24, 0x44, 0x39, 0x3c, 0x3d,
-    0x41, 0x3c, 0x37, 0x2c, 0x3b, 0x36, 0x34, 0x41, 0x3d, 0x3f, 0x39, 0x32,
-    0x3c, 0x40, 0x44, 0x3d, 0x41, 0x3d, 0x3a, 0x29, 0x3e, 0x3e, 0x43, 0x33,
-    0x3f, 0x3e, 0x3e, 0x31, 0x38, 0x3a, 0x34, 0x3d, 0x3f, 0x3e, 0x3a, 0x3d,
-    0x3e, 0x48, 0x45, 0x3d, 0x44, 0x37, 0x33, 0x3d, 0x45, 0x39, 0x40, 0x40,
-    0x42, 0x3f, 0x3f, 0x3d, 0x3a, 0x3b, 0x41, 0x33, 0x41, 0x3c, 0x32, 0x55,
-    0x43, 0x3a, 0x32, 0x40, 0x3c, 0x3e, 0x40, 0x43, 0x37, 0x3f, 0x40, 0x38,
-    0x43, 0x41, 0x36, 0x42, 0x44, 0x3c, 0x32, 0x3f, 0x38, 0x42, 0x46, 0x59,
-    0x4c, 0x41, 0x39, 0x47, 0x46, 0x46, 0x44, 0x44, 0x35, 0x42, 0x32, 0x39,
-    0x4f, 0x34, 0x6d, 0x55, 0x31, 0x3b, 0x3a, 0x3f, 0x44, 0x2c, 0x5d, 0x43,
-    0x26, 0x4a, 0x4f, 0x40, 0x36, 0x32, 0x4d, 0x33, 0x2f, 0x50, 0x4d, 0x57,
-    0x3b, 0x40, 0x42, 0x44, 0x41, 0x3f, 0x52, 0x4e, 0x35, 0x41, 0x44, 0x52,
-    0x40, 0x35, 0x39, 0x4b, 0x45, 0x34, 0x2c, 0x4a, 0x3b, 0x41, 0x31, 0x33,
-    0x3f, 0x3a, 0x36, 0x3c, 0x3c, 0x33, 0x30, 0x38, 0x43, 0x3f, 0x32, 0x2d,
-    0x3f, 0x3a, 0x38, 0x41, 0x39, 0x45, 0x36, 0x2e, 0x3c, 0x38, 0x45, 0x3f,
-    0x40, 0x3f, 0x3e, 0x26, 0x41, 0x37, 0x3c, 0x44, 0x3f, 0x3f, 0x35, 0x37,
-    0x46, 0x34, 0x37, 0x3e, 0x48, 0x38, 0x36, 0x34, 0x33, 0x39, 0x40, 0x3c,
-    0x42, 0x3d, 0x3b, 0x31, 0x38, 0x3b, 0x44, 0x42, 0x45, 0x38, 0x41, 0x30,
-    0x3d, 0x42, 0x36, 0x3f, 0x3b, 0x45, 0x37, 0x32, 0x3c, 0x37, 0x3d, 0x42,
-    0x38, 0x3d, 0x2f, 0x31, 0x39, 0x40, 0x3f, 0x44, 0x3a, 0x41, 0x44, 0x46,
-    0x3d, 0x3a, 0x32, 0x3b, 0x34, 0x47, 0x36, 0x4c, 0x47, 0x35, 0x3c, 0x33,
-    0x3b, 0x3c, 0x30, 0x43, 0x43, 0x3f, 0x31, 0x40, 0x3a, 0x37, 0x30, 0x46,
-    0x39, 0x3b, 0x42, 0x40, 0x2d, 0x3f, 0x3e, 0x6a, 0x50, 0x3b, 0x31, 0x54,
-    0x47, 0x3d, 0x48, 0x4e, 0x3b, 0x41, 0x3a, 0x39, 0x49, 0x36, 0x64, 0x4e,
-    0x32, 0x39, 0x3d, 0x37, 0x42, 0x2c, 0x5c, 0x43, 0x2a, 0x4b, 0x4b, 0x46,
-    0x30, 0x29, 0x52, 0x31, 0x35, 0x44, 0x4a, 0x4b, 0x3d, 0x3b, 0x4e, 0x42,
-    0x3d, 0x39, 0x42, 0x52, 0x3f, 0x36, 0x3e, 0x50, 0x3f, 0x32, 0x35, 0x3a,
-    0x40, 0x39, 0x35, 0x48, 0x3b, 0x3e, 0x41, 0x43, 0x43, 0x45, 0x2f, 0x36,
-    0x38, 0x34, 0x3f, 0x44, 0x32, 0x3f, 0x37, 0x33, 0x33, 0x35, 0x2e, 0x41,
-    0x37, 0x3e, 0x38, 0x28, 0x49, 0x30, 0x46, 0x39, 0x3b, 0x30, 0x38, 0x28,
-    0x3b, 0x3d, 0x3a, 0x43, 0x3f, 0x34, 0x43, 0x36, 0x39, 0x3c, 0x3e, 0x3e,
-    0x39, 0x3b, 0x39, 0x32, 0x3c, 0x36, 0x3e, 0x38, 0x34, 0x3c, 0x3a, 0x2a,
-    0x46, 0x3d, 0x40, 0x37, 0x3b, 0x39, 0x3b, 0x34, 0x38, 0x31, 0x43, 0x46,
-    0x3b, 0x43, 0x39, 0x2b, 0x38, 0x40, 0x3e, 0x39, 0x35, 0x3d, 0x2c, 0x36,
-    0x37, 0x40, 0x36, 0x40, 0x41, 0x38, 0x32, 0x3f, 0x36, 0x46, 0x34, 0x31,
-    0x40, 0x3e, 0x3c, 0x4e, 0x42, 0x3d, 0x36, 0x3f, 0x42, 0x3f, 0x33, 0x40,
-    0x34, 0x37, 0x3c, 0x3b, 0x31, 0x47, 0x32, 0x3c, 0x34, 0x3d, 0x42, 0x3b,
-    0x37, 0x41, 0x3b, 0x64, 0x52, 0x40, 0x36, 0x4e, 0x46, 0x3f, 0x3f, 0x47,
-    0x3c, 0x3a, 0x3a, 0x41, 0x4a, 0x32, 0x5e, 0x50, 0x2d, 0x39, 0x3a, 0x38,
-    0x3d, 0x2c, 0x5a, 0x3e, 0x2e, 0x47, 0x3e, 0x3e, 0x33, 0x29, 0x4c, 0x35,
-    0x30, 0x4d, 0x4d, 0x4d, 0x38, 0x42, 0x51, 0x47, 0x39, 0x3c, 0x43, 0x4b,
-    0x42, 0x3f, 0x3a, 0x4b, 0x44, 0x3f, 0x3a, 0x44, 0x3e, 0x37, 0x30, 0x45,
-    0x3d, 0x36, 0x34, 0x3f, 0x36, 0x35, 0x37, 0x36, 0x43, 0x3b, 0x37, 0x3e,
-    0x35, 0x3e, 0x32, 0x34, 0x32, 0x38, 0x3c, 0x3a, 0x3a, 0x3c, 0x30, 0x2b,
-    0x31, 0x37, 0x30, 0x42, 0x36, 0x37, 0x36, 0x2c, 0x3c, 0x31, 0x41, 0x37,
-    0x44, 0x41, 0x3b, 0x37, 0x41, 0x3f, 0x38, 0x3b, 0x3a, 0x3a, 0x3c, 0x2f,
-    0x47, 0x41, 0x3e, 0x33, 0x42, 0x3a, 0x32, 0x34, 0x44, 0x40, 0x43, 0x3d,
-    0x34, 0x41, 0x38, 0x35, 0x35, 0x3b, 0x45, 0x38, 0x32, 0x37, 0x3c, 0x2e,
-    0x39, 0x40, 0x30, 0x3e, 0x42, 0x35, 0x3d, 0x36, 0x3e, 0x3d, 0x39, 0x46,
-    0x3f, 0x36, 0x37, 0x49, 0x41, 0x39, 0x3d, 0x3d, 0x33, 0x44, 0x42, 0x50,
-    0x3d, 0x3c, 0x3e, 0x3f, 0x42, 0x42, 0x3b, 0x3d, 0x41, 0x31, 0x39, 0x3a,
-    0x44, 0x34, 0x38, 0x47, 0x44, 0x38, 0x3b, 0x42, 0x30, 0x42, 0x44, 0x57,
-    0x49, 0x3a, 0x39, 0x4f, 0x41, 0x3e, 0x40, 0x43, 0x37, 0x42, 0x3b, 0x48,
-    0x50, 0x29, 0x5b, 0x44, 0x2c, 0x40, 0x3f, 0x3c, 0x46, 0x34, 0x5c, 0x41,
-    0x2c, 0x48, 0x46, 0x46, 0x35, 0x32, 0x4c, 0x35, 0x2f, 0x3b, 0x48, 0x44,
-    0x41, 0x41, 0x49, 0x45, 0x34, 0x37, 0x44, 0x45, 0x43, 0x3b, 0x42, 0x44,
-    0x3a, 0x37, 0x48, 0x49, 0x34, 0x39, 0x33, 0x4a, 0x40, 0x3d, 0x33, 0x39,
-    0x39, 0x3b, 0x30, 0x31, 0x3d, 0x47, 0x3c, 0x3a, 0x34, 0x3c, 0x3a, 0x2b,
-    0x3a, 0x34, 0x41, 0x40, 0x42, 0x36, 0x44, 0x2c, 0x40, 0x47, 0x3b, 0x37,
-    0x38, 0x42, 0x44, 0x29, 0x36, 0x3d, 0x3d, 0x36, 0x42, 0x3b, 0x35, 0x36,
-    0x43, 0x39, 0x41, 0x3d, 0x45, 0x41, 0x31, 0x32, 0x40, 0x3d, 0x3c, 0x41,
-    0x3e, 0x3d, 0x35, 0x34, 0x32, 0x38, 0x36, 0x3f, 0x3b, 0x3d, 0x39, 0x36,
-    0x40, 0x3e, 0x3d, 0x3a, 0x3a, 0x3b, 0x3c, 0x32, 0x40, 0x34, 0x3a, 0x36,
-    0x42, 0x47, 0x3e, 0x33, 0x3a, 0x44, 0x30, 0x39, 0x40, 0x3a, 0x36, 0x44,
-    0x3c, 0x3b, 0x3f, 0x33, 0x3e, 0x3c, 0x35, 0x53, 0x43, 0x3c, 0x3f, 0x43,
-    0x3d, 0x44, 0x33, 0x47, 0x42, 0x40, 0x37, 0x3b, 0x43, 0x3f, 0x33, 0x41,
-    0x38, 0x42, 0x44, 0x3d, 0x2d, 0x3f, 0x46, 0x49, 0x4e, 0x3f, 0x36, 0x45,
-    0x45, 0x39, 0x40, 0x42, 0x39, 0x39, 0x3a, 0x42, 0x45, 0x2c, 0x61, 0x44,
-    0x30, 0x45, 0x38, 0x3a, 0x40, 0x37, 0x58, 0x39, 0x31, 0x3e, 0x3a, 0x3e,
-    0x37, 0x32, 0x4a, 0x39, 0x2e, 0x47, 0x3e, 0x4e, 0x3f, 0x3e, 0x48, 0x45,
-    0x3f, 0x48, 0x3a, 0x3f, 0x40, 0x36, 0x3a, 0x44, 0x36, 0x3e, 0x3d, 0x41,
-    0x45, 0x36, 0x36, 0x4b, 0x3a, 0x3d, 0x45, 0x48, 0x38, 0x45, 0x39, 0x38,
-    0x38, 0x3a, 0x42, 0x34, 0x3f, 0x34, 0x39, 0x34, 0x32, 0x3f, 0x3c, 0x3d,
-    0x3d, 0x47, 0x3a, 0x2f, 0x3c, 0x3e, 0x3f, 0x39, 0x35, 0x42, 0x3c, 0x2a,
-    0x3b, 0x35, 0x42, 0x44, 0x46, 0x39, 0x38, 0x39, 0x43, 0x3a, 0x38, 0x42,
-    0x3d, 0x3a, 0x40, 0x35, 0x34, 0x39, 0x3a, 0x38, 0x43, 0x42, 0x42, 0x2d,
-    0x31, 0x3b, 0x33, 0x40, 0x3b, 0x47, 0x35, 0x30, 0x3a, 0x3c, 0x3b, 0x47,
-    0x3a, 0x3c, 0x38, 0x35, 0x3c, 0x35, 0x3e, 0x3e, 0x39, 0x3d, 0x39, 0x40,
-    0x37, 0x33, 0x49, 0x38, 0x3c, 0x43, 0x34, 0x40, 0x39, 0x42, 0x3c, 0x3b,
-    0x3e, 0x45, 0x3e, 0x51, 0x3d, 0x3f, 0x3b, 0x34, 0x37, 0x3c, 0x40, 0x47,
-    0x3c, 0x41, 0x3f, 0x41, 0x37, 0x3e, 0x36, 0x3c, 0x42, 0x40, 0x3f, 0x3a,
-    0x3b, 0x42, 0x44, 0x4b, 0x4b, 0x37, 0x41, 0x4d, 0x41, 0x45, 0x40, 0x41,
-    0x40, 0x38, 0x37, 0x40, 0x42, 0x2c, 0x57, 0x43, 0x2d, 0x49, 0x3a, 0x3e,
-    0x37, 0x2f, 0x52, 0x37, 0x31, 0x42, 0x3b, 0x3f, 0x39, 0x38, 0x48, 0x3c,
-    0x37, 0x3d, 0x3a, 0x39, 0x3a, 0x45, 0x4b, 0x49, 0x3e, 0x44, 0x48, 0x49,
-    0x3d, 0x39, 0x3c, 0x41, 0x41, 0x38, 0x45, 0x38, 0x33, 0x3d, 0x37, 0x47,
-    0x34, 0x3f, 0x3b, 0x3d, 0x39, 0x34, 0x30, 0x39, 0x44, 0x36, 0x34, 0x3c,
-    0x37, 0x38, 0x45, 0x34, 0x40, 0x33, 0x41, 0x3a, 0x3e, 0x3c, 0x3b, 0x3a,
-    0x40, 0x3f, 0x3b, 0x3d, 0x3b, 0x46, 0x41, 0x2a, 0x3a, 0x3c, 0x42, 0x46,
-    0x33, 0x3f, 0x2d, 0x3a, 0x45, 0x45, 0x38, 0x3b, 0x44, 0x34, 0x35, 0x3f,
-    0x34, 0x43, 0x38, 0x3e, 0x41, 0x3b, 0x42, 0x38, 0x3d, 0x3f, 0x38, 0x45,
-    0x3b, 0x35, 0x39, 0x3c, 0x43, 0x43, 0x38, 0x34, 0x44, 0x43, 0x2e, 0x39,
-    0x39, 0x40, 0x39, 0x41, 0x41, 0x34, 0x3e, 0x44, 0x3d, 0x43, 0x3a, 0x3a,
-    0x3b, 0x3b, 0x36, 0x45, 0x3c, 0x43, 0x3d, 0x48, 0x36, 0x36, 0x39, 0x55,
-    0x35, 0x40, 0x3e, 0x49, 0x40, 0x3a, 0x3d, 0x3d, 0x34, 0x47, 0x40, 0x41,
-    0x40, 0x47, 0x39, 0x3e, 0x3b, 0x38, 0x3c, 0x3a, 0x35, 0x3e, 0x41, 0x4a,
-    0x4b, 0x3f, 0x36, 0x3d, 0x40, 0x3c, 0x39, 0x32, 0x33, 0x36, 0x30, 0x42,
-    0x42, 0x36, 0x54, 0x48, 0x2e, 0x4c, 0x34, 0x3c, 0x39, 0x36, 0x4e, 0x37,
-    0x2f, 0x3e, 0x30, 0x3d, 0x36, 0x3b, 0x45, 0x36, 0x37, 0x3e, 0x41, 0x4b,
-    0x3b, 0x36, 0x45, 0x3b, 0x38, 0x45, 0x3e, 0x43, 0x48, 0x46, 0x44, 0x44,
-    0x3e, 0x3b, 0x37, 0x3b, 0x3a, 0x3f, 0x3d, 0x44, 0x39, 0x38, 0x45, 0x43,
-    0x3d, 0x35, 0x39, 0x2c, 0x44, 0x41, 0x36, 0x40, 0x3d, 0x39, 0x3d, 0x2f,
-    0x3d, 0x39, 0x42, 0x3d, 0x36, 0x46, 0x43, 0x2c, 0x41, 0x3a, 0x30, 0x45,
-    0x3f, 0x41, 0x35, 0x2b, 0x3b, 0x38, 0x3a, 0x44, 0x32, 0x32, 0x39, 0x3c,
-    0x3a, 0x3a, 0x3c, 0x3a, 0x35, 0x40, 0x3b, 0x31, 0x36, 0x33, 0x35, 0x34,
-    0x3c, 0x3b, 0x3d, 0x36, 0x48, 0x3b, 0x3f, 0x42, 0x3e, 0x33, 0x2f, 0x3a,
-    0x49, 0x41, 0x39, 0x3e, 0x3c, 0x44, 0x3c, 0x39, 0x33, 0x39, 0x36, 0x35,
-    0x3d, 0x42, 0x34, 0x3e, 0x38, 0x45, 0x40, 0x45, 0x3d, 0x48, 0x42, 0x4a,
-    0x3f, 0x45, 0x38, 0x42, 0x44, 0x40, 0x34, 0x49, 0x44, 0x3d, 0x3a, 0x39,
-    0x3e, 0x3a, 0x42, 0x3e, 0x48, 0x42, 0x3e, 0x3a, 0x3f, 0x3f, 0x32, 0x3b,
-    0x38, 0x41, 0x3c, 0x39, 0x33, 0x45, 0x44, 0x3c, 0x48, 0x41, 0x41, 0x3d,
-    0x3a, 0x3c, 0x37, 0x33, 0x41, 0x3f, 0x38, 0x3a, 0x3f, 0x37, 0x51, 0x3c,
-    0x37, 0x3a, 0x43, 0x37, 0x40, 0x31, 0x4f, 0x34, 0x3b, 0x44, 0x45, 0x39,
-    0x40, 0x33, 0x49, 0x33, 0x3e, 0x35, 0x44, 0x3d, 0x3b, 0x3f, 0x43, 0x41,
-    0x43, 0x43, 0x48, 0x44, 0x46, 0x3b, 0x43, 0x3f, 0x3c, 0x3f, 0x3e, 0x3d,
-    0x3b, 0x41, 0x3c, 0x43, 0x30, 0x34, 0x39, 0x33, 0x3f, 0x38, 0x36, 0x2e,
-    0x33, 0x3f, 0x3c, 0x40, 0x3d, 0x3b, 0x3b, 0x31, 0x36, 0x41, 0x3b, 0x38,
-    0x46, 0x36, 0x34, 0x31, 0x42, 0x44, 0x33, 0x35, 0x3f, 0x36, 0x3c, 0x30,
-    0x3f, 0x31, 0x39, 0x3e, 0x3f, 0x47, 0x3e, 0x34, 0x36, 0x36, 0x34, 0x39,
-    0x37, 0x46, 0x40, 0x33, 0x3b, 0x3a, 0x3f, 0x41, 0x37, 0x44, 0x3a, 0x3f,
-    0x34, 0x45, 0x37, 0x33, 0x3f, 0x47, 0x41, 0x36, 0x39, 0x3e, 0x40, 0x38,
-    0x41, 0x3d, 0x3d, 0x36, 0x40, 0x3a, 0x3b, 0x3b, 0x41, 0x3b, 0x3a, 0x3f,
-    0x3f, 0x3b, 0x35, 0x42, 0x46, 0x3a, 0x30, 0x45, 0x40, 0x37, 0x39, 0x39,
-    0x3d, 0x38, 0x3f, 0x45, 0x3f, 0x31, 0x32, 0x3b, 0x35, 0x3e, 0x3b, 0x38,
-    0x3b, 0x44, 0x37, 0x39, 0x37, 0x42, 0x3f, 0x44, 0x38, 0x36, 0x37, 0x44,
-    0x45, 0x46, 0x41, 0x3b, 0x46, 0x42, 0x43, 0x43, 0x3a, 0x4b, 0x37, 0x35,
-    0x3b, 0x40, 0x32, 0x38, 0x41, 0x38, 0x4f, 0x3e, 0x36, 0x3f, 0x47, 0x3b,
-    0x47, 0x3b, 0x4a, 0x2e, 0x3d, 0x45, 0x3b, 0x46, 0x3e, 0x38, 0x43, 0x38,
-    0x41, 0x48, 0x3a, 0x39, 0x40, 0x45, 0x3b, 0x43, 0x40, 0x3e, 0x43, 0x41,
-    0x41, 0x3e, 0x39, 0x3f, 0x35, 0x42, 0x33, 0x3f, 0x3d, 0x32, 0x45, 0x3c,
-    0x41, 0x31, 0x45, 0x38, 0x43, 0x45, 0x41, 0x35, 0x35, 0x40, 0x44, 0x36,
-    0x3a, 0x3b, 0x3c, 0x2c, 0x3e, 0x41, 0x33, 0x3d, 0x46, 0x34, 0x3b, 0x30,
-    0x30, 0x42, 0x43, 0x3d, 0x3d, 0x3d, 0x43, 0x31, 0x3f, 0x40, 0x3a, 0x3f,
-    0x48, 0x3e, 0x3b, 0x39, 0x44, 0x43, 0x3b, 0x3a, 0x42, 0x38, 0x38, 0x3b,
-    0x3f, 0x44, 0x37, 0x3e, 0x45, 0x40, 0x41, 0x3b, 0x3c, 0x3a, 0x38, 0x37,
-    0x3b, 0x33, 0x3f, 0x35, 0x43, 0x3d, 0x33, 0x41, 0x3b, 0x46, 0x39, 0x32,
-    0x39, 0x3f, 0x3b, 0x39, 0x47, 0x3c, 0x3f, 0x39, 0x34, 0x3d, 0x3c, 0x46,
-    0x3f, 0x3e, 0x3e, 0x44, 0x34, 0x40, 0x3f, 0x39, 0x3c, 0x38, 0x36, 0x45,
-    0x42, 0x46, 0x3b, 0x44, 0x3a, 0x3d, 0x3b, 0x42, 0x3b, 0x3b, 0x3c, 0x45,
-    0x42, 0x3d, 0x36, 0x37, 0x3d, 0x43, 0x3f, 0x48, 0xa6, 0xfb, 0xff, 0xff,
-    0x04, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0xb3, 0x00, 0x00, 0x00,
-    0x39, 0xff, 0xff, 0xff, 0xe5, 0xff, 0xff, 0xff, 0x30, 0x00, 0x00, 0x00,
-    0x68, 0xfb, 0xff, 0xff, 0xbc, 0xfc, 0xff, 0xff, 0x20, 0x00, 0x00, 0x00,
-    0x14, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0xe8, 0x03, 0x00, 0x00,
-    0x01, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
-    0x03, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x70, 0x02, 0x00, 0x00,
-    0x70, 0x03, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x00, 0xf0, 0x01, 0x00, 0x00,
-    0x80, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x50, 0x01, 0x00, 0x00,
-    0xa4, 0x02, 0x00, 0x00, 0xba, 0xfc, 0xff, 0xff, 0x00, 0x00, 0x00, 0x03,
-    0x24, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
-    0x24, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x6c, 0x61, 0x62, 0x65,
-    0x6c, 0x73, 0x5f, 0x73, 0x6f, 0x66, 0x74, 0x6d, 0x61, 0x78, 0x00, 0x00,
-    0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
-    0x3c, 0xfd, 0xff, 0xff, 0x2c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
-    0x14, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x80, 0x3b, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x3f,
-    0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2a, 0xfd, 0xff, 0xff,
-    0x00, 0x00, 0x00, 0x03, 0x1c, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00,
-    0x08, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00,
-    0x61, 0x64, 0x64, 0x5f, 0x31, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
-    0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xa4, 0xfd, 0xff, 0xff,
-    0x2c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00,
-    0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x58, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x09, 0x97, 0xf5, 0x3f,
-    0x01, 0x00, 0x00, 0x00, 0x87, 0x35, 0xa0, 0x43, 0x01, 0x00, 0x00, 0x00,
-    0xd6, 0xd7, 0x28, 0xc3, 0x92, 0xfd, 0xff, 0xff, 0x00, 0x00, 0x00, 0x03,
-    0x1c, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
-    0x24, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x52, 0x65, 0x6c, 0x75,
-    0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
-    0x19, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
-    0x14, 0xfe, 0xff, 0xff, 0x2c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
-    0x14, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
-    0x05, 0x80, 0xbf, 0x3f, 0x01, 0x00, 0x00, 0x00, 0x85, 0xc0, 0xbe, 0x43,
-    0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xfe, 0xff, 0xff,
-    0x00, 0x00, 0x00, 0x03, 0x3c, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00,
-    0x08, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00,
-    0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x5f, 0x71, 0x75, 0x61, 0x6e,
-    0x74, 0x2f, 0x46, 0x61, 0x6b, 0x65, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x57,
-    0x69, 0x74, 0x68, 0x4d, 0x69, 0x6e, 0x4d, 0x61, 0x78, 0x56, 0x61, 0x72,
-    0x73, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
-    0x0a, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
-    0xa4, 0xfe, 0xff, 0xff, 0x30, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00,
-    0x18, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
-    0xae, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x01, 0x00, 0x00, 0x00, 0x17, 0xac, 0x6e, 0x3a, 0x01, 0x00, 0x00, 0x00,
-    0x20, 0x4e, 0x97, 0x3d, 0x01, 0x00, 0x00, 0x00, 0xaf, 0x27, 0x21, 0xbe,
-    0x96, 0xfe, 0xff, 0xff, 0x00, 0x00, 0x00, 0x03, 0x20, 0x00, 0x00, 0x00,
-    0x04, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00,
-    0x09, 0x00, 0x00, 0x00, 0x52, 0x65, 0x73, 0x68, 0x61, 0x70, 0x65, 0x5f,
-    0x31, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
-    0x31, 0x00, 0x00, 0x00, 0x2b, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
-    0x1c, 0xff, 0xff, 0xff, 0x2c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
-    0x14, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x3f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x42,
-    0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0xff, 0xff, 0xff,
-    0x00, 0x00, 0x00, 0x02, 0x20, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
-    0x08, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00,
-    0x43, 0x6f, 0x6e, 0x76, 0x32, 0x44, 0x5f, 0x62, 0x69, 0x61, 0x73, 0x00,
-    0x01, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0xfc, 0xfe, 0xff, 0xff,
-    0x14, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
-    0x17, 0xac, 0xee, 0x39, 0x5a, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x03,
-    0x48, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
-    0x54, 0x00, 0x00, 0x00, 0x31, 0x00, 0x00, 0x00, 0x77, 0x65, 0x69, 0x67,
-    0x68, 0x74, 0x73, 0x5f, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x5f, 0x31, 0x2f,
-    0x46, 0x61, 0x6b, 0x65, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x57, 0x69, 0x74,
-    0x68, 0x4d, 0x69, 0x6e, 0x4d, 0x61, 0x78, 0x56, 0x61, 0x72, 0x73, 0x2f,
-    0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x73, 0x65, 0x00, 0x00, 0x00,
-    0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x30, 0x11, 0x00, 0x00,
-    0x0c, 0x00, 0x14, 0x00, 0x04, 0x00, 0x08, 0x00, 0x0c, 0x00, 0x10, 0x00,
-    0x0c, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00,
-    0x18, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
-    0x3d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x01, 0x00, 0x00, 0x00, 0x9d, 0xaf, 0xd0, 0x3a, 0x01, 0x00, 0x00, 0x00,
-    0xe7, 0x29, 0x9e, 0x3e, 0x01, 0x00, 0x00, 0x00, 0x5b, 0x91, 0xc3, 0xbd,
-    0x00, 0x00, 0x0e, 0x00, 0x18, 0x00, 0x08, 0x00, 0x07, 0x00, 0x0c, 0x00,
-    0x10, 0x00, 0x14, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
-    0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
-    0x28, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x4d, 0x61, 0x74, 0x4d,
-    0x75, 0x6c, 0x5f, 0x62, 0x69, 0x61, 0x73, 0x00, 0x01, 0x00, 0x00, 0x00,
-    0x04, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x04, 0x00, 0x08, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00,
-    0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x62, 0x1b, 0x1c, 0x3b,
-    0x03, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, 0x5c, 0x00, 0x00, 0x00,
-    0x04, 0x00, 0x00, 0x00, 0xc0, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09,
-    0x02, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
-    0x24, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x01, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
-    0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x08, 0x00, 0x04, 0x00,
-    0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x3f, 0x14, 0x00, 0x1c, 0x00,
-    0x08, 0x00, 0x0c, 0x00, 0x10, 0x00, 0x07, 0x00, 0x14, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x18, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
-    0x01, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
-    0x28, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
-    0x02, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
-    0x04, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00, 0x14, 0x00, 0x18, 0x00,
-    0x00, 0x00, 0x08, 0x00, 0x0c, 0x00, 0x07, 0x00, 0x10, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x14, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
-    0x1c, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00,
-    0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
-    0x02, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
-    0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x10, 0x00,
-    0x00, 0x00, 0x04, 0x00, 0x08, 0x00, 0x0c, 0x00, 0x0c, 0x00, 0x00, 0x00,
-    0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
-    0x03, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00,
-    0x04, 0x00, 0x00, 0x00, 0xfa, 0xff, 0xff, 0xff, 0x00, 0x19, 0x06, 0x00,
-    0x06, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, 0x09, 0x06, 0x00,
-    0x08, 0x00, 0x07, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04};
-const int g_model_len = 19800;
diff --git a/tensorflow/lite/micro/examples/micro_speech/simple_features/model.h b/tensorflow/lite/micro/examples/micro_speech/simple_features/model.h
deleted file mode 100644
index b3e705e..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/simple_features/model.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-// This is a standard TensorFlow Lite FlatBuffer model file that has been
-// converted into a C data array, so it can be easily compiled into a binary
-// for devices that don't have a file system. It was created using the command:
-// xxd -i model.tflite > model.cc
-
-#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_MODEL_H_
-#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_MODEL_H_
-
-extern const unsigned char g_model[];
-extern const int g_model_len;
-
-#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_MODEL_H_
diff --git a/tensorflow/lite/micro/examples/micro_speech/simple_features/no_power_spectrum_data.cc b/tensorflow/lite/micro/examples/micro_speech/simple_features/no_power_spectrum_data.cc
deleted file mode 100644
index aff0242..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/simple_features/no_power_spectrum_data.cc
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-// See the header for documentation on the meaning of this data.
-
-#include "tensorflow/lite/micro/examples/micro_speech/simple_features/no_power_spectrum_data.h"
-
-const uint8_t g_no_power_spectrum_data[g_no_power_spectrum_data_size] = {
-    255, 7, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-    0,   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-};
diff --git a/tensorflow/lite/micro/examples/micro_speech/simple_features/no_power_spectrum_data.h b/tensorflow/lite/micro/examples/micro_speech/simple_features/no_power_spectrum_data.h
deleted file mode 100644
index f203623..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/simple_features/no_power_spectrum_data.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-// This data was extracted from the larger feature data held in
-// no_features_data.cc and consists of the 29th spectrogram slice of 43 values.
-// This is the expected result of running the sample data in
-// no_30ms_sample_data.cc through the preprocessing pipeline.
-
-#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_NO_POWER_SPECTRUM_DATA_H_
-#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_NO_POWER_SPECTRUM_DATA_H_
-
-#include <cstdint>
-
-constexpr int g_no_power_spectrum_data_size = 43;
-extern const uint8_t g_no_power_spectrum_data[];
-
-#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_NO_POWER_SPECTRUM_DATA_H_
diff --git a/tensorflow/lite/micro/examples/micro_speech/simple_features/no_simple_features_data.cc b/tensorflow/lite/micro/examples/micro_speech/simple_features/no_simple_features_data.cc
deleted file mode 100644
index 2d7ae62..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/simple_features/no_simple_features_data.cc
+++ /dev/null
@@ -1,152 +0,0 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/micro/examples/micro_speech/simple_features/no_simple_features_data.h"
-
-/* File automatically created by
- * tensorflow/examples/speech_commands/wav_to_features.py \
- * --sample_rate=16000 \
- * --clip_duration_ms=1000 \
- * --window_size_ms=30 \
- * --window_stride_ms=20 \
- * --feature_bin_count=40 \
- * --quantize=1 \
- * --preprocess="average" \
- * --input_wav="speech_commands_test_set_v0.02/no/f9643d42_nohash_4.wav" \
- * --output_c_file="no_simple_features_data.cc" \
- */
-
-const int g_no_simple_f9643d42_nohash_4_width = 43;
-const int g_no_simple_f9643d42_nohash_4_height = 49;
-const unsigned char g_no_simple_f9643d42_nohash_4_data[] = {
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   5,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   67, 2,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   139, 2,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   195, 2,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   230, 2,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  255, 7,
-    6, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 255, 7,  16, 1,   1,   0,  2, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 255, 7,   22, 0,  1,   0,
-    1, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 238, 5,   20, 3, 4,   1,  1,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  144, 4,   19, 3, 5,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  42, 6,   3,
-    1, 3,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  3, 1,   5,  0,  1,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  5, 1,   3,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    1, 0,   1,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0, 0,   0,   0,  0, 0,   0,  0,  0,   0,   0,  0, 0,   0,   0,  0,  0,   0,
-    0,
-};
diff --git a/tensorflow/lite/micro/examples/micro_speech/simple_features/no_simple_features_data.h b/tensorflow/lite/micro/examples/micro_speech/simple_features/no_simple_features_data.h
deleted file mode 100644
index ff46134..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/simple_features/no_simple_features_data.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_NO_SIMPLE_FEATURES_DATA_H_
-#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_NO_SIMPLE_FEATURES_DATA_H_
-
-extern const int g_no_simple_f9643d42_nohash_4_width;
-extern const int g_no_simple_f9643d42_nohash_4_height;
-extern const unsigned char g_no_simple_f9643d42_nohash_4_data[];
-
-#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_NO_SIMPLE_FEATURES_DATA_H_
diff --git a/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_features_generator.cc b/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_features_generator.cc
deleted file mode 100644
index 3733912..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_features_generator.cc
+++ /dev/null
@@ -1,149 +0,0 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-// Reference implementation of the preprocessing pipeline, with the same
-// results as the audio tutorial at
-// https://www.tensorflow.org/tutorials/sequences/audio_recognition
-// This module takes 30ms of PCM-encoded signed 16-bit audio samples (at 16KHz,
-// so 480 values), and extracts a power spectrum of frequencies. There are 43
-// frequency bands in the result, derived from the original 256 output from the
-// discrete Fourier transform, and averaged together in groups of 6.
-// It's expected that most platforms will have optimized versions of the
-// functions used here, for example replacing the DFT with an FFT, so this
-// version shouldn't be used where performance is critical.
-
-#include "tensorflow/lite/micro/examples/micro_speech/simple_features/simple_features_generator.h"
-
-#include <cmath>
-
-#include "tensorflow/lite/micro/examples/micro_speech/simple_features/simple_model_settings.h"
-#include "tensorflow/lite/micro/micro_log.h"
-
-namespace {
-
-// Needed because some platforms don't have M_PI defined.
-constexpr float kPi = 3.14159265358979323846f;
-
-// Performs a discrete Fourier transform on the real inputs. This corresponds to
-// rdft() in the FFT package at http://www.kurims.kyoto-u.ac.jp/~ooura/fft.html,
-// and to kiss_fftr() in KISSFFT at https://github.com/mborgerding/kissfft.
-// It takes in an array of float real values, and returns a result of the same
-// length with float real and imaginary components interleaved, so
-// fourier_output[0] is the first real value, fourier_output[1] is the first
-// imaginary, fourier_output[2] is the second real, and so on.
-// The calling function should ensure that the array passed in as fourier_output
-// is at least time_series_size in length. Most optimized FFT implementations
-// require the length to be a power of two as well, but this version doesn't
-// enforce that.
-void CalculateDiscreteFourierTransform(float* time_series, int time_series_size,
-                                       float* fourier_output) {
-  for (int i = 0; i < time_series_size / 2; ++i) {
-    float real = 0;
-    for (int j = 0; j < time_series_size; ++j) {
-      real += time_series[j] * std::cos(j * i * kPi * 2 / time_series_size);
-    }
-    float imaginary = 0;
-    for (int j = 0; j < time_series_size; ++j) {
-      imaginary -=
-          time_series[j] * std::sin(j * i * kPi * 2 / time_series_size);
-    }
-    fourier_output[(i * 2) + 0] = real;
-    fourier_output[(i * 2) + 1] = imaginary;
-  }
-}
-
-// Produces a simple sine curve that is used to ensure frequencies at the center
-// of the current sample window are weighted more heavily than those at the end.
-void CalculatePeriodicHann(int window_length, float* window_function) {
-  for (int i = 0; i < window_length; ++i) {
-    window_function[i] = 0.5f - 0.5f * std::cos((2 * kPi * i) / window_length);
-  }
-}
-
-}  // namespace
-
-TfLiteStatus GenerateSimpleFeatures(const int16_t* input, int input_size,
-                                    int output_size, uint8_t* output) {
-  // Ensure our input and output data arrays are valid.
-  if (input_size > kMaxAudioSampleSize) {
-    MicroPrintf("Input size %d larger than %d", input_size,
-                kMaxAudioSampleSize);
-    return kTfLiteError;
-  }
-  if (output_size != kFeatureSliceSize) {
-    MicroPrintf("Requested output size %d doesn't match %d", output_size,
-                kFeatureSliceSize);
-    return kTfLiteError;
-  }
-
-  // Pre-calculate the window function we'll be applying to the input data.
-  // In a real application, we'd calculate this table once in an initialization
-  // function and store it for repeated reuse.
-  float window_function[kMaxAudioSampleSize];
-  CalculatePeriodicHann(input_size, window_function);
-
-  // Apply the window function to our time series input, and pad it with zeroes
-  // to the next power of two.
-  float float_input[kMaxAudioSampleSize];
-  for (int i = 0; i < kMaxAudioSampleSize; ++i) {
-    if (i < input_size) {
-      float_input[i] =
-          (input[i] * window_function[i]) / static_cast<float>(1 << 15);
-    } else {
-      float_input[i] = 0.0f;
-    }
-  }
-
-  // Pull the frequency data from the time series sample.
-  float fourier_values[kMaxAudioSampleSize];
-  CalculateDiscreteFourierTransform(float_input, kMaxAudioSampleSize,
-                                    fourier_values);
-
-  // We have the complex numbers giving us information about each frequency
-  // band, but all we want to know is how strong each frequency is, so calculate
-  // the squared magnitude by adding together the squares of each component.
-  float power_spectrum[kMaxAudioSampleSize / 2];
-  for (int i = 0; i < (kMaxAudioSampleSize / 2); ++i) {
-    const float real = fourier_values[(i * 2) + 0];
-    const float imaginary = fourier_values[(i * 2) + 1];
-    power_spectrum[i] = (real * real) + (imaginary * imaginary);
-  }
-
-  // Finally, reduce the size of the output by averaging together six adjacent
-  // frequencies into each slot, producing an array of 43 values.
-  for (int i = 0; i < kFeatureSliceSize; ++i) {
-    float total = 0.0f;
-    for (int j = 0; j < kAverageWindowSize; ++j) {
-      const int index = (i * kAverageWindowSize) + j;
-      if (index < (kMaxAudioSampleSize / 2)) {
-        total += power_spectrum[index];
-      }
-    }
-    const float average = total / kAverageWindowSize;
-    // Quantize the result into eight bits, effectively multiplying by two.
-    // The 127.5 constant here has to match the features_max value defined in
-    // tensorflow/examples/speech_commands/input_data.py, and this also assumes
-    // that features_min is zero. If it wasn't, we'd have to subtract it first.
-    int quantized_average = roundf(average * (255.0f / 127.5f));
-    if (quantized_average < 0) {
-      quantized_average = 0;
-    }
-    if (quantized_average > 255) {
-      quantized_average = 255;
-    }
-    output[i] = quantized_average;
-  }
-  return kTfLiteOk;
-}
diff --git a/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_features_generator.h b/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_features_generator.h
deleted file mode 100644
index 7beccea..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_features_generator.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_SIMPLE_FEATURES_GENERATOR_H_
-#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_SIMPLE_FEATURES_GENERATOR_H_
-
-#include "tensorflow/lite/c/common.h"
-
-// Converts audio sample data into a more compact form that's appropriate for
-// feeding into a neural network. There are reference implementations that use
-// both floating point and fixed point available, but because the calculations
-// involved can be time-consuming, it's recommended that you use or write
-// specialized versions for your platform.
-TfLiteStatus GenerateSimpleFeatures(const int16_t* input, int input_size,
-                                    int output_size, uint8_t* output);
-
-#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_SIMPLE_FEATURES_GENERATOR_H_
diff --git a/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_features_generator_test.cc b/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_features_generator_test.cc
deleted file mode 100644
index f3babd1..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_features_generator_test.cc
+++ /dev/null
@@ -1,59 +0,0 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/micro/examples/micro_speech/simple_features/simple_features_generator.h"
-
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/micro/examples/micro_speech/simple_features/no_power_spectrum_data.h"
-#include "tensorflow/lite/micro/examples/micro_speech/simple_features/yes_power_spectrum_data.h"
-#include "tensorflow/lite/micro/examples/micro_speech/testdata/no_30ms_audio_data.h"
-#include "tensorflow/lite/micro/examples/micro_speech/testdata/yes_30ms_audio_data.h"
-#include "tensorflow/lite/micro/micro_log.h"
-#include "tensorflow/lite/micro/testing/micro_test.h"
-
-TF_LITE_MICRO_TESTS_BEGIN
-
-TF_LITE_MICRO_TEST(TestSimpleFeaturesGenerator) {
-  uint8_t yes_calculated_data[g_yes_power_spectrum_data_size];
-  TfLiteStatus yes_status = GenerateSimpleFeatures(
-      g_yes_30ms_audio_data, g_yes_30ms_audio_data_size,
-      g_yes_power_spectrum_data_size, yes_calculated_data);
-  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, yes_status);
-
-  for (int i = 0; i < g_yes_power_spectrum_data_size; ++i) {
-    TF_LITE_MICRO_EXPECT_EQ(g_yes_power_spectrum_data[i],
-                            yes_calculated_data[i]);
-    if (g_yes_power_spectrum_data[i] != yes_calculated_data[i]) {
-      MicroPrintf("Expected value %d but found %d",
-                  g_yes_power_spectrum_data[i], yes_calculated_data[i]);
-    }
-  }
-
-  uint8_t no_calculated_data[g_yes_power_spectrum_data_size];
-  TfLiteStatus no_status =
-      GenerateSimpleFeatures(g_no_30ms_audio_data, g_no_30ms_audio_data_size,
-                             g_no_power_spectrum_data_size, no_calculated_data);
-  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, no_status);
-
-  for (int i = 0; i < g_no_power_spectrum_data_size; ++i) {
-    TF_LITE_MICRO_EXPECT_EQ(g_no_power_spectrum_data[i], no_calculated_data[i]);
-    if (g_no_power_spectrum_data[i] != no_calculated_data[i]) {
-      MicroPrintf("Expected value %d but found %d", g_no_power_spectrum_data[i],
-                  no_calculated_data[i]);
-    }
-  }
-}
-
-TF_LITE_MICRO_TESTS_END
diff --git a/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_model_settings.h b/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_model_settings.h
deleted file mode 100644
index 9d129c8..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_model_settings.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_SIMPLE_MODEL_SETTINGS_H_
-#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_SIMPLE_MODEL_SETTINGS_H_
-
-// Keeping these as constant expressions allow us to allocate fixed-sized arrays
-// on the stack for our working memory.
-
-// The size of the input time series data we pass to the FFT to produce the
-// frequency information. This has to be a power of two, and since we're dealing
-// with 30ms of 16KHz inputs, which means 480 samples, this is the next value.
-constexpr int kMaxAudioSampleSize = 512;
-constexpr int kAudioSampleFrequency = 16000;
-
-// All of these values are derived from the values used during model training,
-// if you change your model you'll need to update these constants.
-constexpr int kAverageWindowSize = 6;
-constexpr int kFeatureSliceSize =
-    ((kMaxAudioSampleSize / 2) + (kAverageWindowSize - 1)) / kAverageWindowSize;
-constexpr int kFeatureSliceCount = 49;
-constexpr int kFeatureElementCount = (kFeatureSliceSize * kFeatureSliceCount);
-constexpr int kFeatureSliceStrideMs = 20;
-constexpr int kFeatureSliceDurationMs = 30;
-
-constexpr int kCategoryCount = 4;
-constexpr int kSilenceIndex = 0;
-constexpr int kUnknownIndex = 1;
-extern const char* kCategoryLabels[kCategoryCount];
-
-#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_SIMPLE_MODEL_SETTINGS_H_
diff --git a/tensorflow/lite/micro/examples/micro_speech/simple_features/yes_power_spectrum_data.cc b/tensorflow/lite/micro/examples/micro_speech/simple_features/yes_power_spectrum_data.cc
deleted file mode 100644
index 96a7c9a..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/simple_features/yes_power_spectrum_data.cc
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-// See the header for documentation on the meaning of this data.
-
-#include "tensorflow/lite/micro/examples/micro_speech/simple_features/yes_power_spectrum_data.h"
-
-const uint8_t g_yes_power_spectrum_data[g_yes_power_spectrum_data_size] = {
-    8, 89, 8, 0, 0, 0, 0, 0, 0, 0, 0, 4, 13, 1, 6, 23, 20, 6, 4, 0, 0, 0,
-    0, 0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,  0, 0, 0,  0,  0, 0, 0, 0,
-};
diff --git a/tensorflow/lite/micro/examples/micro_speech/simple_features/yes_power_spectrum_data.h b/tensorflow/lite/micro/examples/micro_speech/simple_features/yes_power_spectrum_data.h
deleted file mode 100644
index 5264e62..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/simple_features/yes_power_spectrum_data.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-// This data was extracted from the larger feature data held in
-// no_features_data.cc and consists of the 26th spectrogram slice of 43 values.
-// This is the expected result of running the sample data in
-// yes_30ms_sample_data.cc through the preprocessing pipeline.
-
-#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_YES_POWER_SPECTRUM_DATA_H_
-#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_YES_POWER_SPECTRUM_DATA_H_
-
-#include <cstdint>
-
-constexpr int g_yes_power_spectrum_data_size = 43;
-extern const uint8_t g_yes_power_spectrum_data[];
-
-#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_YES_POWER_SPECTRUM_DATA_H_
diff --git a/tensorflow/lite/micro/examples/micro_speech/simple_features/yes_simple_features_data.cc b/tensorflow/lite/micro/examples/micro_speech/simple_features/yes_simple_features_data.cc
deleted file mode 100644
index 078f78d..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/simple_features/yes_simple_features_data.cc
+++ /dev/null
@@ -1,158 +0,0 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/micro/examples/micro_speech/simple_features/yes_simple_features_data.h"
-
-/* File automatically created by
- * tensorflow/examples/speech_commands/wav_to_features.py \
- * --sample_rate=16000 \
- * --clip_duration_ms=1000 \
- * --window_size_ms=30 \
- * --window_stride_ms=20 \
- * --feature_bin_count=40 \
- * --quantize=1 \
- * --preprocess="average" \
- * --input_wav="speech_commands_test_set_v0.02/yes/f2e59fea_nohash_1.wav" \
- * --output_c_file="yes_simple_features_data.cc" \
- */
-
-const int g_yes_simple_f2e59fea_nohash_1_width = 43;
-const int g_yes_simple_f2e59fea_nohash_1_height = 49;
-const unsigned char g_yes_simple_f2e59fea_nohash_1_data[] = {
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  1,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  1,   1,  1,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  4,   5,   1,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  1,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   2,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    1,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   1,  19, 1,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   1,   0,  1,  3,   3,   1,  1,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   8,   89, 8,   0,   0,  0,  0,   0,   0,  0,  0,   4,  13,
-    1,  6,  23,  20,  6,   4,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  19, 177, 42, 1,
-    1,  0,  0,   0,   0,   2,  3,   119, 51, 5,  139, 92,  58, 58, 15,  2,  1,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   13, 165, 176, 3,  1,  1,   0,   0,  1,  1,   32, 214,
-    26, 19, 113, 103, 28,  22, 27,  3,   1,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  12,  55, 128,
-    27, 1,  1,   0,   1,   4,  2,   52,  93, 10, 28,  156, 10, 21, 21,  3,  3,
-    1,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  14,  99,  32, 65, 7,   1,   2,  2,  6,   13, 121,
-    36, 15, 11,  112, 125, 14, 5,   13,  4,  4,  2,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   24, 25,
-    32, 5,  1,   0,   0,   0,  1,   0,   7,  5,  1,   1,   3,  3,  0,   3,  3,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   13,  13, 5,  1,   0,   0,  0,  0,   0,  3,
-    4,  1,  0,   1,   2,   3,  1,   1,   1,  4,  8,   1,   2,  1,  3,   1,  1,
-    0,  1,  1,   3,   1,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  1,
-    8,  2,  1,   0,   0,   0,  0,   0,   1,  1,  0,   0,   1,  1,  2,   0,  2,
-    1,  0,  2,   0,   2,   2,  3,   1,   1,  0,  1,   1,   4,  5,  1,   0,  1,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  1,   1,   1,  0,  1,   2,   1,  0,  1,   3,  1,
-    1,  3,  1,   1,   6,   2,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  2,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  1,   1,   0,  1,  2,   6,   2,  4,  2,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  3,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  1,
-    0,  0,  1,   2,   1,   1,  2,   1,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  4,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   2,  1,  0,   0,   2,  3,  5,   2,  0,
-    1,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   1,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   1,   2,  2,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  1,  0,   0,   0,  0,  1,   2,  3,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   1,  1,   1,   1,  0,  0,   0,   1,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,  0,
-    0,  0,  0,   0,   0,   0,  0,   0,   0,  0,  0,   0,   0,  0,  0,   0,
-};
diff --git a/tensorflow/lite/micro/examples/micro_speech/simple_features/yes_simple_features_data.h b/tensorflow/lite/micro/examples/micro_speech/simple_features/yes_simple_features_data.h
deleted file mode 100644
index 98c7e42..0000000
--- a/tensorflow/lite/micro/examples/micro_speech/simple_features/yes_simple_features_data.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_YES_SIMPLE_FEATURES_DATA_H_
-#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_YES_SIMPLE_FEATURES_DATA_H_
-
-extern const int g_yes_simple_f2e59fea_nohash_1_width;
-extern const int g_yes_simple_f2e59fea_nohash_1_height;
-extern const unsigned char g_yes_simple_f2e59fea_nohash_1_data[];
-
-#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_YES_SIMPLE_FEATURES_DATA_H_
diff --git a/tensorflow/lite/micro/examples/micro_speech/testdata/noise_1000ms.wav b/tensorflow/lite/micro/examples/micro_speech/testdata/noise_1000ms.wav
new file mode 100644
index 0000000..8e0896a
--- /dev/null
+++ b/tensorflow/lite/micro/examples/micro_speech/testdata/noise_1000ms.wav
Binary files differ
diff --git a/tensorflow/lite/micro/examples/micro_speech/testdata/silence_1000ms.wav b/tensorflow/lite/micro/examples/micro_speech/testdata/silence_1000ms.wav
new file mode 100644
index 0000000..294dfc6
--- /dev/null
+++ b/tensorflow/lite/micro/examples/micro_speech/testdata/silence_1000ms.wav
Binary files differ
diff --git a/tensorflow/lite/micro/examples/micro_speech/train/README.md b/tensorflow/lite/micro/examples/micro_speech/train/README.md
index 18a6846..636f70c 100644
--- a/tensorflow/lite/micro/examples/micro_speech/train/README.md
+++ b/tensorflow/lite/micro/examples/micro_speech/train/README.md
@@ -1,7 +1,7 @@
 
 # Micro Speech Training
 
-This example shows how to train a 20 kB model that can recognize 2 keywords,
+This example shows how to train a less than 20 kB model that can recognize 2 keywords,
 "yes" and "no", from speech data.
 
 If the input does not belong to either categories, it is classified as "unknown"
@@ -89,19 +89,17 @@
 
 **Fully quantized implies that the model is **strictly int8** quantized
 **including** the input(s) and output(s).*
-<!-- **Fully quantized implies that the model is **strictly int8** except the
-input(s) and output(s) which remain float.* -->
 
 ## Model Architecture
 
-This is a simple model comprising of a Convolutional 2D layer, a Fully Connected
+This is a simple model comprised of a Convolutional 2D layer, a Fully Connected
 Layer or a MatMul Layer (output: logits) and a Softmax layer
-(output: probabilities) as shown below. Refer to the [`tiny_conv`](https://github.com/tensorflow/tflite-micro/blob/main/tensorflow/examples/speech_commands/models.py#L673)
+(output: probabilities) as shown below. Refer to the [`tiny_conv`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/speech_commands/models.py#L673)
 model architecture.
 
-![model_architecture.png](../images/model_architecture.png)
+[<img src="../images/micro_speech_quantized.png" width="900" alt="model architecture"/>](../images/micro_speech_quantized.png)
 
-*This image was derived from visualizing the 'model.tflite' file in
+*This image was derived from visualizing the 'models/micro_speech_quantized.tflite' file in
 [Netron](https://github.com/lutzroeder/netron)*
 
 This doesn't produce a highly accurate model, but it's designed to be used as
@@ -109,7 +107,7 @@
 can always be on, and then wake higher-power chips when a possible utterance has
 been found, so that more accurate analysis can be done. Additionally, the model
 takes in preprocessed speech input as a result of which we can leverage a
-simpler model for accurate results.
+simpler model for inference results.
 
 ## Dataset
 
@@ -124,49 +122,32 @@
 ## Preprocessing Speech Input
 
 In this section we discuss spectrograms, the preprocessed speech input to the
-model. Here's an illustration of the process:
-
-![spectrogram diagram](https://storage.googleapis.com/download.tensorflow.org/example_images/spectrogram_diagram.png)
+model.
 
 The model doesn't take in raw audio sample data, instead it works with
 spectrograms which are two dimensional arrays that are made up of slices of
 frequency information, each taken from a different time window.
 
 The recipe for creating the spectrogram data is that each frequency slice is
-created by running an FFT across a 30ms section of the audio sample data. The
+created by running an FFT across a 30ms window of the audio sample data. The
 input samples are treated as being between -1 and +1 as real values (encoded as
--32,768 and 32,767 in 16-bit signed integer samples).
+-32,768 and 32,767 in 16-bit signed integer samples).  The audio sampling window
+stride is 20ms, thus every window overlaps by 10ms.
 
-This results in an FFT with 256 entries. Every sequence of six entries is
-averaged together, giving a total of 43 frequency buckets in the final slice.
-The results are stored as unsigned eight-bit values, where 0 represents a real
-number of zero, and 255 represents 127.5 as a real number.
+This results in an FFT with 257 entries. Every sequence of approximately six entries is
+averaged together, giving a total of 40 frequency buckets in the slice.
+The results are further processed by down-scaling, noise reduction, automatic
+gain control, and a final down-scaling.
 
 Each adjacent frequency entry is stored in ascending memory order (frequency
 bucket 0 at data[0], bucket 1 at data[1], etc). The window for the frequency
 analysis is then moved forward by 20ms, and the process repeated, storing the
-results in the next memory row (for example bucket 0 in this moved window would
-be in data[43 + 0], etc). This process happens 49 times in total, producing a
-single channel image that is 43 pixels wide, and 49 rows high.
-
-In a complete application these spectrograms would be calculated at runtime from
-microphone inputs, but the code for doing that is not yet included in this
-sample code. The test uses spectrograms that have been pre-calculated from
-one-second WAV files in the test dataset generated by running the following
-commands:
-
-```
-python tensorflow/tensorflow/examples/speech_commands/wav_to_features.py \
---input_wav=/tmp/speech_dataset/yes/f2e59fea_nohash_1.wav \
---output_c_file=/tmp/yes_features_data.cc \
---window_stride=20 --preprocess=average --quantize=1
-
-python tensorflow/tensorflow/examples/speech_commands/wav_to_features.py \
---input_wav=/tmp/speech_dataset/no/f9643d42_nohash_4.wav \
---output_c_file=/tmp/no_features_data.cc \
---window_stride=20 --preprocess=average --quantize=1
-```
-
+results of the new frequency slice in the next memory row.
+The training is configured for raw audio samples of 1000ms in length.
+With a window size of 30ms and stride of 20ms, some 49 frequency slices can
+be created from 1000ms of audio data.
+Thus, the preprocessing produces a
+single channel image that is 40 pixels wide, and 49 rows high.
 
 ## Other Training Methods
 
diff --git a/tensorflow/lite/micro/examples/mnist_lstm/BUILD b/tensorflow/lite/micro/examples/mnist_lstm/BUILD
index 6df2eef..7d818b2 100644
--- a/tensorflow/lite/micro/examples/mnist_lstm/BUILD
+++ b/tensorflow/lite/micro/examples/mnist_lstm/BUILD
@@ -1,3 +1,4 @@
+load("@rules_python//python:defs.bzl", "py_binary", "py_test")
 load("@tflm_pip_deps//:requirements.bzl", "requirement")
 
 py_binary(
@@ -6,7 +7,7 @@
     srcs_version = "PY3",
     deps = [
         requirement("numpy"),
-        requirement("tensorflow-cpu"),
+        requirement("tensorflow"),
     ],
 )
 
@@ -17,6 +18,7 @@
     deps = [
         "//python/tflite_micro:runtime",
         "@absl_py//absl:app",
+        requirement("pillow"),
     ],
 )
 
diff --git a/tensorflow/lite/micro/examples/network_tester/network_tester_test.cc b/tensorflow/lite/micro/examples/network_tester/network_tester_test.cc
index e62e0c4..23e50c9 100644
--- a/tensorflow/lite/micro/examples/network_tester/network_tester_test.cc
+++ b/tensorflow/lite/micro/examples/network_tester/network_tester_test.cc
@@ -1,4 +1,4 @@
-/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -40,7 +40,7 @@
 #define NUM_INFERENCES 1
 #endif
 
-uint8_t tensor_arena[TENSOR_ARENA_SIZE];
+alignas(16) uint8_t tensor_arena[TENSOR_ARENA_SIZE];
 
 #ifdef NUM_BYTES_TO_PRINT
 inline void print_output_data(TfLiteTensor* output) {
@@ -92,15 +92,19 @@
         model->version(), TFLITE_SCHEMA_VERSION);
     return kTfLiteError;
   }
+#ifdef ETHOS_U
+  tflite::MicroMutableOpResolver<1> resolver;
+  resolver.AddEthosU();
 
-  tflite::MicroMutableOpResolver<6> resolver;
+#else
+  tflite::MicroMutableOpResolver<5> resolver;
   resolver.AddAveragePool2D(tflite::Register_AVERAGE_POOL_2D_INT8());
   resolver.AddConv2D(tflite::Register_CONV_2D_INT8());
   resolver.AddDepthwiseConv2D(tflite::Register_DEPTHWISE_CONV_2D_INT8());
-  resolver.AddEthosU();
   resolver.AddReshape();
   resolver.AddSoftmax(tflite::Register_SOFTMAX_INT8());
 
+#endif
   tflite::MicroInterpreter interpreter(model, resolver, tensor_arena,
                                        TENSOR_ARENA_SIZE);
 
@@ -152,7 +156,8 @@
     }
 #endif
   }
-  MicroPrintf("Ran successfully\n");
+
+  MicroPrintf("~~~ALL TESTS PASSED~~~\n");
 }
 
 TF_LITE_MICRO_TESTS_END
diff --git a/tensorflow/lite/micro/examples/person_detection/README.md b/tensorflow/lite/micro/examples/person_detection/README.md
index a490629..2b33334 100644
--- a/tensorflow/lite/micro/examples/person_detection/README.md
+++ b/tensorflow/lite/micro/examples/person_detection/README.md
@@ -7,6 +7,7 @@
 
 -   [Run the tests on a development machine](#run-the-tests-on-a-development-machine)
 -   [Training your own model](#training-your-own-model)
+-   [Additional makefile targets](#additional-makefile-targets)
 
 
 ## Run the tests on a development machine
@@ -26,6 +27,19 @@
 To understand how TensorFlow Lite does this, you can look at
 [person_detection_test.cc](person_detection_test.cc).
 
+## Additional makefile targets
+```
+make -f tensorflow/lite/micro/tools/make/Makefile person_detection
+make -f tensorflow/lite/micro/tools/make/Makefile person_detection_bin
+make -f tensorflow/lite/micro/tools/make/Makefile run_person_detection
+```
+
+The `run_person_detection` target will produce continuous output similar
+to the following:
+```
+person score:-72 no person score 72
+```
+
 ## Training your own model
 
 You can train your own model with some easy-to-use scripts. See
diff --git a/tensorflow/lite/micro/examples/person_detection/utils/BUILD b/tensorflow/lite/micro/examples/person_detection/utils/BUILD
index 980d803..7f5c815 100644
--- a/tensorflow/lite/micro/examples/person_detection/utils/BUILD
+++ b/tensorflow/lite/micro/examples/person_detection/utils/BUILD
@@ -1,4 +1,5 @@
 load("@tflm_pip_deps//:requirements.bzl", "requirement")
+load("@rules_python//python:defs.bzl", "py_binary", "py_library", "py_test")
 
 package(
     features = ["-layering_check"],
@@ -36,7 +37,7 @@
     ],
     deps = [
         ":raw_to_bitmap_lib",
-        requirement("tensorflow-cpu"),
+        requirement("tensorflow"),
         requirement("numpy"),
     ],
 )
diff --git a/tensorflow/lite/micro/examples/recipes/BUILD b/tensorflow/lite/micro/examples/recipes/BUILD
index 475e552..3572fdb 100644
--- a/tensorflow/lite/micro/examples/recipes/BUILD
+++ b/tensorflow/lite/micro/examples/recipes/BUILD
@@ -1,3 +1,4 @@
+load("@rules_python//python:defs.bzl", "py_library", "py_test")
 load("@tflm_pip_deps//:requirements.bzl", "requirement")
 
 package(
@@ -5,13 +6,24 @@
 )
 
 py_library(
+    name = "add_four_numbers",
+    srcs = ["add_four_numbers.py"],
+    srcs_version = "PY3",
+    visibility = ["//:__subpackages__"],
+    deps = [
+        requirement("numpy"),
+        requirement("tensorflow"),
+    ],
+)
+
+py_library(
     name = "resource_variables_lib",
     srcs = ["resource_variables_lib.py"],
     srcs_version = "PY3",
     visibility = ["//:__subpackages__"],
     deps = [
         requirement("numpy"),
-        requirement("tensorflow-cpu"),
+        requirement("tensorflow"),
     ],
 )
 
@@ -26,8 +38,6 @@
     ],
     deps = [
         ":resource_variables_lib",
-        # TODO(b/286456378): update tflm_runtime to runtime when we are ready to
-        # remove the alias.
-        "//tensorflow/lite/micro/python/interpreter/src:tflm_runtime",
+        "//python/tflite_micro:runtime",
     ],
 )
diff --git a/tensorflow/lite/micro/examples/recipes/add_four_numbers.py b/tensorflow/lite/micro/examples/recipes/add_four_numbers.py
new file mode 100644
index 0000000..f564141
--- /dev/null
+++ b/tensorflow/lite/micro/examples/recipes/add_four_numbers.py
@@ -0,0 +1,62 @@
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+"""Simple TF model creation using resource variables."""
+
+import numpy as np
+import tensorflow as tf
+
+
+"""
+Generates a simple TfLite model that adds 4 numbers.
+
+Basic Usage:
+
+  model = generate_model(False)
+
+Usage where you want model written to file:
+
+  file_path = "some file path"
+  model = generate_model(True, file_path)
+"""
+
+class AddFourNumbers(tf.Module):
+  @tf.function(
+      input_signature=[
+          tf.TensorSpec(shape=[1], dtype=tf.float32, name="a"),
+          tf.TensorSpec(shape=[1], dtype=tf.float32, name="b"),
+          tf.TensorSpec(shape=[1], dtype=tf.float32, name="c"),
+          tf.TensorSpec(shape=[1], dtype=tf.float32, name="d"),
+      ]
+  )
+  def __call__(self, a, b, c, d):
+    return a + b + c + d
+
+
+def get_model_from_concrete_function():
+  """Accumulator model built via TF concrete functions."""
+  model = AddFourNumbers("AddFourNumbers")
+  concrete_func = model.__call__.get_concrete_function()
+  converter = tf.lite.TFLiteConverter.from_concrete_functions(
+      [concrete_func], model
+  )
+  return converter.convert()
+
+
+def generate_model(write_file=True, filename="/tmp/add.tflite"):
+  model = get_model_from_concrete_function()
+  if write_file:
+    with open(filename, "wb") as f:
+      f.write(model)
+  return model
diff --git a/tensorflow/lite/micro/examples/recipes/resource_variables_test.py b/tensorflow/lite/micro/examples/recipes/resource_variables_test.py
index ad8c79e..686a650 100644
--- a/tensorflow/lite/micro/examples/recipes/resource_variables_test.py
+++ b/tensorflow/lite/micro/examples/recipes/resource_variables_test.py
@@ -18,9 +18,7 @@
 from tensorflow.python.platform import test
 from tflite_micro.tensorflow.lite.micro.examples.recipes import resource_variables_lib
 
-# TODO(b/286456378): change tflm_runtime to runtime when we all other usage has
-# been updated.
-from tflite_micro.tensorflow.lite.micro.python.interpreter.src import tflm_runtime
+from tflite_micro.python.tflite_micro import runtime as tflm_runtime
 
 
 class ResourceVariablesTest(test_util.TensorFlowTestCase):
diff --git a/tensorflow/lite/micro/fake_micro_context.cc b/tensorflow/lite/micro/fake_micro_context.cc
index 03ea6df..8874798 100644
--- a/tensorflow/lite/micro/fake_micro_context.cc
+++ b/tensorflow/lite/micro/fake_micro_context.cc
@@ -1,4 +1,4 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -15,28 +15,31 @@
 
 #include "tensorflow/lite/micro/fake_micro_context.h"
 
+#include "tensorflow/lite/c/c_api_types.h"
 #include "tensorflow/lite/kernels/internal/compatibility.h"
 #include "tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h"
-#include "tensorflow/lite/micro/micro_allocator.h"
 #include "tensorflow/lite/micro/micro_arena_constants.h"
 #include "tensorflow/lite/micro/micro_log.h"
 
 namespace tflite {
-namespace {
-// Dummy static variables to allow creation of dummy MicroAllocator.
-// All tests are guarateed to run serially.
-static constexpr int KDummyTensorArenaSize = 256;
-static uint8_t dummy_tensor_arena[KDummyTensorArenaSize];
-}  // namespace
 
-FakeMicroContext::FakeMicroContext(TfLiteTensor* tensors,
-                                   SingleArenaBufferAllocator* allocator,
-                                   MicroGraph* micro_graph)
-    : MicroContext(
-          MicroAllocator::Create(dummy_tensor_arena, KDummyTensorArenaSize),
-          nullptr, micro_graph),
+FakeMicroContext::FakeMicroContext(
+    TfLiteTensor* tensors, SingleArenaBufferAllocator* allocator,
+    MicroGraph* micro_graph
+#ifdef USE_TFLM_COMPRESSION
+    ,
+    const CompressedTensorList* compressed_tensors
+#endif  // USE_TFLM_COMPRESSION
+    )
+    : graph_(*micro_graph),
       tensors_(tensors),
-      allocator_(allocator) {}
+      allocator_(allocator)
+#ifdef USE_TFLM_COMPRESSION
+      ,
+      compressed_tensors_(compressed_tensors)
+#endif  // USE_TFLM_COMPRESSION
+{
+}
 
 TfLiteTensor* FakeMicroContext::AllocateTempTfLiteTensor(int tensor_index) {
   allocated_temp_count_++;
@@ -113,4 +116,69 @@
   return scratch_buffers_[buffer_index];
 }
 
+TfLiteStatus FakeMicroContext::set_external_context(
+    void* external_context_payload) {
+  return kTfLiteError;
+}
+
+void* FakeMicroContext::external_context() { return nullptr; }
+
+MicroGraph& FakeMicroContext::graph() { return graph_; }
+
+#ifdef USE_TFLM_COMPRESSION
+
+// Available during Prepare & Eval. Returns false if tensor is not
+// compressed.
+bool FakeMicroContext::IsTensorCompressed(const TfLiteNode* node,
+                                          int tensor_idx) {
+  if (compressed_tensors_ != nullptr && tensor_idx < node->inputs->size) {
+    int index = node->inputs->data[tensor_idx];
+    if (index >= 0 && compressed_tensors_->tensors[index] != nullptr) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+// Only available during Prepare. The kernel is responsible for storing the
+// scratch buffer handle.
+int FakeMicroContext::AllocateDecompressionScratchBuffer(const TfLiteNode* node,
+                                                         int tensor_idx) {
+  if (compressed_tensors_ == nullptr || tensor_idx >= node->inputs->size) {
+    return -1;
+  }
+  int index = node->inputs->data[tensor_idx];
+  if (index < 0 || compressed_tensors_->tensors[index] == nullptr) {
+    return -1;
+  }
+  TfLiteTensor* tensor = &tensors_[index];
+  int scratch_index = -1;
+  TfLiteStatus result =
+      RequestScratchBufferInArena(tensor->bytes, &scratch_index);
+  if (result != kTfLiteOk) {
+    return -1;
+  }
+
+  return scratch_index;
+}
+
+// Available during Prepare & Eval. Returns nullptr if tensor is not
+// compressed.
+const CompressionTensorData* FakeMicroContext::GetTensorCompressionData(
+    const TfLiteNode* node, int tensor_idx) {
+  if (compressed_tensors_ == nullptr || tensor_idx >= node->inputs->size) {
+    return nullptr;
+  }
+
+  int index = node->inputs->data[tensor_idx];
+  if (index < 0) {
+    return nullptr;
+  }
+
+  return compressed_tensors_->tensors[index];
+}
+
+#endif  // USE_TFLM_COMPRESSION
+
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/fake_micro_context.h b/tensorflow/lite/micro/fake_micro_context.h
index b068f32..7cf9c68 100644
--- a/tensorflow/lite/micro/fake_micro_context.h
+++ b/tensorflow/lite/micro/fake_micro_context.h
@@ -1,4 +1,4 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -27,8 +27,15 @@
 
 class FakeMicroContext : public MicroContext {
  public:
+  ~FakeMicroContext() = default;
+
   FakeMicroContext(TfLiteTensor* tensors, SingleArenaBufferAllocator* allocator,
-                   MicroGraph* micro_graph);
+                   MicroGraph* micro_graph
+#ifdef USE_TFLM_COMPRESSION
+                   ,
+                   const CompressedTensorList* compressed_tensors = nullptr
+#endif  // USE_TFLM_COMPRESSION
+  );
 
   void* AllocatePersistentBuffer(size_t bytes) override;
   TfLiteStatus RequestScratchBufferInArena(size_t bytes,
@@ -44,9 +51,32 @@
 
   TfLiteEvalTensor* GetEvalTensor(int tensor_index) override;
 
+  TfLiteStatus set_external_context(void* external_context_payload) override;
+  void* external_context() override;
+  MicroGraph& graph() override;
+
+#ifdef USE_TFLM_COMPRESSION
+
+  // Available during Prepare & Eval. Returns false if tensor is not
+  // compressed.
+  bool IsTensorCompressed(const TfLiteNode* node, int tensor_idx) override;
+
+  // Only available during Prepare. The kernel is responsible for storing the
+  // scratch buffer handle.
+  int AllocateDecompressionScratchBuffer(const TfLiteNode* node,
+                                         int tensor_idx) override;
+
+  // Available during Prepare & Eval. Returns nullptr if tensor is not
+  // compressed.
+  const CompressionTensorData* GetTensorCompressionData(
+      const TfLiteNode* node, int tensor_idx) override;
+
+#endif  // USE_TFLM_COMPRESSION
+
  private:
   static constexpr int kNumScratchBuffers_ = 12;
 
+  MicroGraph& graph_;
   int scratch_buffer_count_ = 0;
   uint8_t* scratch_buffers_[kNumScratchBuffers_];
 
@@ -55,6 +85,15 @@
 
   SingleArenaBufferAllocator* allocator_;
 
+#ifdef USE_TFLM_COMPRESSION
+
+  //
+  // Compression
+  //
+  const CompressedTensorList* compressed_tensors_;
+
+#endif  // USE_TFLM_COMPRESSION
+
   TF_LITE_REMOVE_VIRTUAL_DELETE
 };
 
diff --git a/tensorflow/lite/micro/fake_micro_context_test.cc b/tensorflow/lite/micro/fake_micro_context_test.cc
index 264b7e7..e792238 100644
--- a/tensorflow/lite/micro/fake_micro_context_test.cc
+++ b/tensorflow/lite/micro/fake_micro_context_test.cc
@@ -18,6 +18,7 @@
 
 #include "tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h"
 #include "tensorflow/lite/micro/micro_allocator.h"
+#include "tensorflow/lite/micro/mock_micro_graph.h"
 #include "tensorflow/lite/micro/test_helpers.h"
 #include "tensorflow/lite/micro/testing/micro_test.h"
 
@@ -58,7 +59,7 @@
   uint8_t arena_buffer[kArenaSize];
   tflite::SingleArenaBufferAllocator simple_memory_allocator(arena_buffer,
                                                              kArenaSize);
-  tflite::MicroGraph dummy_micro_graph(nullptr, nullptr, nullptr, nullptr);
+  tflite::MockMicroGraph dummy_micro_graph{&simple_memory_allocator};
 
   tflite::FakeMicroContext micro_context = tflite::CreateFakeMicroContext(
       &simple_memory_allocator, &dummy_micro_graph);
@@ -71,7 +72,7 @@
   uint8_t arena_buffer[kArenaSize];
   tflite::SingleArenaBufferAllocator simple_memory_allocator(arena_buffer,
                                                              kArenaSize);
-  tflite::MicroGraph dummy_micro_graph(nullptr, nullptr, nullptr, nullptr);
+  tflite::MockMicroGraph dummy_micro_graph{&simple_memory_allocator};
 
   tflite::FakeMicroContext micro_context = tflite::CreateFakeMicroContext(
       &simple_memory_allocator, &dummy_micro_graph);
diff --git a/tensorflow/lite/micro/integration_tests/BUILD b/tensorflow/lite/micro/integration_tests/BUILD
index 1e96ba8..5b90e5d 100644
--- a/tensorflow/lite/micro/integration_tests/BUILD
+++ b/tensorflow/lite/micro/integration_tests/BUILD
@@ -1,3 +1,4 @@
+load("@rules_python//python:defs.bzl", "py_binary")
 load("@tflm_pip_deps//:requirements.bzl", "requirement")
 
 package(licenses = ["notice"])
@@ -18,7 +19,7 @@
         "@absl_py//absl:app",
         "@absl_py//absl/flags",
         requirement("mako"),
-        requirement("tensorflow-cpu"),
+        requirement("tensorflow"),
         "//tensorflow/lite/micro/tools:generate_test_for_model",
         "//tensorflow/lite/python:schema_py",
         "//tensorflow/lite/python:schema_util",
diff --git a/tensorflow/lite/micro/kernels/BUILD b/tensorflow/lite/micro/kernels/BUILD
index 065c8f5..22254d9 100644
--- a/tensorflow/lite/micro/kernels/BUILD
+++ b/tensorflow/lite/micro/kernels/BUILD
@@ -1,8 +1,10 @@
+load("@bazel_skylib//rules:common_settings.bzl", "string_flag")
 load("//tensorflow/lite/micro:build_def.bzl", "micro_copts", "tflm_kernel_cc_library")
 load(
     "//tensorflow:extra_rules.bzl",
     "tflm_kernel_friends",
     "xtensa_fusion_f1_config",
+    "xtensa_hifi_3_config",
     "xtensa_hifi_3z_config",
     "xtensa_hifi_5_config",
     "xtensa_vision_p6_config",
@@ -44,6 +46,7 @@
 cc_library(
     name = "activation_utils",
     hdrs = ["activation_utils.h"],
+    copts = micro_copts(),
     deps = [
         "//tensorflow/lite/c:common",
         "//tensorflow/lite/kernels/internal:cppmath",
@@ -58,6 +61,7 @@
     hdrs = [
         "circular_buffer_flexbuffers_generated_data.h",
     ],
+    copts = micro_copts(),
 )
 
 cc_library(
@@ -71,6 +75,7 @@
     visibility = [
         "//visibility:public",
     ],
+    copts = micro_copts(),
     deps = [
         ":kernel_runner",
         ":micro_ops",
@@ -88,6 +93,7 @@
     hdrs = [
         "detection_postprocess_flexbuffers_generated_data.h",
     ],
+    copts = micro_copts(),
 )
 
 cc_library(
@@ -96,6 +102,7 @@
         "kernel_runner.cc",
     ],
     hdrs = ["kernel_runner.h"],
+    copts = micro_copts(),
     visibility = [
         "//visibility:public",
     ],
@@ -116,6 +123,7 @@
         "kernel_util.cc",
     ],
     hdrs = ["kernel_util.h"],
+    copts = micro_copts(),
     visibility = [
         ":kernel_friends",
         ":tflite_micro",
@@ -135,6 +143,7 @@
     hdrs = [
         "lstm_shared.h",
     ],
+    copts = micro_copts(),
     visibility = ["//tensorflow/lite/micro/kernels/testdata:__pkg__"],
 )
 
@@ -143,6 +152,7 @@
     hdrs = [
         "lstm_eval_test.h",
     ],
+    copts = micro_copts(),
     deps = [
         ":kernel_util",
         ":micro_ops",
@@ -158,6 +168,7 @@
         "micro_tensor_utils.cc",
     ],
     hdrs = ["micro_tensor_utils.h"],
+    copts = micro_copts(),
     deps = [
         "//tensorflow/lite/c:common",
         "//tensorflow/lite/core:macros",
@@ -169,6 +180,11 @@
     ],
 )
 
+HIFI3_COPTS = [
+    "-DXTENSA=1",
+    "-DHIFI3=1",
+]
+
 HIFI4_COPTS = [
     "-DXTENSA=1",
     "-DHIFI4=1",
@@ -197,6 +213,7 @@
         "add_n.cc",
         "arg_min_max.cc",
         "assign_variable.cc",
+        "batch_matmul.cc",
         "batch_to_space_nd.cc",
         "broadcast_args.cc",
         "broadcast_to.cc",
@@ -278,6 +295,7 @@
         "squared_difference.cc",
         "squeeze.cc",
         "strided_slice.cc",
+        "strided_slice_common.cc",
         "sub.cc",
         "sub_common.cc",
         "svdf.cc",
@@ -315,10 +333,13 @@
         "reduce.h",
         "reshape.h",
         "softmax.h",
+        "strided_slice.h",
         "sub.h",
         "svdf.h",
+        "transpose_conv.h",
     ] + select({
         xtensa_fusion_f1_config(): glob(["xtensa/**/*.h"]),
+        xtensa_hifi_3_config(): glob(["xtensa/**/*.h"]),
         xtensa_hifi_3z_config(): glob(["xtensa/**/*.h"]),
         xtensa_hifi_5_config(): glob(["xtensa/**/*.h"]),
         xtensa_vision_p6_config(): glob(["xtensa/**/*.h"]),
@@ -327,6 +348,7 @@
     }),
     accelerated_srcs = {
         xtensa_fusion_f1_config(): glob(["xtensa/**/*.cc"]),
+        xtensa_hifi_3_config(): glob(["xtensa/**/*.cc"]),
         xtensa_hifi_3z_config(): glob(["xtensa/**/*.cc"]),
         xtensa_hifi_5_config(): glob(["xtensa/**/*.cc"]),
         xtensa_vision_p6_config(): glob(["xtensa/**/*.cc"]),
@@ -334,6 +356,7 @@
     },
     copts = micro_copts() + select({
         xtensa_fusion_f1_config(): HIFI4_COPTS,
+        xtensa_hifi_3_config(): HIFI3_COPTS,
         xtensa_hifi_3z_config(): HIFI4_COPTS,
         xtensa_hifi_5_config(): HIFI5_COPTS,
         xtensa_vision_p6_config(): VP6_COPTS,
@@ -370,6 +393,7 @@
         "@flatbuffers//:runtime_cc",
     ] + select({
         xtensa_fusion_f1_config(): ["//third_party/xtensa/nnlib_hifi4:nnlib_hifi4_lib"],
+        xtensa_hifi_3_config(): ["//third_party/xtensa/nnlib_hifi4:nnlib_hifi4_lib"],
         xtensa_hifi_3z_config(): ["//third_party/xtensa/nnlib_hifi4:nnlib_hifi4_lib"],
         xtensa_hifi_5_config(): ["//third_party/xtensa/nnlib_hifi5:nnlib_hifi5_lib"],
         xtensa_vision_p6_config(): ["//third_party/xtensa/xi_tflmlib_vision_p6:xi_tflmlib_vision_p6_lib"],
@@ -440,6 +464,20 @@
 )
 
 cc_test(
+    name = "batch_matmul_test",
+    srcs = [
+        "batch_matmul_test.cc",
+    ],
+    deps = [
+        ":kernel_runner",
+        "//tensorflow/lite/c:common",
+        "//tensorflow/lite/micro:op_resolvers",
+        "//tensorflow/lite/micro:test_helpers",
+        "//tensorflow/lite/micro/testing:micro_test",
+    ],
+)
+
+cc_test(
     name = "batch_to_space_nd_test",
     srcs = [
         "batch_to_space_nd_test.cc",
@@ -1442,31 +1480,55 @@
 # Bazel config settings.
 ####################################
 
+# Command line flag to select which set of optimized kernels to use.
+# Each value should have a `config_setting` which is selected on in the
+# `micro_ops` target to pickup optimized kernel sources. An empty value
+# indicates only reference kernels should be used.
+string_flag(
+    name = "optimized_kernels",
+    build_setting_default = "",
+    values = [
+        "",
+        "xtensa_fusion_f1",
+        "xtensa_hifi_3",
+        "xtensa_hifi_3z",
+        "xtensa_hifi_5",
+        "xtensa_vision_p6",
+    ],
+)
+
 config_setting(
     name = "xtensa_fusion_f1_default",
-    values = {
-        "cpu": "F1_190305_swupgrade",
+    flag_values = {
+        ":optimized_kernels": "xtensa_fusion_f1",
+    },
+)
+
+config_setting(
+    name = "xtensa_hifi_3_default",
+    flag_values = {
+        ":optimized_kernels": "xtensa_hifi_3",
     },
 )
 
 config_setting(
     name = "xtensa_hifi_3z_default",
-    values = {
-        "cpu": "HIFI_190304_swupgrade",
+    flag_values = {
+        ":optimized_kernels": "xtensa_hifi_3z",
     },
 )
 
 config_setting(
     name = "xtensa_hifi_5_default",
-    values = {
-        "cpu": "AE_HiFi5_LE5_AO_FP_XC",
+    flag_values = {
+        ":optimized_kernels": "xtensa_hifi_5",
     },
 )
 
 config_setting(
     name = "xtensa_vision_p6_default",
-    values = {
-        "cpu": "P6_200528",
+    flag_values = {
+        ":optimized_kernels": "xtensa_vision_p6",
     },
 )
 
diff --git a/tensorflow/lite/micro/kernels/Makefile.inc b/tensorflow/lite/micro/kernels/Makefile.inc
index 384fcb3..0bd846b 100644
--- a/tensorflow/lite/micro/kernels/Makefile.inc
+++ b/tensorflow/lite/micro/kernels/Makefile.inc
@@ -1,4 +1,4 @@
-# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -92,6 +92,11 @@
   $(TENSORFLOW_ROOT)signal/micro/kernels/overlap_add_flexbuffers_generated_data.cc, \
   $(TENSORFLOW_ROOT)signal/micro/kernels/overlap_add_flexbuffers_generated_data.h))
 
+$(eval $(call microlite_test,kernel_signal_pcan_test,\
+  $(TENSORFLOW_ROOT)signal/micro/kernels/pcan_test.cc \
+  $(TENSORFLOW_ROOT)signal/micro/kernels/pcan_flexbuffers_generated_data.cc, \
+  $(TENSORFLOW_ROOT)signal/micro/kernels/pcan_flexbuffers_generated_data.h))
+
 $(eval $(call microlite_test,kernel_signal_stacker_test,\
   $(TENSORFLOW_ROOT)signal/micro/kernels/stacker_test.cc \
   $(TENSORFLOW_ROOT)signal/micro/kernels/stacker_flexbuffers_generated_data.cc, \
@@ -109,6 +114,7 @@
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/add_test.cc \
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/add_n_test.cc \
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/arg_min_max_test.cc \
+$(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/batch_matmul_test.cc \
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/batch_to_space_nd_test.cc \
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/broadcast_args_test.cc \
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/broadcast_to_test.cc \
diff --git a/tensorflow/lite/micro/kernels/add.cc b/tensorflow/lite/micro/kernels/add.cc
index b27206c..fde047a 100644
--- a/tensorflow/lite/micro/kernels/add.cc
+++ b/tensorflow/lite/micro/kernels/add.cc
@@ -38,7 +38,7 @@
                      const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) {
   switch (output->type) {
     case kTfLiteFloat32: {
-      tflite::ArithmeticParams op_params;
+      tflite::ArithmeticParams op_params = {};
       SetActivationParams(data->output_activation_min_f32,
                           data->output_activation_max_f32, &op_params);
       if (data->requires_broadcast) {
@@ -59,7 +59,7 @@
       }
     } break;
     case kTfLiteInt32: {
-      tflite::ArithmeticParams op_params;
+      tflite::ArithmeticParams op_params = {};
       SetActivationParams(std::numeric_limits<int32_t>::lowest(),
                           std::numeric_limits<int32_t>::max(), &op_params);
       if (data->requires_broadcast) {
@@ -93,7 +93,7 @@
                               const TfLiteEvalTensor* input1,
                               const TfLiteEvalTensor* input2,
                               TfLiteEvalTensor* output) {
-  tflite::ArithmeticParams op_params;
+  tflite::ArithmeticParams op_params = {};
   op_params.left_shift = data->left_shift;
   op_params.input1_offset = data->input1_offset;
   op_params.input1_multiplier = data->input1_multiplier;
diff --git a/tensorflow/lite/micro/kernels/batch_matmul.cc b/tensorflow/lite/micro/kernels/batch_matmul.cc
new file mode 100644
index 0000000..bd621f4
--- /dev/null
+++ b/tensorflow/lite/micro/kernels/batch_matmul.cc
@@ -0,0 +1,558 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/lite/kernels/internal/reference/batch_matmul.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <limits>
+
+#include "tensorflow/lite/core/c/common.h"
+#include "tensorflow/lite/kernels/internal/quantization_util.h"
+#include "tensorflow/lite/kernels/internal/reference/transpose.h"
+#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
+#include "tensorflow/lite/kernels/internal/types.h"
+#include "tensorflow/lite/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/micro_log.h"
+
+namespace tflite {
+namespace {
+
+constexpr int kInputLhsTensor = 0;
+constexpr int kInputRhsTensor = 1;
+constexpr int kOutputTensor = 0;
+
+struct QuantizationOpData {
+  // The scaling factor from input to output (aka the 'real multiplier') can
+  // be represented as a fixed point multiplier plus a left shift.
+  int32_t output_multiplier;
+  int output_shift;  // exponent
+
+  // The range of the fused activation layer. For example for kNone and
+  // int8_t these would be -128 and 127.
+  int32_t output_activation_min;
+  int32_t output_activation_max;
+
+  int32_t lhs_zero_point;
+  int32_t rhs_zero_point;
+  int32_t output_zero_point;
+};
+
+struct OpData {
+  QuantizationOpData* quantization;
+
+  // Transpose tensors and state
+  TfLiteEvalTensor* lhs_transposed_tensor;
+  TfLiteEvalTensor* rhs_transposed_tensor;
+  bool rhs_is_transposed;
+  bool lhs_is_constant_tensor;
+  bool rhs_is_constant_tensor;
+};
+
+struct OpContext {
+  OpContext(TfLiteContext* context, TfLiteNode* node)
+      : params(static_cast<TfLiteBatchMatMulParams*>(node->builtin_data)),
+        op_data(static_cast<OpData*>(node->user_data)) {}
+
+  TfLiteBatchMatMulParams* params;
+  OpData* op_data;
+};
+
+struct PrepareOpContext : OpContext {
+  PrepareOpContext(TfLiteContext* context, TfLiteNode* node)
+      : OpContext(context, node),
+        micro_context_(GetMicroContext(context)),
+        lhs(micro_context_->AllocateTempInputTensor(node, kInputLhsTensor)),
+        rhs(micro_context_->AllocateTempInputTensor(node, kInputRhsTensor)),
+        output(micro_context_->AllocateTempOutputTensor(node, kOutputTensor)) {}
+
+  ~PrepareOpContext() {
+    if (lhs != nullptr) {
+      micro_context_->DeallocateTempTfLiteTensor(lhs);
+    }
+    if (rhs != nullptr) {
+      micro_context_->DeallocateTempTfLiteTensor(rhs);
+    }
+    if (output != nullptr) {
+      micro_context_->DeallocateTempTfLiteTensor(output);
+    }
+  }
+
+ private:
+  MicroContext* micro_context_;
+
+ public:
+  TfLiteTensor* lhs;
+  TfLiteTensor* rhs;
+  TfLiteTensor* output;
+};
+
+struct EvalOpContext : OpContext {
+  EvalOpContext(TfLiteContext* context, TfLiteNode* node)
+      : OpContext(context, node),
+        lhs(tflite::micro::GetEvalInput(context, node, kInputLhsTensor)),
+        rhs(tflite::micro::GetEvalInput(context, node, kInputRhsTensor)),
+        output(tflite::micro::GetEvalOutput(context, node, kOutputTensor)) {}
+
+  const TfLiteEvalTensor* lhs;
+  const TfLiteEvalTensor* rhs;
+  TfLiteEvalTensor* output;
+};
+
+TfLiteStatus ReshapeOutputTensor(TfLiteContext* context, TfLiteNode* node,
+                                 const RuntimeShape& extended_lhs_shape,
+                                 const RuntimeShape& extended_rhs_shape,
+                                 bool adj_x, bool adj_y, int output_rank,
+                                 TfLiteTensor* output) {
+  int64_t orig_size = NumElements(output);
+
+  // make sure the new output dims rank does not exceed the original rank
+  TF_LITE_ENSURE(context, output_rank <= NumDimensions(output));
+
+  // make sure output tensor dims are not in the FlatBuffer
+  TfLiteEvalTensor* output_eval =
+      tflite::micro::GetEvalOutput(context, node, kOutputTensor);
+  TF_LITE_ENSURE_OK(context, tflite::micro::CreateWritableTensorDimsWithCopy(
+                                 context, output, output_eval));
+
+  // Fill in any broadcast dimensions.
+  for (int i = 0; i < output_rank - 2; ++i) {
+    const int lhs_dim = extended_lhs_shape.Dims(i);
+    const int rhs_dim = extended_rhs_shape.Dims(i);
+    int broadcast_dim = lhs_dim;
+    if ((lhs_dim != rhs_dim) && (lhs_dim == 1)) {
+      broadcast_dim = rhs_dim;
+    }
+    output->dims->data[i] = broadcast_dim;
+  }
+  // Fill in the matmul dimensions.
+  int lhs_rows_index = adj_x ? output_rank - 1 : output_rank - 2;
+  int rhs_cols_index = adj_y ? output_rank - 2 : output_rank - 1;
+
+  output->dims->data[output_rank - 2] = extended_lhs_shape.Dims(lhs_rows_index);
+  output->dims->data[output_rank - 1] = extended_rhs_shape.Dims(rhs_cols_index);
+  output->dims->size = output_rank;
+
+  // Check that output tensor has not been resized
+  // since TFLM doesn't support tensor resizing.
+  TF_LITE_ENSURE_EQ(context, orig_size, NumElements(output));
+
+  return kTfLiteOk;
+}
+
+// Allocates (from the persistent arena) a TfLiteEvalTensor whose shape is
+// `tensor`'s shape with the last two dimensions swapped, together with a data
+// buffer large enough to hold the transposed contents. The buffer contents
+// are left uninitialized; the caller fills them via TransposeRowsColumns.
+// Returns nullptr if any allocation fails.
+// NOTE(review): indexing dims->data[tensor_rank - 2] assumes rank >= 2; the
+// visible caller (Prepare, via InitializeTemporaries) enforces rank 2..5 —
+// confirm before reusing this helper elsewhere.
+TfLiteEvalTensor* AllocInitTransposeTensorFromTfLiteTensor(
+    TfLiteContext* context, const TfLiteTensor& tensor) {
+  MicroContext* micro_context = GetMicroContext(context);
+  TfLiteEvalTensor* eval_tensor = static_cast<TfLiteEvalTensor*>(
+      micro_context->AllocatePersistentBuffer(sizeof(TfLiteEvalTensor)));
+  if (eval_tensor == nullptr) {
+    return nullptr;
+  }
+
+  eval_tensor->type = tensor.type;
+
+  const int tensor_rank = NumDimensions(&tensor);
+  const size_t eval_dims_size = TfLiteIntArrayGetSizeInBytes(tensor_rank);
+  eval_tensor->dims = static_cast<TfLiteIntArray*>(
+      micro_context->AllocatePersistentBuffer(eval_dims_size));
+  if (eval_tensor->dims == nullptr) {
+    return nullptr;
+  }
+  eval_tensor->dims->size = tensor_rank;
+  // Copy all batch dimensions unchanged.
+  for (int i = 0; i < tensor_rank - 2; ++i) {
+    eval_tensor->dims->data[i] = tensor.dims->data[i];
+  }
+  // Swap last two dimensions.
+  eval_tensor->dims->data[tensor_rank - 2] = tensor.dims->data[tensor_rank - 1];
+  eval_tensor->dims->data[tensor_rank - 1] = tensor.dims->data[tensor_rank - 2];
+
+  // Element count is unchanged by the transpose, so size the buffer from the
+  // original tensor.
+  const size_t eval_data_size = static_cast<size_t>(NumElements(&tensor)) *
+                                TfLiteTypeGetSize(tensor.type);
+  eval_tensor->data.data =
+      micro_context->AllocatePersistentBuffer(eval_data_size);
+  if (eval_tensor->data.data == nullptr) {
+    return nullptr;
+  }
+
+  return eval_tensor;
+}
+
+// Initializes tensors to store transposed operands.
+// Allocate storage for hybrid quantization if needed.
+// Allocate normal quantization data if needed.
+TfLiteStatus InitializeTemporaries(TfLiteContext* context, TfLiteNode* node,
+                                   const PrepareOpContext& op_context) {
+  OpData* op_data = op_context.op_data;
+  const TfLiteTensor* lhs = op_context.lhs;
+  const TfLiteTensor* rhs = op_context.rhs;
+  MicroContext* micro_context = GetMicroContext(context);
+
+  op_data->quantization = nullptr;
+  op_data->lhs_transposed_tensor = nullptr;
+  op_data->rhs_transposed_tensor = nullptr;
+
+  if (lhs->type == kTfLiteInt8 || lhs->type == kTfLiteInt16) {
+    op_data->quantization = static_cast<decltype(op_data->quantization)>(
+        micro_context->AllocatePersistentBuffer(
+            sizeof(*op_data->quantization)));
+    TF_LITE_ENSURE(context, op_data->quantization != nullptr);
+  }
+
+  // tensor for Transposed LHS;
+  if (op_context.params->adj_x) {
+    op_data->lhs_transposed_tensor =
+        AllocInitTransposeTensorFromTfLiteTensor(context, *lhs);
+    TF_LITE_ENSURE(context, op_data->lhs_transposed_tensor != nullptr);
+  }
+
+  // We need a buffer for the RHS if we need to transpose the RHS. We
+  // transpose by default, so that the two inputs (LHS and RHS) are in a proper
+  // layout for our fast matrix multiplication routines. If the transpose flag
+  // is set by the caller, the data is already in the desired layout.
+  if (!op_context.params->adj_y) {
+    op_data->rhs_transposed_tensor =
+        AllocInitTransposeTensorFromTfLiteTensor(context, *rhs);
+    TF_LITE_ENSURE(context, op_data->rhs_transposed_tensor != nullptr);
+  }
+
+  return kTfLiteOk;
+}
+
+// Copies `tensor_in` into `tensor_out` with the last two dimensions
+// transposed, leaving any leading (batch) dimensions in place. `tensor_out`
+// must already have a buffer sized for the same number of elements (see
+// AllocInitTransposeTensorFromTfLiteTensor).
+template <typename Scalar>
+void TransposeRowsColumnsImpl(const TfLiteEvalTensor& tensor_in,
+                              TfLiteEvalTensor* tensor_out) {
+  const Scalar* input = tflite::micro::GetTensorData<Scalar>(&tensor_in);
+  Scalar* output = tflite::micro::GetTensorData<Scalar>(tensor_out);
+  RuntimeShape transposed_shape(tflite::micro::GetTensorShape(&tensor_in));
+  RuntimeShape shape(transposed_shape);
+  TransposeParams params;
+  const int rank = shape.DimensionsCount();
+  params.perm_count = rank;
+  // Identity permutation on the batch dimensions.
+  for (int i = 0; i < rank - 2; ++i) {
+    params.perm[i] = i;
+  }
+  // Transpose the last two dimensions.
+  params.perm[rank - 2] = rank - 1;
+  params.perm[rank - 1] = rank - 2;
+  transposed_shape.SetDim(rank - 1, shape.Dims(rank - 2));
+  transposed_shape.SetDim(rank - 2, shape.Dims(rank - 1));
+  reference_ops::Transpose(params, shape, input, transposed_shape, output);
+}
+
+// Type-dispatching wrapper around TransposeRowsColumnsImpl. Returns kTfLiteOk
+// for FLOAT32/INT8/INT16 inputs and kTfLiteError (after logging) for any
+// other type.
+TfLiteStatus TransposeRowsColumns(const TfLiteEvalTensor& tensor_in,
+                                  TfLiteEvalTensor* tensor_out) {
+  if (tensor_in.type == kTfLiteFloat32) {
+    TransposeRowsColumnsImpl<float>(tensor_in, tensor_out);
+    return kTfLiteOk;
+  } else if (tensor_in.type == kTfLiteInt8) {
+    TransposeRowsColumnsImpl<int8_t>(tensor_in, tensor_out);
+    return kTfLiteOk;
+  } else if (tensor_in.type == kTfLiteInt16) {
+    TransposeRowsColumnsImpl<int16_t>(tensor_in, tensor_out);
+    return kTfLiteOk;
+  } else {
+    MicroPrintf(
+        "BATCH_MATMUL can only transpose tensors with FLOAT32, INT8, INT16 "
+        "type.");
+  }
+  return kTfLiteError;
+}
+
+// Returns a copy of `shape` with the last two dimensions swapped; leading
+// (batch) dimensions are unchanged.
+RuntimeShape SwapRowColumnDims(const RuntimeShape& shape) {
+  RuntimeShape swapped_shape(shape);
+  const int32_t dims = shape.DimensionsCount();
+  swapped_shape.SetDim(dims - 2, shape.Dims(dims - 1));
+  swapped_shape.SetDim(dims - 1, shape.Dims(dims - 2));
+  return swapped_shape;
+}
+
+void* BatchMatMulInit(TfLiteContext* context, const char* buffer,
+                      size_t length) {
+  // This is a builtin op, so we don't use the contents in 'buffer', if any.
+  // Instead, we allocate a new object to carry information from Prepare() to
+  // Eval().
+  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
+  MicroContext* micro_context = GetMicroContext(context);
+  return micro_context->AllocatePersistentBuffer(sizeof(OpData));
+}
+
+// Prepare for BATCH_MATMUL: validates input/output types and ranks (2..5),
+// allocates persistent quantization data and transpose scratch tensors,
+// computes the quantized output multiplier/shift when inputs are INT8/INT16,
+// verifies broadcasting of batch dimensions, and writes the broadcast output
+// shape into the (non-resizable) output tensor.
+TfLiteStatus BatchMatMulPrepare(TfLiteContext* context, TfLiteNode* node) {
+  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
+  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
+
+  PrepareOpContext op_context(context, node);
+  const TfLiteTensor* lhs_data = op_context.lhs;
+  TF_LITE_ENSURE(context, lhs_data != nullptr);
+  const TfLiteTensor* rhs_data = op_context.rhs;
+  TF_LITE_ENSURE(context, rhs_data != nullptr);
+  TfLiteTensor* output = op_context.output;
+  TF_LITE_ENSURE(context, output != nullptr);
+
+  TF_LITE_ENSURE(context, lhs_data->type == kTfLiteFloat32 ||
+                              lhs_data->type == kTfLiteInt8 ||
+                              lhs_data->type == kTfLiteInt16);
+  TF_LITE_ENSURE(context, rhs_data->type == kTfLiteFloat32 ||
+                              rhs_data->type == kTfLiteInt8 ||
+                              rhs_data->type == kTfLiteInt16);
+  // Both inputs should be of the same type.
+  // Hybrid input (FLOAT32 LHS, INT8 RHS) is not supported.
+  TF_LITE_ENSURE(context, lhs_data->type == rhs_data->type);
+  // LHS input must match output type.  INT32 output not supported.
+  TF_LITE_ENSURE(context, lhs_data->type == output->type);
+
+  const int lhs_rank = NumDimensions(lhs_data);
+  const int rhs_rank = NumDimensions(rhs_data);
+  // Support dimensions between 2 and 5, inclusive.
+  TF_LITE_ENSURE(context, lhs_rank >= 2);
+  TF_LITE_ENSURE(context, lhs_rank <= 5);
+  TF_LITE_ENSURE(context, rhs_rank >= 2);
+  TF_LITE_ENSURE(context, rhs_rank <= 5);
+
+  // Allocates op_data->quantization and the transpose scratch tensors.
+  TF_LITE_ENSURE_OK(context, InitializeTemporaries(context, node, op_context));
+
+  OpData* op_data = op_context.op_data;
+  // If the RHS is constant, we only transpose once.
+  op_data->rhs_is_transposed = false;
+  op_data->lhs_is_constant_tensor = IsConstantTensor(lhs_data);
+  op_data->rhs_is_constant_tensor = IsConstantTensor(rhs_data);
+
+  // Note that quantized inference requires that all tensors have their
+  // parameters set. This is usually done during quantized training.
+  if (lhs_data->type == kTfLiteInt8 || lhs_data->type == kTfLiteInt16) {
+    TF_LITE_ENSURE(context, op_data->quantization != nullptr);
+    double real_multiplier = 0.0;
+    TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler(
+        context, lhs_data, rhs_data, output, &real_multiplier));
+    QuantizeMultiplier(real_multiplier,
+                       &op_data->quantization->output_multiplier,
+                       &op_data->quantization->output_shift);
+    // BatchMatMul has no fused activation functions. Therefore, set
+    // output activation min and max to min and max of int8_t or int16_t type.
+    if (lhs_data->type == kTfLiteInt8) {
+      op_data->quantization->output_activation_min =
+          std::numeric_limits<int8_t>::min();
+      op_data->quantization->output_activation_max =
+          std::numeric_limits<int8_t>::max();
+    } else {
+      op_data->quantization->output_activation_min =
+          std::numeric_limits<int16_t>::min();
+      op_data->quantization->output_activation_max =
+          std::numeric_limits<int16_t>::max();
+
+      // INT16 path requires symmetric quantization (zero points of 0).
+      TF_LITE_ENSURE_EQ(context, lhs_data->params.zero_point, 0);
+      TF_LITE_ENSURE_EQ(context, rhs_data->params.zero_point, 0);
+      TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
+    }
+
+    op_data->quantization->lhs_zero_point = lhs_data->params.zero_point;
+    op_data->quantization->rhs_zero_point = rhs_data->params.zero_point;
+    op_data->quantization->output_zero_point = output->params.zero_point;
+  }
+
+  const int output_rank = std::max(lhs_rank, rhs_rank);
+  const RuntimeShape extended_lhs_shape =
+      RuntimeShape::ExtendedShape(output_rank, GetTensorShape(lhs_data));
+  const RuntimeShape extended_rhs_shape =
+      RuntimeShape::ExtendedShape(output_rank, GetTensorShape(rhs_data));
+
+  // Ensure any batch dimensions obey broadcasting rules.
+  for (int i = 0; i < output_rank - 2; ++i) {
+    const int lhs_dim = extended_lhs_shape.Dims(i);
+    const int rhs_dim = extended_rhs_shape.Dims(i);
+    if (lhs_dim != rhs_dim) {
+      if (lhs_dim != 1) {
+        TF_LITE_ENSURE_EQ(context, rhs_dim, 1);
+      }
+    }
+  }
+  bool adj_x = op_context.params->adj_x;
+  bool adj_y = op_context.params->adj_y;
+  // Ensure other dimensions work for matrix multiplication.
+  int accum_dim_lhs = adj_x ? extended_lhs_shape.Dims(output_rank - 2)
+                            : extended_lhs_shape.Dims(output_rank - 1);
+  int accum_dim_rhs = adj_y ? extended_rhs_shape.Dims(output_rank - 1)
+                            : extended_rhs_shape.Dims(output_rank - 2);
+
+  TF_LITE_ENSURE_EQ(context, accum_dim_lhs, accum_dim_rhs);
+  TfLiteStatus status =
+      ReshapeOutputTensor(context, node, extended_lhs_shape, extended_rhs_shape,
+                          adj_x, adj_y, output_rank, output);
+  return status;
+}
+
+// Quantized INT8 batch matmul using the reference kernel with int32
+// accumulation. Note the operands are passed RHS-first (see the layout note
+// above BatchMatMulEval).
+TfLiteStatus EvalInt8(TfLiteContext* context, const OpData& data,
+                      const RuntimeShape& lhs_shape,
+                      const TfLiteEvalTensor& lhs,
+                      const RuntimeShape& rhs_shape,
+                      const TfLiteEvalTensor& rhs,
+                      const RuntimeShape& output_shape,
+                      TfLiteEvalTensor* output) {
+  TF_LITE_ENSURE(context, data.quantization != nullptr);
+  // Reuse params struct from FullyConnected Op. Zero-initialize so fields we
+  // do not set below hold deterministic values (consistent with the
+  // ArithmeticParams initialization in add.cc).
+  FullyConnectedParams op_params = {};
+  op_params.input_offset = -data.quantization->lhs_zero_point;
+  op_params.weights_offset =
+      -data.quantization->rhs_zero_point;  // filter offset
+  op_params.output_offset = data.quantization->output_zero_point;
+  op_params.output_multiplier = data.quantization->output_multiplier;
+  op_params.output_shift = data.quantization->output_shift;
+  op_params.quantized_activation_min = data.quantization->output_activation_min;
+  op_params.quantized_activation_max = data.quantization->output_activation_max;
+  op_params.lhs_cacheable = data.lhs_is_constant_tensor;
+  op_params.rhs_cacheable = data.rhs_is_constant_tensor;
+
+  // Note we pass RHS args first, LHS args second. See note for Eval.
+  reference_ops::BatchMatMul<int8_t, int32_t>(
+      op_params, rhs_shape, tflite::micro::GetTensorData<int8_t>(&rhs),
+      lhs_shape, tflite::micro::GetTensorData<int8_t>(&lhs), output_shape,
+      tflite::micro::GetTensorData<int8_t>(output));
+
+  return kTfLiteOk;
+}
+
+// Quantized INT16 batch matmul using the reference kernel with int64
+// accumulation. Note the operands are passed RHS-first (see the layout note
+// above BatchMatMulEval).
+TfLiteStatus EvalInt16(TfLiteContext* context, const OpData& data,
+                       const RuntimeShape& lhs_shape,
+                       const TfLiteEvalTensor& lhs,
+                       const RuntimeShape& rhs_shape,
+                       const TfLiteEvalTensor& rhs,
+                       const RuntimeShape& output_shape,
+                       TfLiteEvalTensor* output) {
+  TF_LITE_ENSURE(context, data.quantization != nullptr);
+  // Reuse params struct from FullyConnected Op. Zero-initialize so fields we
+  // do not set below hold deterministic values (consistent with the
+  // ArithmeticParams initialization in add.cc).
+  FullyConnectedParams op_params = {};
+  op_params.input_offset = -data.quantization->lhs_zero_point;
+  op_params.weights_offset =
+      -data.quantization->rhs_zero_point;  // filter offset
+  op_params.output_offset = data.quantization->output_zero_point;
+  op_params.output_multiplier = data.quantization->output_multiplier;
+  op_params.output_shift = data.quantization->output_shift;
+  op_params.quantized_activation_min = data.quantization->output_activation_min;
+  op_params.quantized_activation_max = data.quantization->output_activation_max;
+  op_params.lhs_cacheable = data.lhs_is_constant_tensor;
+  op_params.rhs_cacheable = data.rhs_is_constant_tensor;
+
+  // Note we pass RHS args first, LHS args second. See note for Eval.
+  reference_ops::BatchMatMul<int16_t, int64_t>(
+      op_params, rhs_shape, tflite::micro::GetTensorData<int16_t>(&rhs),
+      lhs_shape, tflite::micro::GetTensorData<int16_t>(&lhs), output_shape,
+      tflite::micro::GetTensorData<int16_t>(output));
+
+  return kTfLiteOk;
+}
+
+// Perform a batch matrix multiply on
+// LHS <..., A, B>  X  RHS<..., B, C>
+// where the leading dimensions of LHS and RHS obey broadcasting rules
+// (this Op will apply broadcasting rules).
+// We assume that LHS and RHS are both row oriented (adjacent values in memory
+// are in the same row) and will output in the same memory layout. However,
+// our fast GEMM libraries assume RCC layout (LHS row oriented,
+// RHS column oriented, output column oriented). Therefore, we perform
+// RHS <..., C, B> X LHS <..., B, A>
+// where output is a C X A column-oriented, which is equivalent to
+// A X C row-oriented.
+TfLiteStatus BatchMatMulEval(TfLiteContext* context, TfLiteNode* node) {
+  EvalOpContext op_context(context, node);
+  OpData* op_data = op_context.op_data;
+  const TfLiteEvalTensor* lhs = op_context.lhs;
+  const TfLiteEvalTensor* rhs = op_context.rhs;
+  TfLiteEvalTensor* output = op_context.output;
+  RuntimeShape orig_lhs_shape = tflite::micro::GetTensorShape(lhs);
+  RuntimeShape orig_rhs_shape = tflite::micro::GetTensorShape(rhs);
+
+  bool adj_y = op_context.params->adj_y;
+  bool adj_x = op_context.params->adj_x;
+
+  // Compress BatchMatMul when third from last RHS dimension is one.
+  int32_t rhs_dims_count = orig_rhs_shape.DimensionsCount();
+  int32_t lhs_dims_count = orig_lhs_shape.DimensionsCount();
+  // Compress ops where rhs shape is [..., 1, X, Y] and lhs shape is
+  // [..., Q, R, S] which is equivalent to rhs: [..., X, Y] and
+  // lhs: [..., Q * R, S].
+  if (rhs_dims_count > 2 && lhs_dims_count > 2) {
+    int rhs_one = orig_rhs_shape.DimsData()[rhs_dims_count - 3];
+    if (rhs_one == 1) {
+      int32_t* lhs_dims = orig_lhs_shape.DimsData();
+      int32_t* rhs_dims = orig_rhs_shape.DimsData();
+      RuntimeShape tmp_l(lhs_dims_count - 1, lhs_dims);
+      tmp_l.SetDim(lhs_dims_count - 3,
+                   lhs_dims[lhs_dims_count - 3] * lhs_dims[lhs_dims_count - 2]);
+      tmp_l.SetDim(lhs_dims_count - 2, lhs_dims[lhs_dims_count - 1]);
+      orig_lhs_shape.ReplaceWith(tmp_l.DimensionsCount(), tmp_l.DimsData());
+      RuntimeShape tmp_r(rhs_dims_count - 1, orig_rhs_shape.DimsData());
+      tmp_r.SetDim(rhs_dims_count - 3, rhs_dims[rhs_dims_count - 2]);
+      tmp_r.SetDim(rhs_dims_count - 2, rhs_dims[rhs_dims_count - 1]);
+      orig_rhs_shape.ReplaceWith(tmp_r.DimensionsCount(), tmp_r.DimsData());
+      rhs_dims_count = orig_rhs_shape.DimensionsCount();
+      lhs_dims_count = orig_lhs_shape.DimensionsCount();
+    }
+  }
+
+  TfLiteEvalTensor* rhs_tensor = adj_y ? const_cast<TfLiteEvalTensor*>(rhs)
+                                       : op_data->rhs_transposed_tensor;
+  TfLiteEvalTensor* lhs_tensor = adj_x ? op_data->lhs_transposed_tensor
+                                       : const_cast<TfLiteEvalTensor*>(lhs);
+  TF_LITE_ENSURE(context, rhs_tensor != nullptr);
+  TF_LITE_ENSURE(context, lhs_tensor != nullptr);
+  if (!adj_y) {
+    // TODO(b/154760341): Constant tensors should already be transposed, but
+    // we transpose once if necessary for now.
+    if (!(op_data->rhs_is_constant_tensor && op_data->rhs_is_transposed)) {
+      // Propagate a failed transpose (unsupported dtype) instead of
+      // silently discarding the status.
+      TF_LITE_ENSURE_OK(context, TransposeRowsColumns(*rhs, rhs_tensor));
+      op_data->rhs_is_transposed = true;
+    }
+  }
+  if (adj_x) {
+    TF_LITE_ENSURE_OK(context, TransposeRowsColumns(*lhs, lhs_tensor));
+  }
+  RuntimeShape rhs_shape =
+      adj_y ? orig_rhs_shape : SwapRowColumnDims(orig_rhs_shape);
+  RuntimeShape lhs_shape =
+      adj_x ? orig_lhs_shape : SwapRowColumnDims(orig_lhs_shape);
+
+  switch (lhs->type) {
+    case kTfLiteFloat32:
+      // Note we pass RHS args first, LHS args second. See note above.
+      reference_ops::BatchMatMul(
+          rhs_shape, tflite::micro::GetTensorData<float>(rhs_tensor), lhs_shape,
+          tflite::micro::GetTensorData<float>(lhs_tensor),
+          tflite::micro::GetTensorShape(output),
+          tflite::micro::GetTensorData<float>(output));
+      break;
+    case kTfLiteInt8:
+      return EvalInt8(context, *op_data, lhs_shape, *lhs_tensor, rhs_shape,
+                      *rhs_tensor, tflite::micro::GetTensorShape(output),
+                      output);
+    case kTfLiteInt16:
+      return EvalInt16(context, *op_data, lhs_shape, *lhs_tensor, rhs_shape,
+                       *rhs_tensor, tflite::micro::GetTensorShape(output),
+                       output);
+    default:
+      MicroPrintf("BATCH_MATMUL doesn't support input type %s",
+                  TfLiteTypeGetName(lhs->type));
+      return kTfLiteError;
+  }
+  return kTfLiteOk;
+}
+
+}  // namespace
+
+// Returns the TFLM registration for the BATCH_MATMUL builtin op.
+TFLMRegistration Register_BATCH_MATMUL() {
+  return tflite::micro::RegisterOp(BatchMatMulInit, BatchMatMulPrepare,
+                                   BatchMatMulEval);
+}
+
+}  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/batch_matmul_test.cc b/tensorflow/lite/micro/kernels/batch_matmul_test.cc
new file mode 100644
index 0000000..abba757
--- /dev/null
+++ b/tensorflow/lite/micro/kernels/batch_matmul_test.cc
@@ -0,0 +1,736 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <algorithm>
+#include <iterator>
+#include <numeric>
+#include <type_traits>
+
+#include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/micro/kernels/kernel_runner.h"
+#include "tensorflow/lite/micro/test_helpers.h"
+#include "tensorflow/lite/micro/testing/micro_test.h"
+
+namespace tflite {
+namespace testing {
+namespace {
+
+constexpr float kFloatTolerance = 1e-5;
+
+constexpr int kNumInputs = 2;
+constexpr int kNumOutputs = 1;
+constexpr int kLhsInputTensorIndex = 0;
+constexpr int kRhsInputTensorIndex = 1;
+constexpr int kOutputTensorIndex = 2;
+
+// data_min/data_max are used to compute symmetric scale, zero-point is 0
+// scale should be 0 to use data_min/data_max
+template <typename T, size_t kNumElements>
+struct TestQuantizationParams {
+  // quantization parameters
+  float scale;  // if 0, use data_min and data_max
+  int zero_point;
+  float data_min;  // input data minimum value
+  float data_max;  // input data maximum value
+
+  T quantized_data[kNumElements];  // quantized storage
+};
+
+micro::KernelRunner* GetKernelRunnerInstance(
+    TfLiteTensor* tensors, int tensors_count,
+    const TfLiteBatchMatMulParams& params, bool need_init_prepare) {
+  static int kInputArrayData[] = {kNumInputs, kLhsInputTensorIndex,
+                                  kRhsInputTensorIndex};
+  TfLiteIntArray* inputs_array = IntArrayFromInts(kInputArrayData);
+  static int kOutputArrayData[] = {kNumOutputs, kOutputTensorIndex};
+  TfLiteIntArray* outputs_array = IntArrayFromInts(kOutputArrayData);
+
+  static const TFLMRegistration registration = tflite::Register_BATCH_MATMUL();
+
+  alignas(micro::KernelRunner) static char
+      kernel_runner_buffer[sizeof(micro::KernelRunner)] = {};
+
+  static micro::KernelRunner* runner = nullptr;
+  if (runner == nullptr || need_init_prepare) {
+    runner = new (kernel_runner_buffer)
+        micro::KernelRunner(registration, tensors, tensors_count, inputs_array,
+                            outputs_array, &params);
+
+    TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner->InitAndPrepare());
+  }
+
+  return runner;
+}
+
+void TestBatchMatMulFloat(const TfLiteBatchMatMulParams& params,
+                          const int* input_dims_data[kNumInputs],
+                          const float* input_data_lhs,
+                          const float* input_data_rhs, const int* expected_dims,
+                          const float* expected_data, float* output_data,
+                          bool need_constant_rhs = false,
+                          bool need_init_prepare = true) {
+  TfLiteIntArray* input_dims_lhs = IntArrayFromInts(input_dims_data[0]);
+  TfLiteIntArray* input_dims_rhs = IntArrayFromInts(input_dims_data[1]);
+  TfLiteIntArray* output_dims = IntArrayFromInts(expected_dims);
+  const int kOutputCount = ElementCount(*output_dims);
+
+  static TfLiteTensor tensors[kNumInputs + kNumOutputs];
+
+  if (need_init_prepare) {
+    tensors[kLhsInputTensorIndex] =
+        CreateTensor(input_data_lhs, input_dims_lhs);
+    tensors[kRhsInputTensorIndex] =
+        CreateTensor(input_data_rhs, input_dims_rhs);
+    if (need_constant_rhs) {
+      tensors[kRhsInputTensorIndex].allocation_type = kTfLiteMmapRo;
+    }
+    tensors[kOutputTensorIndex] = CreateTensor(output_data, output_dims);
+  }
+
+  constexpr int kTensorCount = std::extent<decltype(tensors)>::value;
+  micro::KernelRunner* runner =
+      GetKernelRunnerInstance(tensors, kTensorCount, params, need_init_prepare);
+  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner->Invoke());
+
+  // check output data against expected
+  for (int i = 0; i < kOutputCount; i++) {
+    TF_LITE_MICRO_EXPECT_NEAR(expected_data[i], output_data[i],
+                              kFloatTolerance);
+  }
+
+  // check output dimensions (relocated) against original dimensions
+  TF_LITE_MICRO_EXPECT_EQ(output_dims->size,
+                          tensors[kOutputTensorIndex].dims->size);
+  for (int i = 0; i < output_dims->size; i++) {
+    TF_LITE_MICRO_EXPECT_EQ(output_dims->data[i],
+                            tensors[kOutputTensorIndex].dims->data[i]);
+  }
+}
+
+template <typename T, size_t kNumElements>
+void SetScaleAndZeroPoint(TestQuantizationParams<T, kNumElements>* q_params) {
+  if (q_params->scale == 0.0f || q_params->data_max != 0 ||
+      q_params->data_min != 0) {
+    q_params->scale =
+        ScaleFromMinMax<T>(q_params->data_min, q_params->data_max);
+    q_params->zero_point =
+        ZeroPointFromMinMax<T>(q_params->data_min, q_params->data_max);
+  }
+}
+
+template <typename T, size_t kNumLhs, size_t kNumRhs, size_t kNumOutput>
+void TestBatchMatMulQuantized(
+    const TfLiteBatchMatMulParams& params,
+    TestQuantizationParams<T, kNumLhs>* quantization_lhs,
+    TestQuantizationParams<T, kNumRhs>* quantization_rhs,
+    TestQuantizationParams<T, kNumOutput>* quantization_output,
+    const int* input_dims_data[kNumInputs], const float* input_data_lhs,
+    const float* input_data_rhs, const int* expected_dims,
+    const T* expected_data, const float* output_data) {
+  TfLiteIntArray* input_dims_lhs = IntArrayFromInts(input_dims_data[0]);
+  TfLiteIntArray* input_dims_rhs = IntArrayFromInts(input_dims_data[1]);
+  TfLiteIntArray* output_dims = IntArrayFromInts(expected_dims);
+  const int kOutputCount = ElementCount(*output_dims);
+
+  static TfLiteTensor tensors[kNumInputs + kNumOutputs];
+
+  SetScaleAndZeroPoint<T, kNumLhs>(quantization_lhs);
+  tensors[kLhsInputTensorIndex] = CreateQuantizedTensor(
+      input_data_lhs, quantization_lhs->quantized_data, input_dims_lhs,
+      quantization_lhs->scale, quantization_lhs->zero_point);
+  SetScaleAndZeroPoint<T, kNumRhs>(quantization_rhs);
+  tensors[kRhsInputTensorIndex] = CreateQuantizedTensor(
+      input_data_rhs, quantization_rhs->quantized_data, input_dims_rhs,
+      quantization_rhs->scale, quantization_rhs->zero_point);
+  SetScaleAndZeroPoint<T, kNumOutput>(quantization_output);
+  tensors[kOutputTensorIndex] = CreateQuantizedTensor(
+      quantization_output->quantized_data, output_dims,
+      quantization_output->scale, quantization_output->zero_point);
+
+  constexpr int kTensorCount = std::extent<decltype(tensors)>::value;
+  micro::KernelRunner* runner =
+      GetKernelRunnerInstance(tensors, kTensorCount, params, true);
+  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner->Invoke());
+
+  // check output data against expected
+  for (int i = 0; i < kOutputCount; i++) {
+    TF_LITE_MICRO_EXPECT_EQ(expected_data[i],
+                            quantization_output->quantized_data[i]);
+  }
+  // check dequantized output data against expected
+  for (int i = 0; i < kOutputCount; i++) {
+    float dequantized_value = (quantization_output->quantized_data[i] -
+                               quantization_output->zero_point) *
+                              quantization_output->scale;
+    TF_LITE_MICRO_EXPECT_NEAR(output_data[i], dequantized_value,
+                              kFloatTolerance);
+  }
+
+  // check output dimensions (relocated) against original dimensions
+  TF_LITE_MICRO_EXPECT_EQ(output_dims->size,
+                          tensors[kOutputTensorIndex].dims->size);
+  for (int i = 0; i < output_dims->size; i++) {
+    TF_LITE_MICRO_EXPECT_EQ(output_dims->data[i],
+                            tensors[kOutputTensorIndex].dims->data[i]);
+  }
+}
+
+}  // namespace
+}  // namespace testing
+}  // namespace tflite
+
+TF_LITE_MICRO_TESTS_BEGIN
+
+TF_LITE_MICRO_TEST(BatchMatMulOpTestFloat32Test_Ones) {
+  constexpr int kLhsInputDims[] = {4, 3, 2, 1, 4};
+  constexpr int kRhsInputDims[] = {4, 3, 1, 4, 1};
+  const int* kInputDims[tflite::testing::kNumInputs] = {kLhsInputDims,
+                                                        kRhsInputDims};
+
+  constexpr size_t kLhsInputSize = 24;
+  float lhs_input[kLhsInputSize];
+  std::iota(std::begin(lhs_input), std::end(lhs_input), 1);
+
+  constexpr size_t kRhsInputSize = 12;
+  float rhs_input[kRhsInputSize];
+  std::iota(std::begin(rhs_input), std::end(rhs_input), 1);
+
+  constexpr float kExpect[] = {30, 70, 278, 382, 782, 950};
+  constexpr int kOutputDims[] = {4, 3, 2, 1, 1};
+  constexpr int kOutputCount = std::extent<decltype(kExpect)>::value;
+  float output_data[kOutputCount];
+
+  constexpr TfLiteBatchMatMulParams params = {
+      false,  // adj_x
+      false,  // adj_y
+      false   // asymmetric_quantize_inputs
+  };
+
+  tflite::testing::TestBatchMatMulFloat(params, kInputDims, lhs_input,
+                                        rhs_input, kOutputDims, kExpect,
+                                        output_data);
+}
+
+TF_LITE_MICRO_TEST(BatchMatMulOpTestFloat32Test_Flatten) {
+  constexpr int kLhsInputDims[] = {4, 3, 2, 2, 4};
+  constexpr int kRhsInputDims[] = {4, 3, 1, 4, 1};
+  const int* kInputDims[tflite::testing::kNumInputs] = {kLhsInputDims,
+                                                        kRhsInputDims};
+
+  constexpr size_t kLhsInputSize = 48;
+  float lhs_input[kLhsInputSize];
+  std::iota(std::begin(lhs_input), std::end(lhs_input), 1);
+
+  constexpr size_t kRhsInputSize = 12;
+  float rhs_input[kRhsInputSize];
+  std::iota(std::begin(rhs_input), std::end(rhs_input), 1);
+
+  constexpr float kExpect[] = {30,  70,  110,  150,  486,  590,
+                               694, 798, 1454, 1622, 1790, 1958};
+  constexpr int kOutputDims[] = {4, 3, 2, 2, 1};
+  constexpr int kOutputCount = std::extent<decltype(kExpect)>::value;
+  float output_data[kOutputCount];
+
+  constexpr TfLiteBatchMatMulParams params = {
+      false,  // adj_x
+      false,  // adj_y
+      false   // asymmetric_quantize_inputs
+  };
+
+  tflite::testing::TestBatchMatMulFloat(params, kInputDims, lhs_input,
+                                        rhs_input, kOutputDims, kExpect,
+                                        output_data);
+}
+
+TF_LITE_MICRO_TEST(BatchMatMulOpTestFloat32Test_Simple) {
+  constexpr int kLhsInputDims[] = {3, 1, 2, 3};
+  constexpr int kRhsInputDims[] = {3, 1, 3, 4};
+  const int* kInputDims[tflite::testing::kNumInputs] = {kLhsInputDims,
+                                                        kRhsInputDims};
+
+  constexpr size_t kLhsInputSize = 6;
+  float lhs_input[kLhsInputSize];
+  std::iota(std::begin(lhs_input), std::end(lhs_input), 1);
+
+  constexpr size_t kRhsInputSize = 12;
+  float rhs_input[kRhsInputSize];
+  std::iota(std::begin(rhs_input), std::end(rhs_input), 7);
+
+  constexpr float kExpect[] = {74., 80., 86., 92., 173., 188., 203., 218.};
+  constexpr int kOutputDims[] = {3, 1, 2, 4};
+  constexpr int kOutputCount = std::extent<decltype(kExpect)>::value;
+  float output_data[kOutputCount];
+
+  constexpr TfLiteBatchMatMulParams params = {
+      false,  // adj_x
+      false,  // adj_y
+      false   // asymmetric_quantize_inputs
+  };
+
+  tflite::testing::TestBatchMatMulFloat(params, kInputDims, lhs_input,
+                                        rhs_input, kOutputDims, kExpect,
+                                        output_data);
+}
+
+TF_LITE_MICRO_TEST(BatchMatMulOpTestFloat32Test_SimpleRHSAdjoint) {
+  constexpr int kLhsInputDims[] = {3, 1, 2, 3};
+  constexpr int kRhsInputDims[] = {3, 1, 4, 3};
+  const int* kInputDims[tflite::testing::kNumInputs] = {kLhsInputDims,
+                                                        kRhsInputDims};
+
+  constexpr size_t kLhsInputSize = 6;
+  float lhs_input[kLhsInputSize];
+  std::iota(std::begin(lhs_input), std::end(lhs_input), 1);
+
+  constexpr float kRhsInput[] = {7, 11, 15, 8, 12, 16, 9, 13, 17, 10, 14, 18};
+
+  constexpr float kExpect[] = {74., 80., 86., 92., 173., 188., 203., 218.};
+  constexpr int kOutputDims[] = {3, 1, 2, 4};
+  constexpr int kOutputCount = std::extent<decltype(kExpect)>::value;
+  float output_data[kOutputCount];
+
+  constexpr TfLiteBatchMatMulParams params = {
+      false,  // adj_x
+      true,   // adj_y
+      false   // asymmetric_quantize_inputs
+  };
+
+  tflite::testing::TestBatchMatMulFloat(params, kInputDims, lhs_input,
+                                        kRhsInput, kOutputDims, kExpect,
+                                        output_data);
+}
+
+TF_LITE_MICRO_TEST(BatchMatMulOpTestFloat32Test_SimpleLHSAdjoint) {
+  constexpr int kLhsInputDims[] = {3, 1, 3, 2};
+  constexpr int kRhsInputDims[] = {3, 1, 3, 4};
+  const int* kInputDims[tflite::testing::kNumInputs] = {kLhsInputDims,
+                                                        kRhsInputDims};
+  constexpr float kLhsInput[] = {1, 4, 2, 5, 3, 6};
+
+  constexpr size_t kRhsInputSize = 12;
+  float rhs_input[kRhsInputSize];
+  std::iota(std::begin(rhs_input), std::end(rhs_input), 7);
+
+  constexpr float kExpect[] = {74., 80., 86., 92., 173., 188., 203., 218.};
+  constexpr int kOutputDims[] = {3, 1, 2, 4};
+  constexpr int kOutputCount = std::extent<decltype(kExpect)>::value;
+  float output_data[kOutputCount];
+
+  constexpr TfLiteBatchMatMulParams params = {
+      true,   // adj_x
+      false,  // adj_y
+      false   // asymmetric_quantize_inputs
+  };
+
+  tflite::testing::TestBatchMatMulFloat(params, kInputDims, kLhsInput,
+                                        rhs_input, kOutputDims, kExpect,
+                                        output_data);
+}
+
+TF_LITE_MICRO_TEST(BatchMatMulOpTestFloat32Test_BatchSizeTwo) {
+  constexpr int kLhsInputDims[] = {3, 2, 2, 3};
+  constexpr int kRhsInputDims[] = {3, 2, 3, 4};
+  const int* kInputDims[tflite::testing::kNumInputs] = {kLhsInputDims,
+                                                        kRhsInputDims};
+  constexpr size_t kLhsInputSize = 12;
+  float lhs_input[kLhsInputSize];
+  std::iota(std::begin(lhs_input), std::end(lhs_input), 1);
+
+  constexpr size_t kRhsInputSize = 24;
+  float rhs_input[kRhsInputSize];
+  std::iota(std::begin(rhs_input), std::end(rhs_input), 7);
+
+  constexpr float kExpect[] = {74.,  80.,  86.,  92.,  173., 188., 203., 218.,
+                               560., 584., 608., 632., 767., 800., 833., 866.};
+  constexpr int kOutputDims[] = {3, 2, 2, 4};
+  constexpr int kOutputCount = std::extent<decltype(kExpect)>::value;
+  float output_data[kOutputCount];
+
+  constexpr TfLiteBatchMatMulParams params = {
+      false,  // adj_x
+      false,  // adj_y
+      false   // asymmetric_quantize_inputs
+  };
+
+  tflite::testing::TestBatchMatMulFloat(params, kInputDims, lhs_input,
+                                        rhs_input, kOutputDims, kExpect,
+                                        output_data);
+}
+
+TF_LITE_MICRO_TEST(BatchMatMulOpTestFloat32Test_Broadcast) {
+  constexpr int kLhsInputDims[] = {3, 2, 2, 3};
+  constexpr int kRhsInputDims[] = {2, 3, 4};
+  const int* kInputDims[tflite::testing::kNumInputs] = {kLhsInputDims,
+                                                        kRhsInputDims};
+  constexpr size_t kLhsInputSize = 12;
+  float lhs_input[kLhsInputSize];
+  std::iota(std::begin(lhs_input), std::end(lhs_input), 1);
+
+  constexpr size_t kRhsInputSize = 12;
+  float rhs_input[kRhsInputSize];
+  std::iota(std::begin(rhs_input), std::end(rhs_input), 7);
+
+  constexpr float kExpect[] = {74.,  80.,  86.,  92.,  173., 188., 203., 218.,
+                               272., 296., 320., 344., 371., 404., 437., 470.};
+  constexpr int kOutputDims[] = {3, 2, 2, 4};
+  constexpr int kOutputCount = std::extent<decltype(kExpect)>::value;
+  float output_data[kOutputCount];
+
+  constexpr TfLiteBatchMatMulParams params = {
+      false,  // adj_x
+      false,  // adj_y
+      false   // asymmetric_quantize_inputs
+  };
+
+  tflite::testing::TestBatchMatMulFloat(params, kInputDims, lhs_input,
+                                        rhs_input, kOutputDims, kExpect,
+                                        output_data);
+}
+
+TF_LITE_MICRO_TEST(BatchMatMulOpTestFloat32Test_BroadcastLHSAdjoint) {
+  constexpr int kLhsInputDims[] = {3, 2, 3, 2};
+  constexpr int kRhsInputDims[] = {2, 3, 4};
+  const int* kInputDims[tflite::testing::kNumInputs] = {kLhsInputDims,
+                                                        kRhsInputDims};
+
+  constexpr float kLhsInput[] = {1, 4, 2, 5, 3, 6, 7, 10, 8, 11, 9, 12};
+
+  constexpr size_t kRhsInputSize = 12;
+  float rhs_input[kRhsInputSize];
+  std::iota(std::begin(rhs_input), std::end(rhs_input), 7);
+
+  constexpr float kExpect[] = {74.,  80.,  86.,  92.,  173., 188., 203., 218.,
+                               272., 296., 320., 344., 371., 404., 437., 470.};
+  constexpr int kOutputDims[] = {3, 2, 2, 4};
+  constexpr int kOutputCount = std::extent<decltype(kExpect)>::value;
+  float output_data[kOutputCount];
+
+  constexpr TfLiteBatchMatMulParams params = {
+      true,   // adj_x
+      false,  // adj_y
+      false   // asymmetric_quantize_inputs
+  };
+
+  tflite::testing::TestBatchMatMulFloat(params, kInputDims, kLhsInput,
+                                        rhs_input, kOutputDims, kExpect,
+                                        output_data);
+}
+
+TF_LITE_MICRO_TEST(BatchMatMulOpTestFloat32Test_Broadcast2) {
+  constexpr int kLhsInputDims[] = {4, 2, 1, 3, 2};
+  constexpr int kRhsInputDims[] = {3, 3, 2, 4};
+  const int* kInputDims[tflite::testing::kNumInputs] = {kLhsInputDims,
+                                                        kRhsInputDims};
+
+  constexpr size_t kLhsInputSize = 12;
+  float lhs_input[kLhsInputSize];
+  std::iota(std::begin(lhs_input), std::end(lhs_input), 1);
+
+  constexpr size_t kRhsInputSize = 24;
+  float rhs_input[kRhsInputSize];
+  std::iota(std::begin(rhs_input), std::end(rhs_input), 7);
+
+  constexpr float kExpect[] = {
+      29.,  32.,  35.,  38.,  65.,  72.,  79.,  86.,  101., 112., 123., 134.,
+      53.,  56.,  59.,  62.,  121., 128., 135., 142., 189., 200., 211., 222.,
+      77.,  80.,  83.,  86.,  177., 184., 191., 198., 277., 288., 299., 310.,
+      137., 152., 167., 182., 173., 192., 211., 230., 209., 232., 255., 278.,
+      257., 272., 287., 302., 325., 344., 363., 382., 393., 416., 439., 462.,
+      377., 392., 407., 422., 477., 496., 515., 534., 577., 600., 623., 646.};
+  constexpr int kOutputDims[] = {4, 2, 3, 3, 4};
+  constexpr int kOutputCount = std::extent<decltype(kExpect)>::value;
+  float output_data[kOutputCount];
+
+  constexpr TfLiteBatchMatMulParams params = {
+      false,  // adj_x
+      false,  // adj_y
+      false   // asymmetric_quantize_inputs
+  };
+
+  tflite::testing::TestBatchMatMulFloat(params, kInputDims, lhs_input,
+                                        rhs_input, kOutputDims, kExpect,
+                                        output_data);
+}
+
+TF_LITE_MICRO_TEST(BatchMatMulOpTestFloat32Test_Broadcast2LHSAdjoint) {
+  constexpr int kLhsInputDims[] = {4, 2, 1, 2, 3};
+  constexpr int kRhsInputDims[] = {3, 3, 2, 4};
+  const int* kInputDims[tflite::testing::kNumInputs] = {kLhsInputDims,
+                                                        kRhsInputDims};
+
+  constexpr float kLhsInput[] = {1, 3, 5, 2, 4, 6, 7, 9, 11, 8, 10, 12};
+
+  constexpr size_t kRhsInputSize = 24;
+  float rhs_input[kRhsInputSize];
+  std::iota(std::begin(rhs_input), std::end(rhs_input), 7);
+
+  constexpr float kExpect[] = {
+      29.,  32.,  35.,  38.,  65.,  72.,  79.,  86.,  101., 112., 123., 134.,
+      53.,  56.,  59.,  62.,  121., 128., 135., 142., 189., 200., 211., 222.,
+      77.,  80.,  83.,  86.,  177., 184., 191., 198., 277., 288., 299., 310.,
+      137., 152., 167., 182., 173., 192., 211., 230., 209., 232., 255., 278.,
+      257., 272., 287., 302., 325., 344., 363., 382., 393., 416., 439., 462.,
+      377., 392., 407., 422., 477., 496., 515., 534., 577., 600., 623., 646.};
+  constexpr int kOutputDims[] = {4, 2, 3, 3, 4};
+  constexpr int kOutputCount = std::extent<decltype(kExpect)>::value;
+  float output_data[kOutputCount];
+
+  constexpr TfLiteBatchMatMulParams params = {
+      true,   // adj_x
+      false,  // adj_y
+      false   // asymmetric_quantize_inputs
+  };
+
+  tflite::testing::TestBatchMatMulFloat(params, kInputDims, kLhsInput,
+                                        rhs_input, kOutputDims, kExpect,
+                                        output_data);
+}
+
+TF_LITE_MICRO_TEST(BatchMatMulOpTestFloat32Test_Broadcast2RHSAdjoint) {
+  constexpr int kLhsInputDims[] = {4, 2, 1, 3, 2};
+  constexpr int kRhsInputDims[] = {3, 3, 4, 2};
+  const int* kInputDims[tflite::testing::kNumInputs] = {kLhsInputDims,
+                                                        kRhsInputDims};
+
+  constexpr size_t kLhsInputSize = 12;
+  float lhs_input[kLhsInputSize];
+  std::iota(std::begin(lhs_input), std::end(lhs_input), 1);
+
+  constexpr float kRhsInput[] = {7,  11, 8,  12, 9,  13, 10, 14,
+                                 15, 19, 16, 20, 17, 21, 18, 22,
+                                 23, 27, 24, 28, 25, 29, 26, 30};
+
+  constexpr float kExpect[] = {
+      29.,  32.,  35.,  38.,  65.,  72.,  79.,  86.,  101., 112., 123., 134.,
+      53.,  56.,  59.,  62.,  121., 128., 135., 142., 189., 200., 211., 222.,
+      77.,  80.,  83.,  86.,  177., 184., 191., 198., 277., 288., 299., 310.,
+      137., 152., 167., 182., 173., 192., 211., 230., 209., 232., 255., 278.,
+      257., 272., 287., 302., 325., 344., 363., 382., 393., 416., 439., 462.,
+      377., 392., 407., 422., 477., 496., 515., 534., 577., 600., 623., 646.};
+  constexpr int kOutputDims[] = {4, 2, 3, 3, 4};
+  constexpr int kOutputCount = std::extent<decltype(kExpect)>::value;
+  float output_data[kOutputCount];
+
+  constexpr TfLiteBatchMatMulParams params = {
+      false,  // adj_x
+      true,   // adj_y
+      false   // asymmetric_quantize_inputs
+  };
+
+  tflite::testing::TestBatchMatMulFloat(params, kInputDims, lhs_input,
+                                        kRhsInput, kOutputDims, kExpect,
+                                        output_data);
+}
+
+TF_LITE_MICRO_TEST(BatchMatMulOpTestFloat32Test_Broadcast2BothAdjoint) {
+  constexpr int kLhsInputDims[] = {4, 2, 1, 2, 3};
+  constexpr int kRhsInputDims[] = {3, 3, 4, 2};
+  const int* kInputDims[tflite::testing::kNumInputs] = {kLhsInputDims,
+                                                        kRhsInputDims};
+
+  constexpr float kLhsInput[] = {1, 3, 5, 2, 4, 6, 7, 9, 11, 8, 10, 12};
+
+  constexpr float kRhsInput[] = {7,  11, 8,  12, 9,  13, 10, 14,
+                                 15, 19, 16, 20, 17, 21, 18, 22,
+                                 23, 27, 24, 28, 25, 29, 26, 30};
+
+  constexpr float kExpect[] = {
+      29.,  32.,  35.,  38.,  65.,  72.,  79.,  86.,  101., 112., 123., 134.,
+      53.,  56.,  59.,  62.,  121., 128., 135., 142., 189., 200., 211., 222.,
+      77.,  80.,  83.,  86.,  177., 184., 191., 198., 277., 288., 299., 310.,
+      137., 152., 167., 182., 173., 192., 211., 230., 209., 232., 255., 278.,
+      257., 272., 287., 302., 325., 344., 363., 382., 393., 416., 439., 462.,
+      377., 392., 407., 422., 477., 496., 515., 534., 577., 600., 623., 646.};
+  constexpr int kOutputDims[] = {4, 2, 3, 3, 4};
+  constexpr int kOutputCount = std::extent<decltype(kExpect)>::value;
+  float output_data[kOutputCount];
+
+  constexpr TfLiteBatchMatMulParams params = {
+      true,  // adj_x
+      true,  // adj_y
+      false  // asymmetric_quantize_inputs
+  };
+
+  tflite::testing::TestBatchMatMulFloat(params, kInputDims, kLhsInput,
+                                        kRhsInput, kOutputDims, kExpect,
+                                        output_data);
+}
+
+TF_LITE_MICRO_TEST(BatchMatMulOpTestFloat32Test_BroadcastFromRHS) {
+  constexpr int kLhsInputDims[] = {2, 4, 5};
+  constexpr int kRhsInputDims[] = {4, 3, 1, 5, 2};
+  const int* kInputDims[tflite::testing::kNumInputs] = {kLhsInputDims,
+                                                        kRhsInputDims};
+
+  constexpr size_t kLhsInputSize = 20;
+  float lhs_input[kLhsInputSize];
+  std::iota(std::begin(lhs_input), std::end(lhs_input), 1);
+
+  constexpr size_t kRhsInputSize = 30;
+  float rhs_input[kRhsInputSize];
+  std::iota(std::begin(rhs_input), std::end(rhs_input), 7);
+
+  constexpr float kExpect[] = {185.,  200.,  460.,  500.,  735.,  800.,
+                               1010., 1100., 335.,  350.,  860.,  900.,
+                               1385., 1450., 1910., 2000., 485.,  500.,
+                               1260., 1300., 2035., 2100., 2810., 2900.};
+  constexpr int kOutputDims[] = {4, 3, 1, 4, 2};
+  constexpr int kOutputCount = std::extent<decltype(kExpect)>::value;
+  float output_data[kOutputCount];
+
+  constexpr TfLiteBatchMatMulParams params = {
+      false,  // adj_x
+      false,  // adj_y
+      false   // asymmetric_quantize_inputs
+  };
+
+  tflite::testing::TestBatchMatMulFloat(params, kInputDims, lhs_input,
+                                        rhs_input, kOutputDims, kExpect,
+                                        output_data);
+}
+
+TF_LITE_MICRO_TEST(ConstRHSBatchMatMulOpModelRHSNotAdjoint) {
+  constexpr int kLhsInputDims[] = {3, 1, 6, 2};
+  constexpr int kRhsInputDims[] = {2, 2, 3};
+  const int* kInputDims[tflite::testing::kNumInputs] = {kLhsInputDims,
+                                                        kRhsInputDims};
+
+  constexpr float kLhsInput[] = {6, 3, 7, 4, 6, 9, 2, 6, 7, 4, 3, 7};
+
+  constexpr float kRhsInput[] = {6, 3, 7, 4, 6, 9};
+
+  constexpr float kExpect[] = {48, 36, 69, 58, 45, 85, 72, 72, 123,
+                               36, 42, 68, 58, 45, 85, 46, 51, 84};
+  constexpr int kOutputDims[] = {3, 1, 6, 3};
+  constexpr int kOutputCount = std::extent<decltype(kExpect)>::value;
+  float output_data[kOutputCount];
+
+  constexpr TfLiteBatchMatMulParams params = {
+      false,  // adj_x
+      false,  // adj_y
+      false   // asymmetric_quantize_inputs
+  };
+
+  tflite::testing::TestBatchMatMulFloat(params, kInputDims, kLhsInput,
+                                        kRhsInput, kOutputDims, kExpect,
+                                        output_data, true);
+  // Eval twice to make sure constant transposed RHS is persistent.
+  tflite::testing::TestBatchMatMulFloat(params, kInputDims, kLhsInput,
+                                        kRhsInput, kOutputDims, kExpect,
+                                        output_data, true, false);
+}
+
+TF_LITE_MICRO_TEST(QuantizedBatchMatMulOpTestSimpleTestQuantizedInt8) {
+  constexpr int kLhsInputDims[] = {2, 2, 10};
+  constexpr int kRhsInputDims[] = {2, 10, 3};
+  const int* kInputDims[tflite::testing::kNumInputs] = {kLhsInputDims,
+                                                        kRhsInputDims};
+
+  constexpr float kLhsInput[] = {
+      1, 2, 3, 4, 5, 6, 7, 8,  -9, -10,  // b = 0
+      1, 2, 3, 4, 5, 6, 7, -8, 9,  -10,  // b = 1
+  };
+  constexpr int kLhsInputCount = std::extent<decltype(kLhsInput)>::value;
+
+  constexpr float kRhsInput[] = {
+      1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5,  5,  5,
+      6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10,
+  };
+  constexpr int kRhsInputCount = std::extent<decltype(kRhsInput)>::value;
+
+  constexpr int8_t kExpect[] = {22, 22, 22, 56, 56, 56};
+  constexpr int kOutputDims[] = {2, 2, 3};
+  constexpr int kOutputCount = std::extent<decltype(kExpect)>::value;
+  constexpr float output_data[kOutputCount] = {23, 23, 23, 57, 57, 57};
+
+  constexpr TfLiteBatchMatMulParams params = {
+      false,  // adj_x
+      false,  // adj_y
+      false   // asymmetric_quantize_inputs
+  };
+
+  tflite::testing::TestQuantizationParams<int8_t, kLhsInputCount>
+      quantization_params_lhs = {0.0f,    // scale
+                                 0,       // zero_point
+                                 -63.5f,  // data_min
+                                 64.0f,   // data_max
+                                 {}};
+  tflite::testing::TestQuantizationParams<int8_t, kRhsInputCount>
+      quantization_params_rhs = {0.0f,    // scale
+                                 0,       // zero_point
+                                 -63.5f,  // data_min
+                                 64.0f,   // data_max
+                                 {}};
+  tflite::testing::TestQuantizationParams<int8_t, kOutputCount>
+      quantization_params_output = {0.0f,     // scale
+                                    0,        // zero_point
+                                    -127.0f,  // data_min
+                                    128.0f,   // data_max
+                                    {}};
+
+  tflite::testing::TestBatchMatMulQuantized<int8_t>(
+      params, &quantization_params_lhs, &quantization_params_rhs,
+      &quantization_params_output, kInputDims, kLhsInput, kRhsInput,
+      kOutputDims, kExpect, output_data);
+}
+
+TF_LITE_MICRO_TEST(QuantizedBatchMatMulOpTestSimpleTestQuantizedInt16) {
+  constexpr int kLhsInputDims[] = {2, 2, 10};
+  constexpr int kRhsInputDims[] = {2, 10, 3};
+  const int* kInputDims[tflite::testing::kNumInputs] = {kLhsInputDims,
+                                                        kRhsInputDims};
+
+  constexpr float kLhsInput[] = {
+      1, 2, 3, 4, 5, 6, 7, 8,  -9, -10,  // b = 0
+      1, 2, 3, 4, 5, 6, 7, -8, 9,  -10,  // b = 1
+  };
+  constexpr int kLhsInputCount = std::extent<decltype(kLhsInput)>::value;
+
+  constexpr float kRhsInput[] = {
+      1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5,  5,  5,
+      6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10,
+  };
+  constexpr int kRhsInputCount = std::extent<decltype(kRhsInput)>::value;
+
+  constexpr int16_t kExpect[] = {23, 23, 23, 57, 57, 57};
+  constexpr int kOutputDims[] = {2, 2, 3};
+  constexpr int kOutputCount = std::extent<decltype(kExpect)>::value;
+  constexpr float output_data[kOutputCount] = {23, 23, 23, 57, 57, 57};
+
+  constexpr TfLiteBatchMatMulParams params = {
+      false,  // adj_x
+      false,  // adj_y
+      false   // asymmetric_quantize_inputs
+  };
+
+  tflite::testing::TestQuantizationParams<int16_t, kLhsInputCount>
+      quantization_params_lhs = {};
+  quantization_params_lhs.scale = 10.0f / std::numeric_limits<int16_t>::max();
+  tflite::testing::TestQuantizationParams<int16_t, kRhsInputCount>
+      quantization_params_rhs = {};
+  quantization_params_rhs.scale = 10.0f / std::numeric_limits<int16_t>::max();
+
+  tflite::testing::TestQuantizationParams<int16_t, kOutputCount>
+      quantization_params_output = {};
+  quantization_params_output.scale = 1.0f;
+
+  tflite::testing::TestBatchMatMulQuantized<int16_t>(
+      params, &quantization_params_lhs, &quantization_params_rhs,
+      &quantization_params_output, kInputDims, kLhsInput, kRhsInput,
+      kOutputDims, kExpect, output_data);
+}
+
+TF_LITE_MICRO_TESTS_END
diff --git a/tensorflow/lite/micro/kernels/batch_to_space_nd.cc b/tensorflow/lite/micro/kernels/batch_to_space_nd.cc
index 090a040..31a1c28 100644
--- a/tensorflow/lite/micro/kernels/batch_to_space_nd.cc
+++ b/tensorflow/lite/micro/kernels/batch_to_space_nd.cc
@@ -38,7 +38,7 @@
 const int kInputOutputMinDimensionNum = 3;
 const int kInputOutputMaxDimensionNum = 4;
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus BatchToSpaceNDPrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
@@ -62,7 +62,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus BatchToSpaceNDEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* input =
       tflite::micro::GetEvalInput(context, node, kInputTensor);
   const TfLiteEvalTensor* block_shape =
@@ -106,7 +106,8 @@
 }  // namespace.
 
 TFLMRegistration Register_BATCH_TO_SPACE_ND() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  return tflite::micro::RegisterOp(nullptr, BatchToSpaceNDPrepare,
+                                   BatchToSpaceNDEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/call_once.cc b/tensorflow/lite/micro/kernels/call_once.cc
index 8ad1c20..65857ef 100644
--- a/tensorflow/lite/micro/kernels/call_once.cc
+++ b/tensorflow/lite/micro/kernels/call_once.cc
@@ -36,12 +36,12 @@
   bool has_run;
 };
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* CallOnceInit(TfLiteContext* context, const char* buffer, size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
   return context->AllocatePersistentBuffer(context, sizeof(OpData));
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus CallOncePrepare(TfLiteContext* context, TfLiteNode* node) {
   OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
   const auto* params =
       reinterpret_cast<const TfLiteCallOnceParams*>(node->builtin_data);
@@ -60,7 +60,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus CallOnceEval(TfLiteContext* context, TfLiteNode* node) {
   OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
 
   // Call once only runs one time then is a no-op for every subsequent call.
@@ -82,7 +82,7 @@
 }  // namespace.
 
 TFLMRegistration Register_CALL_ONCE() {
-  return tflite::micro::RegisterOp(Init, Prepare, Eval);
+  return tflite::micro::RegisterOp(CallOnceInit, CallOncePrepare, CallOnceEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/cast.cc b/tensorflow/lite/micro/kernels/cast.cc
index a493618..0b450d6 100644
--- a/tensorflow/lite/micro/kernels/cast.cc
+++ b/tensorflow/lite/micro/kernels/cast.cc
@@ -1,4 +1,4 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -25,7 +25,7 @@
 constexpr int kInputTensor = 0;
 constexpr int kOutputTensor = 0;
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus CastPrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
@@ -63,6 +63,9 @@
     case kTfLiteInt32:
       copyCast(in, out->data.i32, num_elements);
       break;
+    case kTfLiteUInt32:
+      copyCast(in, out->data.u32, num_elements);
+      break;
     case kTfLiteFloat32:
       copyCast(in, tflite::micro::GetTensorData<float>(out), num_elements);
       break;
@@ -74,7 +77,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus CastEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* input =
       tflite::micro::GetEvalInput(context, node, kInputTensor);
   TfLiteEvalTensor* output =
@@ -98,6 +101,9 @@
     case kTfLiteFloat32:
       return copyToTensor(context, tflite::micro::GetTensorData<float>(input),
                           output, num_elements);
+    case kTfLiteBool:
+      return copyToTensor(context, tflite::micro::GetTensorData<bool>(input),
+                          output, num_elements);
     default:
       // Unsupported type.
       MicroPrintf("Input type %s (%d) not supported.",
@@ -108,7 +114,7 @@
 }  // namespace
 
 TFLMRegistration Register_CAST() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  return tflite::micro::RegisterOp(nullptr, CastPrepare, CastEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/cast_test.cc b/tensorflow/lite/micro/kernels/cast_test.cc
index f5ab660..8625572 100644
--- a/tensorflow/lite/micro/kernels/cast_test.cc
+++ b/tensorflow/lite/micro/kernels/cast_test.cc
@@ -1,4 +1,4 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -113,4 +113,28 @@
   tflite::testing::TestCast(input_dims, input_values, golden, output_data);
 }
 
+TF_LITE_MICRO_TEST(CastUInt32ToInt32) {
+  int32_t output_data[6];
+  int input_dims[] = {2, 2, 3};
+  const uint32_t input_values[] = {100, 200, 300, 400, 500, 600};
+  const int32_t golden[] = {100, 200, 300, 400, 500, 600};
+  tflite::testing::TestCast(input_dims, input_values, golden, output_data);
+}
+
+TF_LITE_MICRO_TEST(CastInt32ToUInt32) {
+  uint32_t output_data[6];
+  int input_dims[] = {2, 2, 3};
+  const int32_t input_values[] = {100, 200, 300, 400, 500, 600};
+  const uint32_t golden[] = {100, 200, 300, 400, 500, 600};
+  tflite::testing::TestCast(input_dims, input_values, golden, output_data);
+}
+
+TF_LITE_MICRO_TEST(CastBoolToFloat) {
+  float output_data[6];
+  int input_dims[] = {2, 2, 3};
+  const bool input_values[] = {true, true, false, true, false, true};
+  const float golden[] = {1.f, 1.0f, 0.f, 1.0f, 0.0f, 1.0f};
+  tflite::testing::TestCast(input_dims, input_values, golden, output_data);
+}
+
 TF_LITE_MICRO_TESTS_END
diff --git a/tensorflow/lite/micro/kernels/ceil.cc b/tensorflow/lite/micro/kernels/ceil.cc
index 46b55e7..36139f9 100644
--- a/tensorflow/lite/micro/kernels/ceil.cc
+++ b/tensorflow/lite/micro/kernels/ceil.cc
@@ -27,7 +27,7 @@
 constexpr int kInputTensor = 0;
 constexpr int kOutputTensor = 0;
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus CeilPrepare(TfLiteContext* context, TfLiteNode* node) {
   MicroContext* micro_context = GetMicroContext(context);
 
   TfLiteTensor* input =
@@ -50,7 +50,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus CeilEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* input =
       tflite::micro::GetEvalInput(context, node, kInputTensor);
   TfLiteEvalTensor* output =
@@ -67,7 +67,7 @@
 }  // namespace
 
 TFLMRegistration Register_CEIL() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  return tflite::micro::RegisterOp(nullptr, CeilPrepare, CeilEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/cmsis_nn/README.md b/tensorflow/lite/micro/kernels/cmsis_nn/README.md
index e4a4de3..dc531b7 100644
--- a/tensorflow/lite/micro/kernels/cmsis_nn/README.md
+++ b/tensorflow/lite/micro/kernels/cmsis_nn/README.md
@@ -1,12 +1,14 @@
 <!-- mdformat off(b/169948621#comment2) -->
 
-# Info
+# General Info
 CMSIS-NN is a library containing kernel optimizations for Arm(R) Cortex(R)-M
 processors. To use CMSIS-NN optimized kernels instead of reference kernels, add
 `OPTIMIZED_KERNEL_DIR=cmsis_nn` to the make command line. See examples below.
 
 For more information about the optimizations, check out
-[CMSIS-NN documentation](https://github.com/ARM-software/CMSIS_5/blob/develop/CMSIS/NN/README.md).
+[CMSIS-NN documentation](https://github.com/ARM-software/CMSIS-NN/blob/main/README.md).
+
+# Specifying path to CMSIS-NN
 
 By default CMSIS-NN is built by code that is downloaded to the TFLM tree.
 It also possible to build CMSIS-NN code from an external path by specifying
@@ -14,7 +16,7 @@
 since CMSIS-NN has a dependency to CMSIS-Core. As a third option CMSIS-NN can be provided manually as an external library.
 The examples below will illustrate this.
 
-# Example - FVP based on Arm Corstone-300 software.
+## Example - FVP based on Arm Corstone-300 software.
 In this example, the kernel conv unit test is built. For more information about
 this specific target, check out the [Corstone-300 readme](https://github.com/tensorflow/tflite-micro/tree/main/tensorflow/lite/micro/cortex_m_corstone_300/README.md).
 
@@ -39,3 +41,22 @@
 Also note that if specifying CMSIS_NN_LIBS but not CMSIS_PATH and or CMSIS_NN_PATH, headers and
 system/startup code from the default downloaded path of CMSIS would be used.
 So CMSIS_NN_LIBS, CMSIS_NN_PATH and CMSIS_PATH should have the same base path and if not there will be a build error.
+
+# Build for speed or size
+It is possible to build for speed or size. The size option may be required for a large model on an embedded system with limited memory. Where applicable, building for size would result in higher latency paired with a smaller scratch buffer, whereas building for speed would result in lower latency with a larger scratch buffer. Currently only transpose conv supports this.  See examples below.
+
+## Example - building a static library with CMSIS-NN optimized kernels
+More info on the target used in this example: https://github.com/tensorflow/tflite-micro/blob/main/tensorflow/lite/micro/cortex_m_generic/README.md
+
+Building for speed (default):
+Note that speed is default so if leaving out OPTIMIZE_KERNELS_FOR completely that will be the default.
+```
+make -f tensorflow/lite/micro/tools/make/Makefile TARGET=cortex_m_generic TARGET_ARCH=cortex-m55 OPTIMIZED_KERNEL_DIR=cmsis_nn OPTIMIZE_KERNELS_FOR=KERNELS_OPTIMIZED_FOR_SPEED microlite
+
+```
+
+Building for size:
+```
+make -f tensorflow/lite/micro/tools/make/Makefile TARGET=cortex_m_generic TARGET_ARCH=cortex-m55 OPTIMIZED_KERNEL_DIR=cmsis_nn OPTIMIZE_KERNELS_FOR=KERNELS_OPTIMIZED_FOR_SIZE microlite
+
+```
diff --git a/tensorflow/lite/micro/kernels/cmsis_nn/add.cc b/tensorflow/lite/micro/kernels/cmsis_nn/add.cc
index 898410a..fb166a1 100644
--- a/tensorflow/lite/micro/kernels/cmsis_nn/add.cc
+++ b/tensorflow/lite/micro/kernels/cmsis_nn/add.cc
@@ -1,4 +1,4 @@
-/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -301,6 +301,15 @@
       micro_context->AllocateTempOutputTensor(node, kOutputTensor);
   TF_LITE_ENSURE(context, output != nullptr);
 
+  TF_LITE_ENSURE_EQ(context, input1->type, output->type);
+  TF_LITE_ENSURE_MSG(
+      context,
+      input1->type == kTfLiteFloat32 || input1->type == kTfLiteInt32 ||
+          input1->type == kTfLiteInt16 || input1->type == kTfLiteInt8,
+      "Input data type not supported");
+  TF_LITE_ENSURE_MSG(context, input1->type == input2->type,
+                     "Hybrid models are not supported on TFLite Micro.");
+
   if (input1->type == kTfLiteInt16) {
     TF_LITE_ENSURE_EQ(context, input1->params.zero_point, 0);
     TF_LITE_ENSURE_EQ(context, input2->params.zero_point, 0);
diff --git a/tensorflow/lite/micro/kernels/cmsis_nn/conv.cc b/tensorflow/lite/micro/kernels/cmsis_nn/conv.cc
index 8b6928b..cae68c7 100644
--- a/tensorflow/lite/micro/kernels/cmsis_nn/conv.cc
+++ b/tensorflow/lite/micro/kernels/cmsis_nn/conv.cc
@@ -1,4 +1,4 @@
-/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -21,7 +21,6 @@
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/quantization_util.h"
 #include "tensorflow/lite/kernels/internal/reference/conv.h"
-#include "tensorflow/lite/kernels/internal/reference/integer_ops/conv.h"
 #include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
 #include "tensorflow/lite/kernels/kernel_util.h"
 #include "tensorflow/lite/kernels/padding.h"
@@ -63,39 +62,62 @@
   TfLiteTensor* output =
       micro_context->AllocateTempOutputTensor(node, kConvOutputTensor);
   TF_LITE_ENSURE(context, output != nullptr);
+  TfLiteTensor* bias =
+      micro_context->AllocateTempInputTensor(node, kConvBiasTensor);
+  TfLiteType bias_type = bias != nullptr ? bias->type : kTfLiteNoType;
 
-  RuntimeShape input_shape = GetTensorShape(input);
-  RuntimeShape output_shape = GetTensorShape(output);
+  TF_LITE_ENSURE_EQ(context, input->type, output->type);
+  TF_LITE_ENSURE_MSG(context,
+                     input->type == kTfLiteFloat32 ||
+                         input->type == kTfLiteInt16 ||
+                         input->type == kTfLiteInt8,
+                     "Input data type not supported");
+  TF_LITE_ENSURE_MSG(
+      context,
+      (input->type == kTfLiteFloat32 && filter->type == kTfLiteFloat32) ||
+          (input->type == kTfLiteInt16 && filter->type == kTfLiteInt8) ||
+          (input->type == kTfLiteInt8 &&
+           (filter->type == kTfLiteInt4 || filter->type == kTfLiteInt8)),
+      "Hybrid models are not supported on TFLite Micro.");
 
-  // Initialize cmsis_nn input dimensions
+  // Consistency check tensor dims
+  // Dimensionality
+  TF_LITE_ENSURE_EQ(context, input->dims->size, 4);
+  TF_LITE_ENSURE_EQ(context, filter->dims->size, 4);
+  TF_LITE_ENSURE_EQ(context, output->dims->size, 4);
+  // Equal batch size in input and output
+  TF_LITE_ENSURE_EQ(context, input->dims->data[0], output->dims->data[0]);
+  // Input channels should be an even multiple of filter channels
+  TF_LITE_ENSURE(context, filter->dims->data[3] > 0);
+  TF_LITE_ENSURE_EQ(context, input->dims->data[3] % filter->dims->data[3], 0);
+  // Output channels should be an even multiple of the number of groups
+  const int groups = input->dims->data[3] / filter->dims->data[3];
+  TFLITE_DCHECK_EQ(output->dims->data[3] % groups, 0);
+  // Bias size equal to output channels
+  if (bias != nullptr) {
+    TF_LITE_ENSURE_EQ(context, bias->dims->size, 4);
+    const int bias_size = NumElements(bias->dims);
+    TFLITE_DCHECK_EQ(bias_size, output->dims->data[3]);
+  }
+
+  // Initialize cmsis_nn dimensions
   cmsis_nn_dims input_dims;
-  input_dims.n = MatchingDim(input_shape, 0, output_shape, 0);
+  input_dims.n = input->dims->data[0];
   input_dims.h = input->dims->data[1];
   input_dims.w = input->dims->data[2];
-  input_dims.c = input_shape.Dims(3);
+  input_dims.c = input->dims->data[3];
 
-  // Initialize cmsis_nn filter dimensions
   cmsis_nn_dims filter_dims;
-  filter_dims.n = output_shape.Dims(3);
+  filter_dims.n = 1;
   filter_dims.h = filter->dims->data[1];
   filter_dims.w = filter->dims->data[2];
-  filter_dims.c = input_dims.c;
+  filter_dims.c = filter->dims->data[3];
 
-  // Initialize cmsis_nn output dimensions
   cmsis_nn_dims output_dims;
-  output_dims.n = input_dims.n;
+  output_dims.n = output->dims->data[0];
   output_dims.h = output->dims->data[1];
   output_dims.w = output->dims->data[2];
-  output_dims.c = output_shape.Dims(3);
-
-  if (filter->type == kTfLiteInt4) {
-    int filter_size =
-        RuntimeShape(filter->dims->size,
-                     reinterpret_cast<const int32_t*>(filter->dims->data))
-            .FlatSize();
-    context->RequestScratchBufferInArena(
-        context, filter_size, &data->reference_op_data.filter_buffer_index);
-  }
+  output_dims.c = output->dims->data[3];
 
   if (input->type == kTfLiteInt8 || input->type == kTfLiteInt16) {
     const int num_channels = filter->dims->data[kConvQuantizedDimension];
@@ -112,7 +134,10 @@
       filter_dims.h, output_dims.w, output_dims.h, input->type,
       &data->reference_op_data));
 
-  if (input->type == kTfLiteInt8 || input->type == kTfLiteInt16) {
+  // CMSIS_NN allows INT64 or nullptr bias data pointer
+  if (input->type == kTfLiteInt8 ||
+      (input->type == kTfLiteInt16 &&
+       (bias_type == kTfLiteInt64 || bias_type == kTfLiteNoType))) {
     // Initialize cmsis_nn convolution parameters
     cmsis_nn_conv_params conv_params;
     conv_params.input_offset = -input->params.zero_point;
@@ -147,10 +172,76 @@
   micro_context->DeallocateTempTfLiteTensor(output);
   micro_context->DeallocateTempTfLiteTensor(input);
   micro_context->DeallocateTempTfLiteTensor(filter);
+  if (bias != nullptr) {
+    micro_context->DeallocateTempTfLiteTensor(bias);
+  }
 
   return kTfLiteOk;
 }
 
+template <class ActType, class BiasType, class WeightsType>
+arm_cmsis_nn_status convolve_wrapper(
+    const cmsis_nn_context* ctx, const cmsis_nn_conv_params* conv_params,
+    const cmsis_nn_per_channel_quant_params* quant_params,
+    const cmsis_nn_dims* input_dims, const ActType* input,
+    const cmsis_nn_dims* filter_dims, const int8_t* filter,
+    const cmsis_nn_dims* bias_dims, const BiasType* bias,
+    const cmsis_nn_dims* output_dims, ActType* output, WeightsType weightsT) {
+  return ARM_CMSIS_NN_ARG_ERROR;
+}
+
+template <>
+arm_cmsis_nn_status convolve_wrapper(
+    const cmsis_nn_context* ctx, const cmsis_nn_conv_params* conv_params,
+    const cmsis_nn_per_channel_quant_params* quant_params,
+    const cmsis_nn_dims* input_dims, const int8_t* input,
+    const cmsis_nn_dims* filter_dims, const int8_t* filter,
+    const cmsis_nn_dims* bias_dims, const int32_t* bias,
+    const cmsis_nn_dims* output_dims, int8_t* output, TfLiteType weightsT) {
+  if (weightsT == kTfLiteInt8) {
+    return arm_convolve_wrapper_s8(ctx, conv_params, quant_params, input_dims,
+                                   input, filter_dims, filter, bias_dims, bias,
+                                   output_dims, output);
+  } else if (weightsT == kTfLiteInt4) {
+    return arm_convolve_wrapper_s4(ctx, conv_params, quant_params, input_dims,
+                                   input, filter_dims, filter, bias_dims, bias,
+                                   output_dims, output);
+  } else {
+    return ARM_CMSIS_NN_ARG_ERROR;
+  }
+}
+
+template <>
+arm_cmsis_nn_status convolve_wrapper(
+    const cmsis_nn_context* ctx, const cmsis_nn_conv_params* conv_params,
+    const cmsis_nn_per_channel_quant_params* quant_params,
+    const cmsis_nn_dims* input_dims, const int16_t* input,
+    const cmsis_nn_dims* filter_dims, const int8_t* filter,
+    const cmsis_nn_dims* bias_dims, const int64_t* bias,
+    const cmsis_nn_dims* output_dims, int16_t* output, TfLiteType weightsT) {
+  const cmsis_nn_bias_data bias_data = {bias, false};
+
+  return arm_convolve_wrapper_s16(ctx, conv_params, quant_params, input_dims,
+                                  input, filter_dims, filter, bias_dims,
+                                  &bias_data, output_dims, output);
+}
+
+template <>
+arm_cmsis_nn_status convolve_wrapper(
+    const cmsis_nn_context* ctx, const cmsis_nn_conv_params* conv_params,
+    const cmsis_nn_per_channel_quant_params* quant_params,
+    const cmsis_nn_dims* input_dims, const int16_t* input,
+    const cmsis_nn_dims* filter_dims, const int8_t* filter,
+    const cmsis_nn_dims* bias_dims, const int32_t* bias,
+    const cmsis_nn_dims* output_dims, int16_t* output, TfLiteType weightsT) {
+  const cmsis_nn_bias_data bias_data = {bias, true};
+
+  return arm_convolve_wrapper_s16(ctx, conv_params, quant_params, input_dims,
+                                  input, filter_dims, filter, bias_dims,
+                                  &bias_data, output_dims, output);
+}
+
+template <typename ActType, typename BiasType, TfLiteType type>
 TfLiteStatus EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
                                      const TfLiteConvParams& params,
                                      const OpData& data,
@@ -179,51 +270,31 @@
   quant_params.shift =
       const_cast<int32_t*>(data.reference_op_data.per_channel_output_shift);
 
-  RuntimeShape filter_shape = tflite::micro::GetTensorShape(filter);
-  RuntimeShape input_shape = tflite::micro::GetTensorShape(input);
-  RuntimeShape output_shape = tflite::micro::GetTensorShape(output);
-  RuntimeShape bias_shape = tflite::micro::GetTensorShape(bias);
-
-  // Consistency check.
-  TFLITE_DCHECK_LE(conv_params.activation.min, conv_params.activation.max);
-  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
-  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
-  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
-  const int batch_size = MatchingDim(input_shape, 0, output_shape, 0);
-  const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3);
-  const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3);
-  if (tflite::micro::GetOptionalTensorData<int8_t>(bias)) {
-    TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);
-  }
-
-  // Initialize cmsis_nn dimensions
-  // Input
+  // Initialize cmsis_nn dimension structs, consistency is checked in the
+  // prepare stage
   cmsis_nn_dims input_dims;
-  input_dims.n = batch_size;
-  input_dims.h = input_shape.Dims(1);
-  input_dims.w = input_shape.Dims(2);
-  input_dims.c = input_depth;
+  input_dims.n = input->dims->data[0];
+  input_dims.h = input->dims->data[1];
+  input_dims.w = input->dims->data[2];
+  input_dims.c = input->dims->data[3];
 
-  // Filter
   cmsis_nn_dims filter_dims;
-  filter_dims.n = output_depth;
-  filter_dims.h = filter_shape.Dims(1);
-  filter_dims.w = filter_shape.Dims(2);
-  filter_dims.c = input_depth;
+  filter_dims.n = 1;
+  filter_dims.h = filter->dims->data[1];
+  filter_dims.w = filter->dims->data[2];
+  filter_dims.c = filter->dims->data[3];
 
-  // Bias
   cmsis_nn_dims bias_dims;
   bias_dims.n = 1;
   bias_dims.h = 1;
   bias_dims.w = 1;
-  bias_dims.c = output_depth;
+  bias_dims.c = output->dims->data[3];
 
-  // Output
   cmsis_nn_dims output_dims;
-  output_dims.n = batch_size;
-  output_dims.h = output_shape.Dims(1);
-  output_dims.w = output_shape.Dims(2);
-  output_dims.c = output_depth;
+  output_dims.n = output->dims->data[0];
+  output_dims.h = output->dims->data[1];
+  output_dims.w = output->dims->data[2];
+  output_dims.c = output->dims->data[3];
 
   // Initialize cmsis_nn context
   cmsis_nn_context ctx;
@@ -233,118 +304,44 @@
   if (data.buffer_idx > -1) {
     ctx.buf = context->GetScratchBuffer(context, data.buffer_idx);
     // Note: ctx.size is currently not used in cmsis_nn.
-    // The buffer should be allocated in the Prepare function through
-    // arm_convolve_wrapper_s8_get_buffer_size
+    // The buffer should be allocated in the prepare function through
+    // the corresponding arm_convolve_wrapper_[type]_get_buffer_size
   }
 
-  // arm_convolve_wrapper_s8 dispatches the optimized kernel accordingly with
-  // the parameters passed
+  // arm_convolve_wrapper_[type] dispatches the optimized kernel accordingly
+  // with the parameters passed
   TFLITE_DCHECK_EQ(
-      arm_convolve_wrapper_s8(
+      convolve_wrapper(
           &ctx, &conv_params, &quant_params, &input_dims,
-          tflite::micro::GetTensorData<int8_t>(input), &filter_dims,
+          tflite::micro::GetTensorData<ActType>(input), &filter_dims,
           tflite::micro::GetTensorData<int8_t>(filter), &bias_dims,
-          tflite::micro::GetOptionalTensorData<int32_t>(bias), &output_dims,
-          tflite::micro::GetTensorData<int8_t>(output)),
+          tflite::micro::GetOptionalTensorData<BiasType>(bias), &output_dims,
+          tflite::micro::GetTensorData<ActType>(output), type),
       ARM_CMSIS_NN_SUCCESS);
 
   return kTfLiteOk;
 }
 
-TfLiteStatus EvalQuantizedPerChannel16x8(
-    TfLiteContext* context, TfLiteNode* node, const TfLiteConvParams& params,
-    const OpData& data, const TfLiteEvalTensor* input,
-    const TfLiteEvalTensor* filter, const TfLiteEvalTensor* bias,
-    TfLiteEvalTensor* output) {
-  cmsis_nn_conv_params conv_params;
-  conv_params.dilation.h = params.dilation_height_factor;
-  conv_params.dilation.w = params.dilation_width_factor;
+TfLiteStatus EvalInt4(TfLiteContext* context, TfLiteNode* node) {
+  const TfLiteEvalTensor* input =
+      tflite::micro::GetEvalInput(context, node, kConvInputTensor);
+  const TfLiteEvalTensor* filter =
+      tflite::micro::GetEvalInput(context, node, kConvWeightsTensor);
+  const TfLiteEvalTensor* bias =
+      (NumInputs(node) == 3)
+          ? tflite::micro::GetEvalInput(context, node, kConvBiasTensor)
+          : nullptr;
+  TfLiteEvalTensor* output =
+      tflite::micro::GetEvalOutput(context, node, kConvOutputTensor);
 
-  // Initialize cmsis_nn convolution parameters
-  conv_params.input_offset = -data.reference_op_data.input_zero_point;
-  conv_params.output_offset = data.reference_op_data.output_zero_point;
-  conv_params.stride.h = params.stride_height;
-  conv_params.stride.w = params.stride_width;
-  conv_params.padding.h = data.reference_op_data.padding.height;
-  conv_params.padding.w = data.reference_op_data.padding.width;
-  conv_params.activation.min = data.reference_op_data.output_activation_min;
-  conv_params.activation.max = data.reference_op_data.output_activation_max;
+  TFLITE_DCHECK(node->builtin_data != nullptr);
+  const auto& params =
+      *(reinterpret_cast<TfLiteConvParams*>(node->builtin_data));
+  TFLITE_DCHECK(node->user_data != nullptr);
+  const OpData& data = *(static_cast<const OpData*>(node->user_data));
 
-  // Initialize cmsis_nn per channel quantization parameters
-  cmsis_nn_per_channel_quant_params quant_params;
-  quant_params.multiplier = const_cast<int32_t*>(
-      data.reference_op_data.per_channel_output_multiplier);
-  quant_params.shift =
-      const_cast<int32_t*>(data.reference_op_data.per_channel_output_shift);
-
-  RuntimeShape filter_shape = tflite::micro::GetTensorShape(filter);
-  RuntimeShape input_shape = tflite::micro::GetTensorShape(input);
-  RuntimeShape output_shape = tflite::micro::GetTensorShape(output);
-  RuntimeShape bias_shape = tflite::micro::GetTensorShape(bias);
-
-  // Consistency check.
-  TFLITE_DCHECK_LE(conv_params.activation.min, conv_params.activation.max);
-  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
-  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
-  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
-  const int batch_size = MatchingDim(input_shape, 0, output_shape, 0);
-  const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3);
-  const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3);
-  if (tflite::micro::GetOptionalTensorData<int8_t>(bias)) {
-    TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);
-  }
-
-  // Initialize cmsis_nn dimensions
-  // Input
-  cmsis_nn_dims input_dims;
-  input_dims.n = batch_size;
-  input_dims.h = input_shape.Dims(1);
-  input_dims.w = input_shape.Dims(2);
-  input_dims.c = input_depth;
-
-  // Filter
-  cmsis_nn_dims filter_dims;
-  filter_dims.n = output_depth;
-  filter_dims.h = filter_shape.Dims(1);
-  filter_dims.w = filter_shape.Dims(2);
-  filter_dims.c = input_depth;
-
-  // Bias
-  cmsis_nn_dims bias_dims;
-  bias_dims.n = 1;
-  bias_dims.h = 1;
-  bias_dims.w = 1;
-  bias_dims.c = output_depth;
-
-  // Output
-  cmsis_nn_dims output_dims;
-  output_dims.n = batch_size;
-  output_dims.h = output_shape.Dims(1);
-  output_dims.w = output_shape.Dims(2);
-  output_dims.c = output_depth;
-
-  // Initialize cmsis_nn context
-  cmsis_nn_context ctx;
-  ctx.buf = nullptr;
-  ctx.size = 0;
-
-  if (data.buffer_idx > -1) {
-    ctx.buf = context->GetScratchBuffer(context, data.buffer_idx);
-    // Note: ctx.size is currently not used in cmsis_nn.
-    // The buffer should be allocated in the Prepare function through
-    // arm_convolve_wrapper_s8_get_buffer_size
-  }
-
-  TFLITE_DCHECK_EQ(
-      arm_convolve_wrapper_s16(
-          &ctx, &conv_params, &quant_params, &input_dims,
-          tflite::micro::GetTensorData<int16_t>(input), &filter_dims,
-          tflite::micro::GetTensorData<int8_t>(filter), &bias_dims,
-          tflite::micro::GetOptionalTensorData<int64_t>(bias), &output_dims,
-          tflite::micro::GetTensorData<int16_t>(output)),
-      ARM_CMSIS_NN_SUCCESS);
-
-  return kTfLiteOk;
+  return EvalQuantizedPerChannel<int8_t, int32_t, kTfLiteInt4>(
+      context, node, params, data, input, filter, bias, output);
 }
 
 TfLiteStatus EvalInt8(TfLiteContext* context, TfLiteNode* node) {
@@ -364,11 +361,9 @@
       *(reinterpret_cast<TfLiteConvParams*>(node->builtin_data));
   TFLITE_DCHECK(node->user_data != nullptr);
   const OpData& data = *(static_cast<const OpData*>(node->user_data));
-  TfLiteEvalTensor filter_int8 = tflite::micro::MakeUnpackedInt4Tensor(
-      context, data.reference_op_data.filter_buffer_index, filter);
 
-  return EvalQuantizedPerChannel(context, node, params, data, input,
-                                 &filter_int8, bias, output);
+  return EvalQuantizedPerChannel<int8_t, int32_t, kTfLiteInt8>(
+      context, node, params, data, input, filter, bias, output);
 }
 
 TfLiteStatus EvalInt16x8(TfLiteContext* context, TfLiteNode* node) {
@@ -389,8 +384,17 @@
   TFLITE_DCHECK(node->user_data != nullptr);
   const OpData& data = *(static_cast<const OpData*>(node->user_data));
 
-  return EvalQuantizedPerChannel16x8(context, node, params, data, input, filter,
-                                     bias, output);
+  if (bias == nullptr || bias->type == kTfLiteInt32) {
+    return EvalQuantizedPerChannel<int16_t, int32_t, kTfLiteInt16>(
+        context, node, params, data, input, filter, bias, output);
+  } else if (bias->type == kTfLiteInt64) {
+    return EvalQuantizedPerChannel<int16_t, int64_t, kTfLiteInt16>(
+        context, node, params, data, input, filter, bias, output);
+  } else {
+    MicroPrintf("Bias type %s (%d) not supported.",
+                TfLiteTypeGetName(bias->type), bias->type);
+    return kTfLiteError;
+  }
 }
 
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
@@ -419,9 +423,6 @@
           (input->type == kTfLiteInt8 && filter->type == kTfLiteInt4),
       "Hybrid models are not supported on TFLite Micro.");
 
-  TfLiteEvalTensor filter_int8 = tflite::micro::MakeUnpackedInt4Tensor(
-      context, data.reference_op_data.filter_buffer_index, filter);
-
   switch (input->type) {  // Already know in/out types are same.
     case kTfLiteFloat32: {
       tflite::reference_ops::Conv(
@@ -437,30 +438,44 @@
           tflite::micro::GetTensorShape(nullptr), nullptr);
       break;
     }
-    case kTfLiteInt8:
-      switch (filter_int8.type) {
-        case kTfLiteInt8: {
-          return EvalQuantizedPerChannel(context, node, params, data, input,
-                                         &filter_int8, bias, output);
+    case kTfLiteInt8: {
+      switch (filter->type) {
+        case kTfLiteInt4: {
+          return EvalQuantizedPerChannel<int8_t, int32_t, kTfLiteInt4>(
+              context, node, params, data, input, filter, bias, output);
         }
-
+        case kTfLiteInt8: {
+          return EvalQuantizedPerChannel<int8_t, int32_t, kTfLiteInt8>(
+              context, node, params, data, input, filter, bias, output);
+        }
         default: {
           MicroPrintf("Filter type %s (%d) not supported.",
                       TfLiteTypeGetName(filter->type), filter->type);
           return kTfLiteError;
         }
       }
-
       break;
-    case kTfLiteInt16:
-      return EvalQuantizedPerChannel16x8(context, node, params, data, input,
-                                         filter, bias, output);
+    }
+    case kTfLiteInt16: {
+      if (bias == nullptr || bias->type == kTfLiteInt32) {
+        return EvalQuantizedPerChannel<int16_t, int32_t, kTfLiteInt16>(
+            context, node, params, data, input, filter, bias, output);
+      } else if (bias->type == kTfLiteInt64) {
+        return EvalQuantizedPerChannel<int16_t, int64_t, kTfLiteInt16>(
+            context, node, params, data, input, filter, bias, output);
+      } else {
+        MicroPrintf("Bias type %s (%d) not supported.",
+                    TfLiteTypeGetName(bias->type), bias->type);
+        return kTfLiteError;
+      }
       break;
+    }
     default:
       MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type),
                   input->type);
       return kTfLiteError;
   }
+
   return kTfLiteOk;
 }
 
@@ -470,6 +485,10 @@
   return tflite::micro::RegisterOp(Init, Prepare, Eval);
 }
 
+TFLMRegistration Register_CONV_2D_INT4() {
+  return tflite::micro::RegisterOp(Init, Prepare, EvalInt4);
+}
+
 TFLMRegistration Register_CONV_2D_INT8() {
   return tflite::micro::RegisterOp(Init, Prepare, EvalInt8);
 }
diff --git a/tensorflow/lite/micro/kernels/cmsis_nn/depthwise_conv.cc b/tensorflow/lite/micro/kernels/cmsis_nn/depthwise_conv.cc
index 7b733b7..7183a28 100644
--- a/tensorflow/lite/micro/kernels/cmsis_nn/depthwise_conv.cc
+++ b/tensorflow/lite/micro/kernels/cmsis_nn/depthwise_conv.cc
@@ -1,4 +1,4 @@
-/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -75,6 +75,20 @@
       micro_context->AllocateTempOutputTensor(node, kDepthwiseConvOutputTensor);
   TF_LITE_ENSURE(context, output != nullptr);
 
+  TF_LITE_ENSURE_EQ(context, input->type, output->type);
+  TF_LITE_ENSURE_MSG(context,
+                     input->type == kTfLiteFloat32 ||
+                         input->type == kTfLiteInt16 ||
+                         input->type == kTfLiteInt8,
+                     "Input data type not supported");
+  TF_LITE_ENSURE_MSG(
+      context,
+      (input->type == kTfLiteFloat32 && filter->type == kTfLiteFloat32) ||
+          (input->type == kTfLiteInt16 && filter->type == kTfLiteInt8) ||
+          (input->type == kTfLiteInt8 &&
+           (filter->type == kTfLiteInt4 || filter->type == kTfLiteInt8)),
+      "Hybrid models are not supported on TFLite Micro.");
+
   const TfLiteType data_type = input->type;
   int input_width = SizeOfDimension(input, 2);
   int input_height = SizeOfDimension(input, 1);
@@ -118,15 +132,6 @@
             context, num_channels * sizeof(int32_t)));
   }
 
-  if (filter->type == kTfLiteInt4) {
-    int filter_size =
-        RuntimeShape(filter->dims->size,
-                     reinterpret_cast<const int32_t*>(filter->dims->data))
-            .FlatSize();
-    context->RequestScratchBufferInArena(
-        context, filter_size, &data->reference_op_data.filter_buffer_index);
-  }
-
   TF_LITE_ENSURE_STATUS(CalculateOpDataDepthwiseConv(
       context, node, params, input_width, input_height, filter_width,
       filter_height, output_width, output_height, data_type,
@@ -168,8 +173,18 @@
     dw_conv_params.dilation.h = params.dilation_height_factor;
     dw_conv_params.dilation.w = params.dilation_width_factor;
 
-    const int32_t buf_size = arm_depthwise_conv_wrapper_s8_get_buffer_size(
-        &dw_conv_params, &input_dims, &filter_dims, &output_dims);
+    int32_t buf_size = 0;
+    if (filter->type == kTfLiteInt8) {
+      buf_size = arm_depthwise_conv_wrapper_s8_get_buffer_size(
+          &dw_conv_params, &input_dims, &filter_dims, &output_dims);
+    } else if (filter->type == kTfLiteInt4) {
+      buf_size = arm_depthwise_conv_wrapper_s4_get_buffer_size(
+          &dw_conv_params, &input_dims, &filter_dims, &output_dims);
+    } else {
+      MicroPrintf("Filter type %s (%d) not supported.",
+                  TfLiteTypeGetName(filter->type), filter->type);
+      return kTfLiteError;
+    }
 
     if (buf_size > 0) {
       TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena(
@@ -285,6 +300,43 @@
       ARM_CMSIS_NN_SUCCESS);
 }
 
+void EvalQuantizedPerChannelInt4(TfLiteContext* context, TfLiteNode* node,
+                                 const TfLiteDepthwiseConvParams& params,
+                                 const OpData& data,
+                                 const TfLiteEvalTensor* input,
+                                 const TfLiteEvalTensor* filter,
+                                 const TfLiteEvalTensor* bias,
+                                 TfLiteEvalTensor* output) {
+  cmsis_nn_dw_conv_params dw_conv_params;
+  cmsis_nn_per_channel_quant_params quant_params;
+  cmsis_nn_dims input_dims;
+  cmsis_nn_dims filter_dims;
+  cmsis_nn_dims bias_dims;
+  cmsis_nn_dims output_dims;
+
+  PopulateDwConvParams(&dw_conv_params, &quant_params, &input_dims,
+                       &filter_dims, &bias_dims, &output_dims, params, data,
+                       input, filter, bias, output);
+
+  cmsis_nn_context ctx;
+  ctx.buf = nullptr;
+  /* 'size' is unused */
+  ctx.size = 0;
+
+  if (data.buffer_idx > -1) {
+    ctx.buf = context->GetScratchBuffer(context, data.buffer_idx);
+  }
+
+  TFLITE_DCHECK_EQ(
+      arm_depthwise_conv_wrapper_s4(
+          &ctx, &dw_conv_params, &quant_params, &input_dims,
+          tflite::micro::GetTensorData<int8_t>(input), &filter_dims,
+          tflite::micro::GetTensorData<int8_t>(filter), &bias_dims,
+          tflite::micro::GetOptionalTensorData<int32_t>(bias), &output_dims,
+          tflite::micro::GetTensorData<int8_t>(output)),
+      ARM_CMSIS_NN_SUCCESS);
+}
+
 void EvalQuantizedPerChannel16x8(TfLiteContext* context, TfLiteNode* node,
                                  const TfLiteDepthwiseConvParams& params,
                                  const OpData& data,
@@ -337,9 +389,6 @@
           ? tflite::micro::GetEvalInput(context, node, kDepthwiseConvBiasTensor)
           : nullptr;
 
-  TfLiteEvalTensor filter_int8 = tflite::micro::MakeUnpackedInt4Tensor(
-      context, data.reference_op_data.filter_buffer_index, filter);
-
   switch (input->type) {  // Already know in/out types are same.
     case kTfLiteFloat32: {
       tflite::reference_ops::DepthwiseConv(
@@ -355,10 +404,15 @@
       break;
     }
     case kTfLiteInt8:
-      switch (filter_int8.type) {
+      switch (filter->type) {
         case kTfLiteInt8: {
-          EvalQuantizedPerChannel(context, node, params, data, input,
-                                  &filter_int8, bias, output);
+          EvalQuantizedPerChannel(context, node, params, data, input, filter,
+                                  bias, output);
+          break;
+        }
+        case kTfLiteInt4: {
+          EvalQuantizedPerChannelInt4(context, node, params, data, input,
+                                      filter, bias, output);
           break;
         }
         default: {
@@ -399,11 +453,8 @@
           ? tflite::micro::GetEvalInput(context, node, kDepthwiseConvBiasTensor)
           : nullptr;
 
-  TfLiteEvalTensor filter_int8 = tflite::micro::MakeUnpackedInt4Tensor(
-      context, data.reference_op_data.filter_buffer_index, filter);
-
-  EvalQuantizedPerChannel(context, node, params, data, input, &filter_int8,
-                          bias, output);
+  EvalQuantizedPerChannel(context, node, params, data, input, filter, bias,
+                          output);
   return kTfLiteOk;
 }
 
@@ -431,6 +482,30 @@
   return kTfLiteOk;
 }
 
+TfLiteStatus EvalInt4(TfLiteContext* context, TfLiteNode* node) {
+  TFLITE_DCHECK(node->user_data != nullptr);
+  TFLITE_DCHECK(node->builtin_data != nullptr);
+
+  const auto& params =
+      *(reinterpret_cast<TfLiteDepthwiseConvParams*>(node->builtin_data));
+  const OpData& data = *(static_cast<OpData*>(node->user_data));
+
+  TfLiteEvalTensor* output =
+      tflite::micro::GetEvalOutput(context, node, kDepthwiseConvOutputTensor);
+  const TfLiteEvalTensor* input =
+      tflite::micro::GetEvalInput(context, node, kDepthwiseConvInputTensor);
+  const TfLiteEvalTensor* filter =
+      tflite::micro::GetEvalInput(context, node, kDepthwiseConvWeightsTensor);
+  const TfLiteEvalTensor* bias =
+      (NumInputs(node) == 3)
+          ? tflite::micro::GetEvalInput(context, node, kDepthwiseConvBiasTensor)
+          : nullptr;
+
+  EvalQuantizedPerChannelInt4(context, node, params, data, input, filter, bias,
+                              output);
+  return kTfLiteOk;
+}
+
 }  // namespace
 
 TFLMRegistration Register_DEPTHWISE_CONV_2D() {
@@ -445,4 +520,8 @@
   return tflite::micro::RegisterOp(Init, Prepare, EvalInt16x8);
 }
 
+TFLMRegistration Register_DEPTHWISE_CONV_2D_INT4() {
+  return tflite::micro::RegisterOp(Init, Prepare, EvalInt4);
+}
+
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/cmsis_nn/fully_connected.cc b/tensorflow/lite/micro/kernels/cmsis_nn/fully_connected.cc
index a7ab8f1..7c373b5 100644
--- a/tensorflow/lite/micro/kernels/cmsis_nn/fully_connected.cc
+++ b/tensorflow/lite/micro/kernels/cmsis_nn/fully_connected.cc
@@ -1,4 +1,4 @@
-/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -26,6 +26,7 @@
 #include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
 #include "tensorflow/lite/kernels/kernel_util.h"
 #include "tensorflow/lite/micro/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/micro_arena_constants.h"
 #include "tensorflow/lite/micro/micro_log.h"
 
 namespace tflite {
@@ -42,6 +43,8 @@
   // Index to buffer for optimizations if applicable.
   int buffer_idx;
 
+  int32_t* kernel_sums;
+
   int32_t batches;
   int32_t accum_depth;
   int32_t output_depth;
@@ -73,7 +76,19 @@
       node, kFullyConnectedOutputTensor);
   TF_LITE_ENSURE(context, output != nullptr);
 
-  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
+  TF_LITE_ENSURE_EQ(context, input->type, output->type);
+  TF_LITE_ENSURE_MSG(context,
+                     input->type == kTfLiteFloat32 ||
+                         input->type == kTfLiteInt16 ||
+                         input->type == kTfLiteInt8,
+                     "Input data type not supported");
+  TF_LITE_ENSURE_MSG(
+      context,
+      (input->type == kTfLiteFloat32 && filter->type == kTfLiteFloat32) ||
+          (input->type == kTfLiteInt16 && filter->type == kTfLiteInt8) ||
+          (input->type == kTfLiteInt8 &&
+           (filter->type == kTfLiteInt4 || filter->type == kTfLiteInt8)),
+      "Hybrid models are not supported on TFLite Micro.");
 
   const RuntimeShape filter_shape = GetTensorShape(filter);
   const RuntimeShape output_shape = GetTensorShape(output);
@@ -101,7 +116,7 @@
     TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0);
     TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
     buf_size = arm_fully_connected_s16_get_buffer_size(&filter_dims);
-  } else if (input->type == kTfLiteInt8) {
+  } else if (input->type == kTfLiteInt8 && filter->type != kTfLiteInt4) {
     const RuntimeShape input_shape = GetTensorShape(input);
 
     TFLITE_DCHECK_GE(output_dim_count, 2);
@@ -122,18 +137,23 @@
       input_dims.c = data->accum_depth;
 
       buf_size = arm_convolve_1x1_s8_fast_get_buffer_size(&input_dims);
-    } else {
+    } else if (input->type == kTfLiteInt8) {
       buf_size = arm_fully_connected_s8_get_buffer_size(&filter_dims);
-    }
-  }
 
-  if (filter->type == kTfLiteInt4) {
-    int filter_size =
-        RuntimeShape(filter->dims->size,
-                     reinterpret_cast<const int32_t*>(filter->dims->data))
-            .FlatSize();
-    context->RequestScratchBufferInArena(
-        context, filter_size, &data->reference_op_data.filter_buffer_index);
+      int8_t* filter_data = GetTensorData<int8_t>(filter);
+      data->kernel_sums = nullptr;
+
+      if (buf_size > 0 && filter_data != nullptr) {
+        data->kernel_sums = static_cast<int32_t*>(
+            context->AllocatePersistentBuffer(context, buf_size));
+
+        arm_vector_sum_s8(data->kernel_sums, filter_dims.n, data->output_depth,
+                          filter_data, 1, nullptr);
+
+        // Do not request a scratch buffer since using persistent memory
+        buf_size = 0;
+      }
+    }
   }
 
   if (buf_size > 0) {
@@ -188,6 +208,49 @@
   }
 }
 
+TfLiteStatus EvalQuantizedInt4(TfLiteContext* context, TfLiteNode* node,
+                               const OpData& data,
+                               const TfLiteEvalTensor* input,
+                               const TfLiteEvalTensor* filter,
+                               const TfLiteEvalTensor* bias,
+                               TfLiteEvalTensor* output) {
+  const RuntimeShape output_shape = tflite::micro::GetTensorShape(output);
+  const int output_dim_count = output_shape.DimensionsCount();
+  TFLITE_DCHECK_GE(output_dim_count, 2);
+  TFLITE_DCHECK_LE(output_dim_count, 4);
+
+  cmsis_nn_per_tensor_quant_params quant_params;
+  cmsis_nn_dims input_dims;
+  cmsis_nn_dims filter_dims;
+  cmsis_nn_dims bias_dims;
+  cmsis_nn_dims output_dims;
+  cmsis_nn_context ctx;
+
+  PopulateCommonParams(context, &quant_params, &input_dims, &filter_dims,
+                       &bias_dims, &output_dims, &ctx, data);
+
+  const int32_t* bias_data =
+      tflite::micro::GetOptionalTensorData<int32_t>(bias);
+
+  cmsis_nn_fc_params fc_params;
+  fc_params.input_offset = -data.reference_op_data.input_zero_point;
+  fc_params.output_offset = data.reference_op_data.output_zero_point;
+  fc_params.filter_offset = 0;
+  fc_params.activation.min = data.reference_op_data.output_activation_min;
+  fc_params.activation.max = data.reference_op_data.output_activation_max;
+
+  TF_LITE_ENSURE_EQ(
+      context,
+      arm_fully_connected_s4(
+          &ctx, &fc_params, &quant_params, &input_dims,
+          tflite::micro::GetTensorData<int8_t>(input), &filter_dims,
+          tflite::micro::GetTensorData<int8_t>(filter), &bias_dims, bias_data,
+          &output_dims, tflite::micro::GetTensorData<int8_t>(output)),
+      ARM_CMSIS_NN_SUCCESS);
+
+  return kTfLiteOk;
+}
+
 TfLiteStatus EvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node,
                                const OpData& data,
                                const TfLiteEvalTensor* input,
@@ -247,11 +310,20 @@
   } else {
     cmsis_nn_fc_params fc_params;
     fc_params.input_offset = -data.reference_op_data.input_zero_point;
+    fc_params.filter_offset = -data.reference_op_data.filter_zero_point;
     fc_params.output_offset = data.reference_op_data.output_zero_point;
-    fc_params.filter_offset = 0;
     fc_params.activation.min = data.reference_op_data.output_activation_min;
     fc_params.activation.max = data.reference_op_data.output_activation_max;
 
+    if (data.kernel_sums != nullptr) {
+      ctx.buf = data.kernel_sums;
+    } else if (ctx.buf != nullptr) {
+      // If behaving like batch matmul we calculate kernel sums in eval.
+      arm_vector_sum_s8(
+          static_cast<int32_t*>(ctx.buf), filter_dims.n, data.output_depth,
+          tflite::micro::GetTensorData<int8_t>(filter), 1, nullptr);
+    }
+
     TF_LITE_ENSURE_EQ(
         context,
         arm_fully_connected_s8(
@@ -319,9 +391,6 @@
   TFLITE_DCHECK(node->user_data != nullptr);
   const OpData& data = *(static_cast<const OpData*>(node->user_data));
 
-  TfLiteEvalTensor filter_int8 = tflite::micro::MakeUnpackedInt4Tensor(
-      context, data.reference_op_data.filter_buffer_index, filter);
-
   // Checks in Prepare ensure input, output and filter types are all the same.
   switch (input->type) {
     case kTfLiteFloat32: {
@@ -339,10 +408,13 @@
       break;
     }
     case kTfLiteInt8: {
-      switch (filter_int8.type) {
+      switch (filter->type) {
+        case kTfLiteInt4:
+          return EvalQuantizedInt4(context, node, data, input, filter, bias,
+                                   output);
         case kTfLiteInt8:
-          return EvalQuantizedInt8(context, node, data, input, &filter_int8,
-                                   bias, output);
+          return EvalQuantizedInt8(context, node, data, input, filter, bias,
+                                   output);
         default:
           MicroPrintf("Filter Type %s (%d) not supported.",
                       TfLiteTypeGetName(filter->type), filter->type);
@@ -363,6 +435,29 @@
   return kTfLiteOk;
 }
 
+TfLiteStatus EvalInt4(TfLiteContext* context, TfLiteNode* node) {
+  const TfLiteEvalTensor* input =
+      tflite::micro::GetEvalInput(context, node, kFullyConnectedInputTensor);
+  const TfLiteEvalTensor* filter =
+      tflite::micro::GetEvalInput(context, node, kFullyConnectedWeightsTensor);
+  const TfLiteEvalTensor* bias =
+      tflite::micro::GetEvalInput(context, node, kFullyConnectedBiasTensor);
+  TfLiteEvalTensor* output =
+      tflite::micro::GetEvalOutput(context, node, kFullyConnectedOutputTensor);
+
+  TFLITE_DCHECK(node->user_data != nullptr);
+  const OpData& data = *(static_cast<const OpData*>(node->user_data));
+
+  // Accept only int8 activations with int4-packed weights; reject all else.
+  if (input->type != kTfLiteInt8 || filter->type != kTfLiteInt4) {
+    MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type),
+                input->type);
+    return kTfLiteError;
+  }
+
+  return EvalQuantizedInt4(context, node, data, input, filter, bias, output);
+}
+
 // Note that the current function names are not ideal at all (this EvalInt8
 // function internally calls EvalQuantizedInt8, and there is similar name
 // aliasing in the Eval function too). We will be attempting to have a more
@@ -389,11 +484,7 @@
     return kTfLiteError;
   }
 
-  TfLiteEvalTensor filter_int8 = tflite::micro::MakeUnpackedInt4Tensor(
-      context, data.reference_op_data.filter_buffer_index, filter);
-
-  return EvalQuantizedInt8(context, node, data, input, &filter_int8, bias,
-                           output);
+  return EvalQuantizedInt8(context, node, data, input, filter, bias, output);
 }
 
 TfLiteStatus EvalInt16(TfLiteContext* context, TfLiteNode* node) {
@@ -425,6 +516,10 @@
   return tflite::micro::RegisterOp(Init, Prepare, Eval);
 }
 
+TFLMRegistration Register_FULLY_CONNECTED_INT4() {
+  return tflite::micro::RegisterOp(Init, Prepare, EvalInt4);
+}
+
 TFLMRegistration Register_FULLY_CONNECTED_INT8() {
   return tflite::micro::RegisterOp(Init, Prepare, EvalInt8);
 }
@@ -433,4 +528,8 @@
   return tflite::micro::RegisterOp(Init, Prepare, EvalInt16);
 }
 
+TFLMInferenceRegistration RegisterInference_FULLY_CONNECTED() {
+  return tflite::micro::RegisterOp(Eval);
+}
+
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/cmsis_nn/softmax.cc b/tensorflow/lite/micro/kernels/cmsis_nn/softmax.cc
index f83a090..6515691 100644
--- a/tensorflow/lite/micro/kernels/cmsis_nn/softmax.cc
+++ b/tensorflow/lite/micro/kernels/cmsis_nn/softmax.cc
@@ -1,4 +1,4 @@
-/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -52,6 +52,17 @@
   TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0);
   TF_LITE_ENSURE(context, output != nullptr);
 
+  TF_LITE_ENSURE_MSG(
+      context,
+      input->type == output->type ||
+          (input->type == kTfLiteInt8 && output->type == kTfLiteInt16),
+      "Input and output data types are not supported together.");
+  TF_LITE_ENSURE_MSG(context,
+                     input->type == kTfLiteFloat32 ||
+                         input->type == kTfLiteInt16 ||
+                         input->type == kTfLiteInt8,
+                     "Input data type not supported");
+
   TF_LITE_ENSURE(context, node->user_data != nullptr);
   CMSISNNSoftmaxParams* op_data =
       static_cast<CMSISNNSoftmaxParams*>(node->user_data);
diff --git a/tensorflow/lite/micro/kernels/cmsis_nn/svdf.cc b/tensorflow/lite/micro/kernels/cmsis_nn/svdf.cc
index 03dbaee..bf64016 100644
--- a/tensorflow/lite/micro/kernels/cmsis_nn/svdf.cc
+++ b/tensorflow/lite/micro/kernels/cmsis_nn/svdf.cc
@@ -1,4 +1,4 @@
-/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -31,9 +31,195 @@
 namespace tflite {
 namespace {
 
+struct CmsisNnOpDataSvdf {
+  int32_t effective_scale_1_a;
+  int32_t effective_scale_2_a;
+  // b versions of each scale are kept at int since the numbers are just the
+  // shift value - typically between [-32, 32].
+  int effective_scale_1_b;
+  int effective_scale_2_b;
+  int scratch_tensor_index;
+  int scratch_output_tensor_index;
+
+  // Cached tensor zero point values for quantized operations.
+  int input_zero_point;
+  int output_zero_point;
+  int activation_state_zero_point;
+  int32_t* kernel_sums;
+};
+
 void* Init(TfLiteContext* context, const char* buffer, size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
-  return context->AllocatePersistentBuffer(context, sizeof(OpDataSvdf));
+  return context->AllocatePersistentBuffer(context, sizeof(CmsisNnOpDataSvdf));
+}
+
+TfLiteStatus CmsisNnPrepareSvdf(TfLiteContext* context, TfLiteNode* node) {
+  TFLITE_DCHECK(node->builtin_data != nullptr);
+
+  const auto* params = static_cast<const TfLiteSVDFParams*>(node->builtin_data);
+
+  MicroContext* micro_context = GetMicroContext(context);
+
+  // Validate Tensor Inputs (dtype depends on quantization):
+  // [0] = Input, {2, batch_size, input_size}
+  // [1] = Weights Feature, {2, num_filters, input_size}
+  // [2] = Weights Time, {2, num_filters, memory_size}
+  // [3] = Bias (optional), {1, num_units}
+  // [4] = Activation State (variable),
+  //         {2, batch_size, memory_size * num_filters}
+  TfLiteTensor* input =
+      micro_context->AllocateTempInputTensor(node, kSvdfInputTensor);
+  TF_LITE_ENSURE(context, input != nullptr);
+  TfLiteTensor* weights_feature =
+      micro_context->AllocateTempInputTensor(node, kSvdfWeightsFeatureTensor);
+  TF_LITE_ENSURE(context, weights_feature != nullptr);
+  TfLiteTensor* weights_time =
+      micro_context->AllocateTempInputTensor(node, kSvdfWeightsTimeTensor);
+  TF_LITE_ENSURE(context, weights_time != nullptr);
+  TfLiteTensor* bias =
+      micro_context->AllocateTempInputTensor(node, kSvdfBiasTensor);
+  TfLiteTensor* activation_state = micro_context->AllocateTempInputTensor(
+      node, kSvdfInputActivationStateTensor);
+  TF_LITE_ENSURE(context, activation_state != nullptr);
+
+  // Define input constants based on input tensor definition above:
+  const int rank = params->rank;
+  const int input_size = input->dims->data[1];
+  const int batch_size = input->dims->data[0];
+  const int num_filters = weights_feature->dims->data[0];
+  TF_LITE_ENSURE_EQ(context, num_filters % rank, 0);
+  const int num_units = num_filters / rank;
+  const int memory_size = weights_time->dims->data[1];
+
+  // Validate Input Tensor:
+  TF_LITE_ENSURE(context,
+                 input->type == kTfLiteFloat32 || input->type == kTfLiteInt8);
+  TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2);
+
+  // Validate Tensor Output:
+  // [0] = float/int8_t, {2, batch_size, num_units}
+  TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
+  TfLiteTensor* output =
+      micro_context->AllocateTempOutputTensor(node, kSvdfOutputTensor);
+  TF_LITE_ENSURE(context, output != nullptr);
+  TF_LITE_ENSURE_EQ(context, NumDimensions(output), 2);
+  TF_LITE_ENSURE_EQ(context, output->dims->data[0], batch_size);
+  TF_LITE_ENSURE_EQ(context, output->dims->data[1], num_units);
+
+  // Validate Weights Feature Input Tensor:
+  TF_LITE_ENSURE_EQ(context, NumDimensions(weights_feature), 2);
+  TF_LITE_ENSURE_EQ(context, weights_feature->dims->data[1], input_size);
+
+  // Validate Weights Time Input Tensor:
+  TF_LITE_ENSURE_EQ(context, NumDimensions(weights_time), 2);
+  TF_LITE_ENSURE_EQ(context, weights_time->dims->data[0], num_filters);
+  TF_LITE_ENSURE_EQ(context, weights_time->dims->data[1], memory_size);
+
+  // Validate Optional Bias Input Tensor:
+  if (bias != nullptr) {
+    TF_LITE_ENSURE_EQ(context, bias->dims->data[0], num_units);
+  }
+
+  // Validate Activation State Input Tensor:
+  TF_LITE_ENSURE_EQ(context, NumDimensions(activation_state), 2);
+  TF_LITE_ENSURE_EQ(context, activation_state->dims->data[0], batch_size);
+  TF_LITE_ENSURE_EQ(context, activation_state->dims->data[1],
+                    memory_size * num_filters);
+  // Since is_variable is not part of TFLiteEvalTensor, check is_variable here.
+  TF_LITE_ENSURE_EQ(context, activation_state->is_variable, true);
+
+  TF_LITE_ENSURE_EQ(context, node->inputs->size, 5);
+
+  TFLITE_DCHECK(node->user_data != nullptr);
+  CmsisNnOpDataSvdf* data = static_cast<CmsisNnOpDataSvdf*>(node->user_data);
+
+  if (input->type == kTfLiteInt8) {
+    TF_LITE_ENSURE_EQ(context, weights_feature->type, kTfLiteInt8);
+    TF_LITE_ENSURE(context, (weights_time->type == kTfLiteInt16) ||
+                                (weights_time->type == kTfLiteInt8));
+    TF_LITE_ENSURE(context, (activation_state->type == kTfLiteInt16) ||
+                                (activation_state->type == kTfLiteInt8));
+    if (bias != nullptr) {
+      TF_LITE_ENSURE_EQ(context, bias->type, kTfLiteInt32);
+    }
+
+    TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt8);
+
+    const double effective_scale_1 = static_cast<double>(
+        input->params.scale * weights_feature->params.scale /
+        activation_state->params.scale);
+    const double effective_scale_2 =
+        static_cast<double>(activation_state->params.scale *
+                            weights_time->params.scale / output->params.scale);
+
+    // TODO(b/162018098): Use TF_LITE_ENSURE_NEAR when it is ready.
+    // TODO(#1751): account for optional bias tensor
+    TF_LITE_ENSURE(
+        context,
+        std::abs(static_cast<double>(bias->params.scale) -
+                 static_cast<double>(activation_state->params.scale *
+                                     weights_time->params.scale)) < 1e-5);
+
+    QuantizeMultiplier(effective_scale_1, &(data->effective_scale_1_a),
+                       &(data->effective_scale_1_b));
+    QuantizeMultiplier(effective_scale_2, &(data->effective_scale_2_a),
+                       &(data->effective_scale_2_b));
+
+    data->input_zero_point = input->params.zero_point;
+    data->output_zero_point = output->params.zero_point;
+    data->activation_state_zero_point = activation_state->params.zero_point;
+
+    TFLITE_DCHECK(context->RequestScratchBufferInArena != nullptr);
+
+    const TfLiteStatus scratch_status = context->RequestScratchBufferInArena(
+        context, batch_size * num_filters * sizeof(int32_t),
+        &(data->scratch_tensor_index));
+    TF_LITE_ENSURE_OK(context, scratch_status);
+
+    const TfLiteStatus scratch_output_status =
+        context->RequestScratchBufferInArena(
+            context, batch_size * num_units * sizeof(int32_t),
+            &(data->scratch_output_tensor_index));
+    TF_LITE_ENSURE_OK(context, scratch_output_status);
+
+    cmsis_nn_dims weights_feature_dims;
+    weights_feature_dims.n = num_filters;
+    weights_feature_dims.h = input_size;
+
+    const int32_t buf_size = arm_svdf_s8_get_buffer_size(&weights_feature_dims);
+
+    if (buf_size > 0) {
+      data->kernel_sums = static_cast<int32_t*>(
+          context->AllocatePersistentBuffer(context, buf_size));
+
+      arm_vector_sum_s8(data->kernel_sums, input_size, num_filters,
+                        GetTensorData<int8_t>(weights_feature), 1, nullptr);
+    }
+
+  } else {
+    TF_LITE_ENSURE_EQ(context, weights_feature->type, kTfLiteFloat32);
+    TF_LITE_ENSURE_EQ(context, weights_time->type, kTfLiteFloat32);
+    TF_LITE_ENSURE_EQ(context, activation_state->type, kTfLiteFloat32);
+    if (bias != nullptr) {
+      TF_LITE_ENSURE_EQ(context, bias->type, kTfLiteFloat32);
+    }
+    TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32);
+
+    TFLITE_DCHECK(context->RequestScratchBufferInArena != nullptr);
+    const TfLiteStatus scratch_status = context->RequestScratchBufferInArena(
+        context, batch_size * num_filters * sizeof(float),
+        &(data->scratch_tensor_index));
+    TF_LITE_ENSURE_OK(context, scratch_status);
+  }
+
+  micro_context->DeallocateTempTfLiteTensor(input);
+  micro_context->DeallocateTempTfLiteTensor(weights_feature);
+  micro_context->DeallocateTempTfLiteTensor(weights_time);
+  micro_context->DeallocateTempTfLiteTensor(activation_state);
+  micro_context->DeallocateTempTfLiteTensor(output);
+  // TODO(#1751): account for optional bias tensor
+  micro_context->DeallocateTempTfLiteTensor(bias);
+  return kTfLiteOk;
 }
 
 TfLiteStatus EvalIntegerSVDF(TfLiteContext* context, TfLiteNode* node,
@@ -44,7 +230,7 @@
                              const TfLiteSVDFParams* params,
                              TfLiteEvalTensor* activation_state_tensor,
                              TfLiteEvalTensor* output_tensor,
-                             const OpDataSvdf& data) {
+                             const CmsisNnOpDataSvdf& data) {
   cmsis_nn_dims input_dims;
   input_dims.n = input_tensor->dims->data[0];
   input_dims.h = input_tensor->dims->data[1];
@@ -102,9 +288,12 @@
 
   switch (weights_time_tensor->type) {
     case kTfLiteInt8: {
+      cmsis_nn_context ctx;
+      ctx.buf = data.kernel_sums;
+
       arm_svdf_s8(
-          &scratch_ctx, &scratch_output_ctx, &svdf_params, &in_quant_params,
-          &out_quant_params, &input_dims,
+          &ctx, &scratch_ctx, &scratch_output_ctx, &svdf_params,
+          &in_quant_params, &out_quant_params, &input_dims,
           tflite::micro::GetTensorData<int8_t>(input_tensor), &state_dims,
           tflite::micro::GetTensorData<int8_t>(activation_state_tensor),
           &weights_feature_dims,
@@ -141,7 +330,8 @@
 TfLiteStatus EvalSvdf(TfLiteContext* context, TfLiteNode* node) {
   auto* params = reinterpret_cast<TfLiteSVDFParams*>(node->builtin_data);
   TFLITE_DCHECK(node->user_data != nullptr);
-  const OpDataSvdf& data = *(static_cast<const OpDataSvdf*>(node->user_data));
+  const CmsisNnOpDataSvdf& data =
+      *(static_cast<const CmsisNnOpDataSvdf*>(node->user_data));
 
   const TfLiteEvalTensor* input =
       tflite::micro::GetEvalInput(context, node, kSvdfInputTensor);
@@ -184,7 +374,8 @@
 TfLiteStatus EvalSvdfInt8(TfLiteContext* context, TfLiteNode* node) {
   auto* params = reinterpret_cast<TfLiteSVDFParams*>(node->builtin_data);
   TFLITE_DCHECK(node->user_data != nullptr);
-  const OpDataSvdf& data = *(static_cast<const OpDataSvdf*>(node->user_data));
+  const CmsisNnOpDataSvdf& data =
+      *(static_cast<const CmsisNnOpDataSvdf*>(node->user_data));
 
   const TfLiteEvalTensor* input =
       tflite::micro::GetEvalInput(context, node, kSvdfInputTensor);
@@ -213,11 +404,11 @@
 }  // namespace
 
 TFLMRegistration Register_SVDF() {
-  return tflite::micro::RegisterOp(Init, PrepareSvdf, EvalSvdf);
+  return tflite::micro::RegisterOp(Init, CmsisNnPrepareSvdf, EvalSvdf);
 }
 
 TFLMRegistration Register_SVDF_INT8() {
-  return tflite::micro::RegisterOp(Init, PrepareSvdf, EvalSvdfInt8);
+  return tflite::micro::RegisterOp(Init, CmsisNnPrepareSvdf, EvalSvdfInt8);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/cmsis_nn/transpose_conv.cc b/tensorflow/lite/micro/kernels/cmsis_nn/transpose_conv.cc
new file mode 100644
index 0000000..20cf0e1
--- /dev/null
+++ b/tensorflow/lite/micro/kernels/cmsis_nn/transpose_conv.cc
@@ -0,0 +1,556 @@
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/lite/micro/kernels/transpose_conv.h"
+
+#include "Include/arm_nnfunctions.h"
+#include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/kernels/internal/common.h"
+#include "tensorflow/lite/kernels/internal/quantization_util.h"
+#include "tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h"
+#include "tensorflow/lite/kernels/internal/reference/transpose_conv.h"
+#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
+#include "tensorflow/lite/kernels/kernel_util.h"
+#include "tensorflow/lite/kernels/padding.h"
+#include "tensorflow/lite/micro/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/micro_log.h"
+
+namespace tflite {
+namespace {
+
+// For the TfLite transpose_conv implementation, input tensor 0 corresponds to
+// the OutputShapeTensor. However, since TFLM does not support dynamic tensors,
+// the TFLM implementation ignores input tensor 0 and the only inputs we care
+// about are kFilterTensor, kInputTensor and kBiasTensor.
+constexpr int kFilterTensor = 1;
+constexpr int kInputTensor = 2;
+constexpr int kBiasTensor = 3;
+constexpr int kOutputTensor = 0;
+
+// Conv is quantized along dimension 0:
+// https://www.tensorflow.org/lite/performance/quantization_spec
+constexpr int kConvQuantizedDimension = 0;
+
+struct OpData {
+  ConvParams params;
+
+  // Scratch buffers are required for quantized implementations.
+  int scratch_buffer_index;
+  int scratch_buffer_output_index;
+
+  // TODO(b/192090531): Remove this once all 8x16 transpose conv models use
+  // 64-bit biases.
+  int bias_converted_buffer_index;
+
+  // Multiplier and shift arrays are required for the int8 implementation.
+  int32_t* per_channel_output_multiplier;
+  int32_t* per_channel_output_shift;
+};
+
+inline PaddingType RuntimePaddingType(TfLitePadding padding) {
+  switch (padding) {
+    case TfLitePadding::kTfLitePaddingSame:
+      return PaddingType::kSame;
+    case TfLitePadding::kTfLitePaddingValid:
+      return PaddingType::kValid;
+    case TfLitePadding::kTfLitePaddingUnknown:
+    default:
+      return PaddingType::kNone;
+  }
+}
+
+TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node,
+                             const TfLiteTransposeConvParams* params, int width,
+                             int height, int filter_width, int filter_height,
+                             const TfLiteType data_type, OpData* data) {
+  bool has_bias = node->inputs->size == 4;
+  // Check number of inputs/outputs
+  TF_LITE_ENSURE(context, has_bias || node->inputs->size == 3);
+  TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
+
+  // Matching GetWindowedOutputSize in TensorFlow.
+  auto padding = params->padding;
+  int pad_output_width;
+  int pad_output_height;
+
+  TfLitePaddingValues padding_values = ComputePaddingHeightWidth(
+      params->stride_height, params->stride_width, 1,
+      1,  // Dilation height and width are always 1 for transpose_conv.
+      height, width, filter_height, filter_width, padding, &pad_output_height,
+      &pad_output_width);
+
+  data->params.padding_type = RuntimePaddingType(padding);
+  data->params.padding_values.width = padding_values.width;
+  data->params.padding_values.height = padding_values.height;
+  data->params.padding_values.width_offset =
+      padding_values.width_offset + padding_values.width;
+  data->params.padding_values.height_offset =
+      padding_values.height_offset + padding_values.height;
+
+  // Note that quantized inference requires that all tensors have their
+  // parameters set. This is usually done during quantized training.
+  if (data_type != kTfLiteFloat32) {
+    MicroContext* micro_context = GetMicroContext(context);
+
+    TfLiteTensor* input =
+        micro_context->AllocateTempInputTensor(node, kInputTensor);
+    TF_LITE_ENSURE(context, input != nullptr);
+    TfLiteTensor* filter =
+        micro_context->AllocateTempInputTensor(node, kFilterTensor);
+    TF_LITE_ENSURE(context, filter != nullptr);
+    TfLiteTensor* bias =
+        micro_context->AllocateTempInputTensor(node, kBiasTensor);
+    TfLiteTensor* output =
+        micro_context->AllocateTempOutputTensor(node, kOutputTensor);
+    TF_LITE_ENSURE(context, output != nullptr);
+    int output_channels = filter->dims->data[kConvQuantizedDimension];
+
+    TF_LITE_ENSURE_STATUS(tflite::PopulateConvolutionQuantizationParams(
+        context, input, filter, bias, output, kTfLiteActNone,
+        &data->params.output_multiplier, &data->params.output_shift,
+        &data->params.quantized_activation_min,
+        &data->params.quantized_activation_max,
+        data->per_channel_output_multiplier, data->per_channel_output_shift,
+        output_channels));
+
+    // TODO(b/192090531): Remove this once all 8x16 transpose conv models use
+    // 64-bit biases.
+    if (input->type == kTfLiteInt16) {
+      TFLITE_DCHECK(filter->type == kTfLiteInt8);
+      TFLITE_DCHECK(output->type == kTfLiteInt16);
+      if (bias->type == kTfLiteInt16) {
+        TFLITE_DCHECK(
+            context->RequestScratchBufferInArena(
+                context, GetTensorShape(bias).FlatSize() * sizeof(std::int64_t),
+                &(data->bias_converted_buffer_index)) == kTfLiteOk);
+      }
+    }
+
+    micro_context->DeallocateTempTfLiteTensor(input);
+    micro_context->DeallocateTempTfLiteTensor(filter);
+    micro_context->DeallocateTempTfLiteTensor(output);
+    if (bias != nullptr) {
+      micro_context->DeallocateTempTfLiteTensor(bias);
+    }
+  }
+  return kTfLiteOk;
+}
+
+void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
+  return context->AllocatePersistentBuffer(context, sizeof(OpData));
+}
+
+TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+  TFLITE_DCHECK(node->user_data != nullptr);
+  TFLITE_DCHECK(node->builtin_data != nullptr);
+
+  OpData* data = static_cast<OpData*>(node->user_data);
+  const auto params =
+      static_cast<const TfLiteTransposeConvParams*>(node->builtin_data);
+
+  MicroContext* micro_context = GetMicroContext(context);
+
+  TfLiteTensor* output =
+      micro_context->AllocateTempOutputTensor(node, kOutputTensor);
+  TF_LITE_ENSURE(context, output != nullptr);
+  TfLiteTensor* input =
+      micro_context->AllocateTempInputTensor(node, kInputTensor);
+  TF_LITE_ENSURE(context, input != nullptr);
+  TfLiteTensor* filter =
+      micro_context->AllocateTempInputTensor(node, kFilterTensor);
+  TF_LITE_ENSURE(context, filter != nullptr);
+
+  TF_LITE_ENSURE_EQ(context, input->type, output->type);
+  TF_LITE_ENSURE_MSG(context,
+                     input->type == kTfLiteFloat32 ||
+                         input->type == kTfLiteInt16 ||
+                         input->type == kTfLiteInt8,
+                     "Input data type not supported");
+  TF_LITE_ENSURE_MSG(
+      context,
+      (input->type == kTfLiteFloat32 && filter->type == kTfLiteFloat32) ||
+          (input->type == kTfLiteInt16 && filter->type == kTfLiteInt8) ||
+          (input->type == kTfLiteInt8 && filter->type == kTfLiteInt8),
+      "Hybrid models are not supported on TFLite Micro.");
+
+  // Get height and width of the output.
+  const int width = SizeOfDimension(output, 2);
+  const int height = SizeOfDimension(output, 1);
+  const int filter_width = SizeOfDimension(filter, 2);
+  const int filter_height = SizeOfDimension(filter, 1);
+
+  // Dynamically allocate per-channel quantization parameters.
+  const int num_channels = filter->dims->data[kConvQuantizedDimension];
+  data->per_channel_output_multiplier =
+      static_cast<int32_t*>(context->AllocatePersistentBuffer(
+          context, num_channels * sizeof(int32_t)));
+  data->per_channel_output_shift =
+      static_cast<int32_t*>(context->AllocatePersistentBuffer(
+          context, num_channels * sizeof(int32_t)));
+
+  if (input->type == kTfLiteInt8) {
+    TFLITE_DCHECK(context->RequestScratchBufferInArena != nullptr);
+
+    RuntimeShape input_shape = GetTensorShape(input);
+    RuntimeShape output_shape = GetTensorShape(output);
+    RuntimeShape filter_shape = GetTensorShape(filter);
+
+    const int batch_size = MatchingDim(input_shape, 0, output_shape, 0);
+    const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3);
+
+    cmsis_nn_dims output_dims;
+    output_dims.n = batch_size;
+    output_dims.h = output_shape.Dims(1);
+    output_dims.w = output_shape.Dims(2);
+    output_dims.c = output_depth;
+
+#if defined(KERNELS_OPTIMIZED_FOR_SPEED)
+    const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3);
+
+    cmsis_nn_dims input_dims;
+    input_dims.n = batch_size;
+    input_dims.h = input_shape.Dims(1);
+    input_dims.w = input_shape.Dims(2);
+    input_dims.c = input_depth;
+
+    cmsis_nn_dims filter_dims;
+    filter_dims.n = output_depth;
+    filter_dims.h = filter_shape.Dims(1);
+    filter_dims.w = filter_shape.Dims(2);
+    filter_dims.c = input_depth;
+
+    const size_t buf_size = arm_transpose_conv_s8_get_buffer_size(
+        &input_dims, &filter_dims, &output_dims);
+    TFLITE_DCHECK(context->RequestScratchBufferInArena(
+                      context, buf_size, &(data->scratch_buffer_index)) ==
+                  kTfLiteOk);
+#endif
+
+    // Quantized 8-bit kernels use an int32 scratch buffer.
+    TFLITE_DCHECK(
+        context->RequestScratchBufferInArena(
+            context,
+            output_dims.h * output_dims.w * output_dims.c * sizeof(int32_t),
+            &(data->scratch_buffer_output_index)) == kTfLiteOk);
+  }
+
+  // Quantized 16x8 kernels use an int64 scratch buffer.
+  if (input->type == kTfLiteInt16) {
+    TFLITE_DCHECK(context->RequestScratchBufferInArena != nullptr);
+    TFLITE_DCHECK(context->RequestScratchBufferInArena(
+                      context,
+                      GetTensorShape(output).FlatSize() * sizeof(std::int64_t),
+                      &(data->scratch_buffer_index)) == kTfLiteOk);
+  }
+
+  // All per-channel quantized tensors need valid zero point and scale arrays.
+  if (input->type == kTfLiteInt8 || input->type == kTfLiteInt16) {
+    TF_LITE_ENSURE_EQ(context, filter->quantization.type,
+                      kTfLiteAffineQuantization);
+
+    const auto* affine_quantization =
+        static_cast<TfLiteAffineQuantization*>(filter->quantization.params);
+    TF_LITE_ENSURE(context, affine_quantization);
+    TF_LITE_ENSURE(context, affine_quantization->scale);
+    TF_LITE_ENSURE(context, affine_quantization->zero_point);
+
+    TF_LITE_ENSURE(context,
+                   affine_quantization->scale->size == 1 ||
+                       affine_quantization->scale->size ==
+                           filter->dims->data[kConvQuantizedDimension]);
+    TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size,
+                      affine_quantization->zero_point->size);
+  }
+
+  TF_LITE_ENSURE_STATUS(CalculateOpData(context, node, params, width, height,
+                                        filter_width, filter_height,
+                                        input->type, data));
+
+  // Offsets (zero points)
+  data->params.input_offset = -input->params.zero_point;
+  data->params.weights_offset = -filter->params.zero_point;
+  data->params.output_offset = output->params.zero_point;
+
+  // Stride
+  data->params.stride_width = params->stride_width;
+  data->params.stride_height = params->stride_height;
+
+  micro_context->DeallocateTempTfLiteTensor(output);
+  micro_context->DeallocateTempTfLiteTensor(input);
+  micro_context->DeallocateTempTfLiteTensor(filter);
+  return kTfLiteOk;
+}
+
+#if defined(KERNELS_OPTIMIZED_FOR_SPEED)
+TfLiteStatus EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
+                                     const TfLiteConvParams& params,
+                                     const OpData& data,
+                                     const TfLiteEvalTensor* input,
+                                     const TfLiteEvalTensor* filter,
+                                     const TfLiteEvalTensor* bias,
+                                     TfLiteEvalTensor* output) {
+  cmsis_nn_transpose_conv_params conv_params;
+  conv_params.dilation.h = 1;
+  conv_params.dilation.w = 1;
+
+  // Initialize cmsis_nn convolution parameters
+  conv_params.input_offset = data.params.input_offset;
+  conv_params.output_offset = data.params.output_offset;
+  conv_params.stride.h = params.stride_height;
+  conv_params.stride.w = params.stride_width;
+  conv_params.padding.h = data.params.padding_values.height;
+  conv_params.padding.w = data.params.padding_values.width;
+  conv_params.padding_offsets.h = data.params.padding_values.height_offset;
+  conv_params.padding_offsets.w = data.params.padding_values.width_offset;
+  conv_params.activation.min = data.params.quantized_activation_min;
+  conv_params.activation.max = data.params.quantized_activation_max;
+
+  // Initialize cmsis_nn per channel quantization parameters
+  cmsis_nn_per_channel_quant_params quant_params;
+  quant_params.multiplier =
+      const_cast<int32_t*>(data.per_channel_output_multiplier);
+  quant_params.shift = const_cast<int32_t*>(data.per_channel_output_shift);
+
+  RuntimeShape filter_shape = tflite::micro::GetTensorShape(filter);
+  RuntimeShape input_shape = tflite::micro::GetTensorShape(input);
+  RuntimeShape output_shape = tflite::micro::GetTensorShape(output);
+  RuntimeShape bias_shape = tflite::micro::GetTensorShape(bias);
+
+  // Consistency check.
+  TFLITE_DCHECK_LE(conv_params.activation.min, conv_params.activation.max);
+  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
+  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
+  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
+  const int batch_size = MatchingDim(input_shape, 0, output_shape, 0);
+  const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3);
+  const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3);
+  if (tflite::micro::GetOptionalTensorData<int32_t>(bias)) {
+    TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);
+  }
+
+  cmsis_nn_dims input_dims;
+  input_dims.n = batch_size;
+  input_dims.h = input_shape.Dims(1);
+  input_dims.w = input_shape.Dims(2);
+  input_dims.c = input_depth;
+
+  cmsis_nn_dims filter_dims;
+  filter_dims.n = output_depth;
+  filter_dims.h = filter_shape.Dims(1);
+  filter_dims.w = filter_shape.Dims(2);
+  filter_dims.c = input_depth;
+
+  cmsis_nn_dims bias_dims;
+  bias_dims.n = 1;
+  bias_dims.h = 1;
+  bias_dims.w = 1;
+  bias_dims.c = output_depth;
+
+  cmsis_nn_dims output_dims;
+  output_dims.n = batch_size;
+  output_dims.h = output_shape.Dims(1);
+  output_dims.w = output_shape.Dims(2);
+  output_dims.c = output_depth;
+
+  cmsis_nn_context ctx;
+  ctx.size = 0;  // Note: ctx.size is currently not used in cmsis_nn.
+  ctx.buf = context->GetScratchBuffer(context, data.scratch_buffer_index);
+
+  cmsis_nn_context scratch_output_ctx;
+  scratch_output_ctx.size =
+      0;  // Note: ctx.size is currently not used in cmsis_nn.
+  scratch_output_ctx.buf =
+      context->GetScratchBuffer(context, data.scratch_buffer_output_index);
+
+  TFLITE_DCHECK_EQ(
+      arm_transpose_conv_s8(
+          &ctx, &scratch_output_ctx, &conv_params, &quant_params, &input_dims,
+          tflite::micro::GetTensorData<int8_t>(input), &filter_dims,
+          tflite::micro::GetTensorData<int8_t>(filter), &bias_dims,
+          tflite::micro::GetOptionalTensorData<int32_t>(bias), &output_dims,
+          tflite::micro::GetTensorData<int8_t>(output)),
+      ARM_CMSIS_NN_SUCCESS);
+
+  return kTfLiteOk;
+}
+#endif
+
+TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+  const TfLiteEvalTensor* input =
+      tflite::micro::GetEvalInput(context, node, kInputTensor);
+  const TfLiteEvalTensor* filter =
+      tflite::micro::GetEvalInput(context, node, kFilterTensor);
+  const TfLiteEvalTensor* bias =
+      (NumInputs(node) == 4)
+          ? tflite::micro::GetEvalInput(context, node, kBiasTensor)
+          : nullptr;
+  TfLiteEvalTensor* output =
+      tflite::micro::GetEvalOutput(context, node, kOutputTensor);
+
+  TFLITE_DCHECK(node->user_data != nullptr);
+  const OpData& data = *(static_cast<const OpData*>(node->user_data));
+
+  TF_LITE_ENSURE_EQ(context, input->type, output->type);
+  const auto& params =
+      *(reinterpret_cast<TfLiteConvParams*>(node->builtin_data));
+
+  switch (input->type) {  // Already know in/out types are same.
+    case kTfLiteFloat32: {
+      ConvParams op_params = data.params;
+      CalculateActivationRange(params.activation,
+                               &op_params.float_activation_min,
+                               &op_params.float_activation_max);
+
+      reference_ops::TransposeConv(
+          op_params, tflite::micro::GetTensorShape(input),
+          tflite::micro::GetTensorData<float>(input),
+          tflite::micro::GetTensorShape(filter),
+          tflite::micro::GetTensorData<float>(filter),
+          tflite::micro::GetTensorShape(bias),
+          tflite::micro::GetOptionalTensorData<float>(bias),
+          tflite::micro::GetTensorShape(output),
+          tflite::micro::GetTensorData<float>(output),
+          tflite::micro::GetTensorShape(nullptr), nullptr);
+      break;
+    }
+    case kTfLiteInt8: {
+#if defined(KERNELS_OPTIMIZED_FOR_SIZE)
+      int32_t* scratch_buffer = static_cast<int32_t*>(
+          context->GetScratchBuffer(context, data.scratch_buffer_index));
+      reference_integer_ops::TransposeConv(
+          data.params, data.per_channel_output_multiplier,
+          data.per_channel_output_shift, tflite::micro::GetTensorShape(input),
+          tflite::micro::GetTensorData<int8_t>(input),
+          tflite::micro::GetTensorShape(filter),
+          tflite::micro::GetTensorData<int8_t>(filter),
+          tflite::micro::GetTensorShape(bias),
+          tflite::micro::GetOptionalTensorData<int32_t>(bias),
+          tflite::micro::GetTensorShape(output),
+          tflite::micro::GetTensorData<int8_t>(output),
+          tflite::micro::GetTensorShape(nullptr), nullptr, scratch_buffer);
+#elif defined(KERNELS_OPTIMIZED_FOR_SPEED)
+      return EvalQuantizedPerChannel(context, node, params, data, input, filter,
+                                     bias, output);
+#else
+      MicroPrintf(
+          "Either KERNELS_OPTIMIZED_FOR_SIZE or KERNELS_OPTIMIZED_FOR_SPEED "
+          "must be defined");
+      return kTfLiteError;
+#endif
+      break;
+    }
+    case kTfLiteInt16: {
+      std::int64_t* scratch_buffer = static_cast<int64_t*>(
+          context->GetScratchBuffer(context, data.scratch_buffer_index));
+      // TODO(b/192090531): Remove this once all 8x16 transpose conv models use
+      // 64-bit biases.
+      if (bias != nullptr && bias->type == kTfLiteInt16) {
+        std::int64_t* bias_converted_buffer =
+            static_cast<int64_t*>(context->GetScratchBuffer(
+                context, data.bias_converted_buffer_index));
+        for (int i = 0; i < tflite::micro::GetTensorShape(bias).FlatSize();
+             i++) {
+          bias_converted_buffer[i] = bias->data.i16[i];
+        }
+        reference_integer_ops::TransposeConv(
+            data.params, data.per_channel_output_multiplier,
+            data.per_channel_output_shift, tflite::micro::GetTensorShape(input),
+            tflite::micro::GetTensorData<int16_t>(input),
+            tflite::micro::GetTensorShape(filter),
+            tflite::micro::GetTensorData<int8_t>(filter),
+            tflite::micro::GetTensorShape(bias), bias_converted_buffer,
+            tflite::micro::GetTensorShape(output),
+            tflite::micro::GetTensorData<int16_t>(output),
+            tflite::micro::GetTensorShape(nullptr), nullptr, scratch_buffer);
+      } else {
+        reference_integer_ops::TransposeConv(
+            data.params, data.per_channel_output_multiplier,
+            data.per_channel_output_shift, tflite::micro::GetTensorShape(input),
+            tflite::micro::GetTensorData<int16_t>(input),
+            tflite::micro::GetTensorShape(filter),
+            tflite::micro::GetTensorData<int8_t>(filter),
+            tflite::micro::GetTensorShape(bias),
+            tflite::micro::GetOptionalTensorData<std::int64_t>(bias),
+            tflite::micro::GetTensorShape(output),
+            tflite::micro::GetTensorData<int16_t>(output),
+            tflite::micro::GetTensorShape(nullptr), nullptr, scratch_buffer);
+      }
+      break;
+    }
+    default:
+      MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type),
+                  input->type);
+      return kTfLiteError;
+  }
+  return kTfLiteOk;
+}
+
+TfLiteStatus EvalInt8(TfLiteContext* context, TfLiteNode* node) {
+  const TfLiteEvalTensor* input =
+      tflite::micro::GetEvalInput(context, node, kInputTensor);
+  const TfLiteEvalTensor* filter =
+      tflite::micro::GetEvalInput(context, node, kFilterTensor);
+  const TfLiteEvalTensor* bias =
+      (NumInputs(node) == 4)
+          ? tflite::micro::GetEvalInput(context, node, kBiasTensor)
+          : nullptr;
+  TfLiteEvalTensor* output =
+      tflite::micro::GetEvalOutput(context, node, kOutputTensor);
+
+  TFLITE_DCHECK(node->user_data != nullptr);
+  const OpData& data = *(static_cast<const OpData*>(node->user_data));
+
+#if defined(KERNELS_OPTIMIZED_FOR_SIZE)
+  int32_t* scratch_buffer = static_cast<int32_t*>(
+      context->GetScratchBuffer(context, data.scratch_buffer_index));
+  reference_integer_ops::TransposeConv(
+      data.params, data.per_channel_output_multiplier,
+      data.per_channel_output_shift, tflite::micro::GetTensorShape(input),
+      tflite::micro::GetTensorData<int8_t>(input),
+      tflite::micro::GetTensorShape(filter),
+      tflite::micro::GetTensorData<int8_t>(filter),
+      tflite::micro::GetTensorShape(bias),
+      tflite::micro::GetOptionalTensorData<int32_t>(bias),
+      tflite::micro::GetTensorShape(output),
+      tflite::micro::GetTensorData<int8_t>(output),
+      tflite::micro::GetTensorShape(nullptr), nullptr, scratch_buffer);
+#elif defined(KERNELS_OPTIMIZED_FOR_SPEED)
+  const auto& params =
+      *(reinterpret_cast<TfLiteConvParams*>(node->builtin_data));
+
+  return EvalQuantizedPerChannel(context, node, params, data, input, filter,
+                                 bias, output);
+#else
+  MicroPrintf(
+      "Either KERNELS_OPTIMIZED_FOR_SIZE or KERNELS_OPTIMIZED_FOR_SPEED must "
+      "be defined");
+  return kTfLiteError;
+#endif
+  return kTfLiteOk;
+}
+
+}  // namespace
+
+TFLMRegistration Register_TRANSPOSE_CONV() {
+  return tflite::micro::RegisterOp(Init, Prepare, Eval);
+}
+
+TFLMRegistration Register_TRANSPOSE_CONV_INT8() {
+  return tflite::micro::RegisterOp(Init, Prepare, EvalInt8);
+}
+
+}  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/cmsis_nn/unidirectional_sequence_lstm.cc b/tensorflow/lite/micro/kernels/cmsis_nn/unidirectional_sequence_lstm.cc
index f66ce80..49da4d9 100644
--- a/tensorflow/lite/micro/kernels/cmsis_nn/unidirectional_sequence_lstm.cc
+++ b/tensorflow/lite/micro/kernels/cmsis_nn/unidirectional_sequence_lstm.cc
@@ -1,4 +1,4 @@
-/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -28,477 +28,300 @@
 #include "tensorflow/lite/micro/kernels/lstm_eval.h"
 #include "tensorflow/lite/micro/kernels/lstm_shared.h"
 #include "tensorflow/lite/micro/kernels/micro_tensor_utils.h"
-
 namespace tflite {
 
 namespace {
 
 struct OpData {
-  OpDataLSTM params_ref;
-  cmsis_nn_lstm_params params_cmsis_nn;
+  OpDataLSTM params_ref;                 // Used for fallback implementation
+  cmsis_nn_lstm_params params_cmsis_nn;  // Used for  CMSIS-NN implementation
 };
 
-/*Helper Functions*/
-TfLiteStatus PrecomputeZeroPointTimesWeightWithBias(
-    TfLiteContext* context, int32_t zero_point,
-    const TfLiteTensor* weight_tensor, const TfLiteTensor* bias_tensor,
-    int32_t** output) {
-  if (weight_tensor == nullptr) {
-    return kTfLiteOk;
-  }
+LSTMBuffers<int16_t> CMSIS_NN_CreateLSTMBuffers(TfLiteContext* context,
+                                                const int* buffer_indices) {
+  LSTMBuffers<int16_t> buffers;
+  buffers.buffer0 = reinterpret_cast<int16_t*>(
+      context->GetScratchBuffer(context, buffer_indices[0]));
+  buffers.buffer1 = reinterpret_cast<int16_t*>(
+      context->GetScratchBuffer(context, buffer_indices[1]));
+  buffers.buffer2 = reinterpret_cast<int16_t*>(
+      context->GetScratchBuffer(context, buffer_indices[2]));
 
-  const RuntimeShape& weight_shape = GetTensorShape(weight_tensor);
-  TF_LITE_ENSURE_EQ(context, weight_shape.DimensionsCount(), 2);
-  const int row = weight_shape.Dims(0);
-  const int col = weight_shape.Dims(1);
-  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
-  *output = static_cast<int32_t*>(
-      context->AllocatePersistentBuffer(context, row * sizeof(int32_t)));
-
-  if (bias_tensor == nullptr) {
-    memset(*output, 0, row * sizeof(int32_t));
-  } else {
-    const int32_t* bias = GetTensorData<int32_t>(bias_tensor);
-    memcpy(*output, bias, row * sizeof(int32_t));
-  }
-
-  if (zero_point != 0) {
-    const int8_t* weight = GetTensorData<int8_t>(weight_tensor);
-    tflite::tensor_utils::MatrixScalarMultiplyAccumulate(weight, zero_point,
-                                                         row, col, *output);
-  }
-  return kTfLiteOk;
+  return buffers;
 }
 
-TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node,
-                             const LstmTensors& lstm_tensors, OpData* op_data) {
-  const TfLiteTensor* input = lstm_tensors.GetInternalTensor(kLstmInputTensor);
-  const TfLiteTensor* output_state =
-      lstm_tensors.GetInternalTensor(tflite::kLstmOutputStateTensor);
+void CMSIS_NN_VectorSum(int32_t* kernel_sum, const int32_t size1,
+                        const int32_t size2, const int8_t* weights,
+                        const int32_t offset, const int32_t* biases) {
+  arm_vector_sum_s8(kernel_sum, size1, size2, weights, offset, biases);
+}
 
-  TF_LITE_ENSURE(context, input->type == kTfLiteInt8);
+void CMSIS_NN_VectorSum(int64_t* kernel_sum, const int32_t size1,
+                        const int32_t size2, const int8_t* weights,
+                        const int32_t offset, const int64_t* biases) {
+  arm_vector_sum_s8_s64(kernel_sum, size1, size2, weights, offset, biases);
+}
 
-  op_data->params_cmsis_nn.output_state_offset =
-      output_state->params.zero_point;
+template <typename BiasType>
+TfLiteStatus CMSIS_NN_PortOpData(TfLiteContext* context, OpDataLSTM* params_ref,
+                                 const LSTMKernelContents& kernel_content,
+                                 cmsis_nn_lstm_params* params_cmsis_nn) {
+  // Unwrap pointers
+  const BiasType* input_gate_bias =
+      tflite::micro::GetOptionalTensorData<BiasType>(
+          kernel_content.GetInternalTensor(tflite::kLstmInputGateBiasTensor));
+  const BiasType* forget_gate_bias =
+      tflite::micro::GetOptionalTensorData<BiasType>(
+          kernel_content.GetInternalTensor(tflite::kLstmForgetGateBiasTensor));
+  const BiasType* cell_gate_bias =
+      tflite::micro::GetOptionalTensorData<BiasType>(
+          kernel_content.GetInternalTensor(tflite::kLstmCellGateBiasTensor));
+  const BiasType* output_gate_bias =
+      tflite::micro::GetOptionalTensorData<BiasType>(
+          kernel_content.GetInternalTensor(tflite::kLstmOutputGateBiasTensor));
 
-  const TfLiteTensor* input_to_forget_weights =
-      lstm_tensors.GetInternalTensor(kLstmInputToForgetWeightsTensor);
-  const TfLiteTensor* input_to_input_weights =
-      lstm_tensors.GetInternalTensor(kLstmInputToInputWeightsTensor);
-  const TfLiteTensor* input_to_output_weights =
-      lstm_tensors.GetInternalTensor(kLstmInputToOutputWeightsTensor);
-  const TfLiteTensor* input_to_cell_weights =
-      lstm_tensors.GetInternalTensor(kLstmInputToCellWeightsTensor);
-  const TfLiteTensor* forget_gate_bias =
-      lstm_tensors.GetInternalTensor(kLstmForgetGateBiasTensor);
-  const TfLiteTensor* cell_state =
-      lstm_tensors.GetInternalTensor(kLstmCellStateTensor);
+  const int8_t* input_to_input_weights =
+      tflite::micro::GetOptionalTensorData<int8_t>(
+          kernel_content.GetInternalTensor(
+              tflite::kLstmInputToInputWeightsTensor));
+  const int8_t* input_to_forget_weights =
+      tflite::micro::GetOptionalTensorData<int8_t>(
+          kernel_content.GetInternalTensor(
+              tflite::kLstmInputToForgetWeightsTensor));
+  const int8_t* input_to_cell_weights =
+      tflite::micro::GetOptionalTensorData<int8_t>(
+          kernel_content.GetInternalTensor(
+              tflite::kLstmInputToCellWeightsTensor));
+  const int8_t* input_to_output_weights =
+      tflite::micro::GetOptionalTensorData<int8_t>(
+          kernel_content.GetInternalTensor(
+              tflite::kLstmInputToOutputWeightsTensor));
 
-  const TfLiteTensor* cell_gate_bias =
-      lstm_tensors.GetInternalTensor(kLstmCellGateBiasTensor);
-  const TfLiteTensor* output_gate_bias =
-      lstm_tensors.GetInternalTensor(kLstmOutputGateBiasTensor);
-  const TfLiteTensor* input_gate_bias =
-      lstm_tensors.GetInternalTensor(kLstmInputGateBiasTensor);
-  const TfLiteTensor* recurrent_to_forget_weights =
-      lstm_tensors.GetInternalTensor(kLstmRecurrentToForgetWeightsTensor);
-  const TfLiteTensor* recurrent_to_cell_weights =
-      lstm_tensors.GetInternalTensor(kLstmRecurrentToCellWeightsTensor);
-  const TfLiteTensor* recurrent_to_output_weights =
-      lstm_tensors.GetInternalTensor(kLstmRecurrentToOutputWeightsTensor);
-  const TfLiteTensor* recurrent_to_input_weights =
-      lstm_tensors.GetInternalTensor(kLstmRecurrentToInputWeightsTensor);
-  const TfLiteTensor* cell_to_output_weights =
-      lstm_tensors.GetInternalTensor(kLstmCellToOutputWeightsTensor);
-  const TfLiteTensor* forget_layer_norm_coefficients =
-      lstm_tensors.GetInternalTensor(kLstmForgetLayerNormCoefficientsTensor);
-  const TfLiteTensor* projection_weights =
-      lstm_tensors.GetInternalTensor(kLstmProjectionWeightsTensor);
+  const int8_t* recurrent_to_input_weights =
+      tflite::micro::GetOptionalTensorData<int8_t>(
+          kernel_content.GetInternalTensor(
+              tflite::kLstmRecurrentToInputWeightsTensor));
+  const int8_t* recurrent_to_forget_weights =
+      tflite::micro::GetOptionalTensorData<int8_t>(
+          kernel_content.GetInternalTensor(
+              tflite::kLstmRecurrentToForgetWeightsTensor));
+  const int8_t* recurrent_to_cell_weights =
+      tflite::micro::GetOptionalTensorData<int8_t>(
+          kernel_content.GetInternalTensor(
+              tflite::kLstmRecurrentToCellWeightsTensor));
+  const int8_t* recurrent_to_output_weights =
+      tflite::micro::GetOptionalTensorData<int8_t>(
+          kernel_content.GetInternalTensor(
+              tflite::kLstmRecurrentToOutputWeightsTensor));
 
-  const bool use_layer_norm = (forget_layer_norm_coefficients != nullptr);
-  const bool use_peephole = (cell_to_output_weights != nullptr);
-  const bool use_projection = (projection_weights != nullptr);
-  const bool use_cifg = (input_to_input_weights == nullptr);
-  const bool lstm_unsupported_config =
-      use_layer_norm || use_peephole || use_projection || use_cifg;
-  TFLITE_DCHECK(!lstm_unsupported_config);
+  int32_t size_data = params_ref->size_info.input_dimension;
+  int32_t size_hidden = params_ref->size_info.state_dimension;
 
-  // Pre-calculate bias + zero_point * weight.
-  int32_t* input_to_forget_effective_bias = nullptr;
-  int32_t* recurrent_to_forget_effective_bias = nullptr;
-  int32_t* input_to_cell_effective_bias = nullptr;
-  int32_t* recurrent_to_cell_effective_bias = nullptr;
-  int32_t* input_to_output_effective_bias = nullptr;
-  int32_t* recurrent_to_output_effective_bias = nullptr;
-  int32_t* input_to_input_effective_bias = nullptr;
-  int32_t* recurrent_to_input_effective_bias = nullptr;
+  BiasType* input_data_kernel_sum{
+      static_cast<BiasType*>(context->AllocatePersistentBuffer(
+          context, size_hidden * sizeof(BiasType)))};
+  BiasType* forget_data_kernel_sum{
+      static_cast<BiasType*>(context->AllocatePersistentBuffer(
+          context, size_hidden * sizeof(BiasType)))};
+  BiasType* cell_data_kernel_sum{
+      static_cast<BiasType*>(context->AllocatePersistentBuffer(
+          context, size_hidden * sizeof(BiasType)))};
+  BiasType* output_data_kernel_sum{
+      static_cast<BiasType*>(context->AllocatePersistentBuffer(
+          context, size_hidden * sizeof(BiasType)))};
 
-  const int32_t output_state_zero_point =
-      -op_data->params_cmsis_nn.output_state_offset;
-  const int32_t input_zero_point = -input->params.zero_point;
+  BiasType* input_hidden_kernel_sum{
+      static_cast<BiasType*>(context->AllocatePersistentBuffer(
+          context, size_hidden * sizeof(BiasType)))};
+  BiasType* forget_hidden_kernel_sum{
+      static_cast<BiasType*>(context->AllocatePersistentBuffer(
+          context, size_hidden * sizeof(BiasType)))};
+  BiasType* cell_hidden_kernel_sum = {
+      static_cast<BiasType*>(context->AllocatePersistentBuffer(
+          context, size_hidden * sizeof(BiasType)))};
+  BiasType* output_hidden_kernel_sum = {
+      static_cast<BiasType*>(context->AllocatePersistentBuffer(
+          context, size_hidden * sizeof(BiasType)))};
 
-  TF_LITE_ENSURE_OK(context,
-                    PrecomputeZeroPointTimesWeightWithBias(
-                        context, input_zero_point, input_to_forget_weights,
-                        forget_gate_bias, &input_to_forget_effective_bias));
+  // Compute effective biases
+  CMSIS_NN_VectorSum(
+      input_data_kernel_sum, size_data, size_hidden, input_to_input_weights,
+      params_ref->input_gate_parameters.input_fc_params.input_offset,
+      input_gate_bias);
 
-  TF_LITE_ENSURE_OK(context, PrecomputeZeroPointTimesWeightWithBias(
-                                 context, output_state_zero_point,
-                                 recurrent_to_forget_weights, nullptr,
-                                 &recurrent_to_forget_effective_bias));
+  CMSIS_NN_VectorSum(
+      forget_data_kernel_sum, size_data, size_hidden, input_to_forget_weights,
+      params_ref->forget_gate_parameters.input_fc_params.input_offset,
+      forget_gate_bias);
 
-  // Modulation gate.
-  TF_LITE_ENSURE_OK(context,
-                    PrecomputeZeroPointTimesWeightWithBias(
-                        context, input_zero_point, input_to_cell_weights,
-                        cell_gate_bias, &input_to_cell_effective_bias));
-  TF_LITE_ENSURE_OK(
-      context, PrecomputeZeroPointTimesWeightWithBias(
-                   context, output_state_zero_point, recurrent_to_cell_weights,
-                   nullptr, &recurrent_to_cell_effective_bias));
+  CMSIS_NN_VectorSum(
+      cell_data_kernel_sum, size_data, size_hidden, input_to_cell_weights,
+      params_ref->cell_gate_parameters.input_fc_params.input_offset,
+      cell_gate_bias);
 
-  // Output gate.
-  TF_LITE_ENSURE_OK(context,
-                    PrecomputeZeroPointTimesWeightWithBias(
-                        context, input_zero_point, input_to_output_weights,
-                        output_gate_bias, &input_to_output_effective_bias));
+  CMSIS_NN_VectorSum(
+      output_data_kernel_sum, size_data, size_hidden, input_to_output_weights,
+      params_ref->output_gate_parameters.input_fc_params.input_offset,
+      output_gate_bias);
 
-  TF_LITE_ENSURE_OK(context, PrecomputeZeroPointTimesWeightWithBias(
-                                 context, output_state_zero_point,
-                                 recurrent_to_output_weights, nullptr,
-                                 &recurrent_to_output_effective_bias));
+  CMSIS_NN_VectorSum(
+      input_hidden_kernel_sum, size_hidden, size_hidden,
+      recurrent_to_input_weights,
+      -params_ref->inter_gate_parameters.output_mul_params.output_offset,
+      nullptr);
 
-  // Input gate. The calculation is only meaningful for non-cifg case.
-  TF_LITE_ENSURE_OK(context,
-                    PrecomputeZeroPointTimesWeightWithBias(
-                        context, input_zero_point, input_to_input_weights,
-                        input_gate_bias, &input_to_input_effective_bias));
-  TF_LITE_ENSURE_OK(
-      context, PrecomputeZeroPointTimesWeightWithBias(
-                   context, output_state_zero_point, recurrent_to_input_weights,
-                   nullptr, &recurrent_to_input_effective_bias));
+  CMSIS_NN_VectorSum(
+      forget_hidden_kernel_sum, size_hidden, size_hidden,
+      recurrent_to_forget_weights,
+      -params_ref->inter_gate_parameters.output_mul_params.output_offset,
+      nullptr);
 
-  op_data->params_cmsis_nn.i2f_effective_bias = input_to_forget_effective_bias;
-  op_data->params_cmsis_nn.r2f_effective_bias =
-      recurrent_to_forget_effective_bias;
-  op_data->params_cmsis_nn.i2c_effective_bias = input_to_cell_effective_bias;
-  op_data->params_cmsis_nn.r2c_effective_bias =
-      recurrent_to_cell_effective_bias;
-  op_data->params_cmsis_nn.i2o_effective_bias = input_to_output_effective_bias;
-  op_data->params_cmsis_nn.r2o_effective_bias =
-      recurrent_to_output_effective_bias;
-  op_data->params_cmsis_nn.i2i_effective_bias = input_to_input_effective_bias;
-  op_data->params_cmsis_nn.r2i_effective_bias =
-      recurrent_to_input_effective_bias;
+  CMSIS_NN_VectorSum(
+      cell_hidden_kernel_sum, size_hidden, size_hidden,
+      recurrent_to_cell_weights,
+      -params_ref->inter_gate_parameters.output_mul_params.output_offset,
+      nullptr);
 
-  // Get intermediate scales and zero points.
-  float intermediate_scale[5];
-  int32_t intermediate_zp[5];
-  for (int i = 0; i < 4; ++i) {
-    // Q3.12 for activation functions.
-    intermediate_scale[i] = std::pow(2.0f, -12.0f);
-    intermediate_zp[i] = 0;
-  }
+  CMSIS_NN_VectorSum(
+      output_hidden_kernel_sum, size_hidden, size_hidden,
+      recurrent_to_output_weights,
+      -params_ref->inter_gate_parameters.output_mul_params.output_offset,
+      nullptr);
 
-  MicroContext* micro_context = GetMicroContext(context);
-  // In the absence of projection, hidden becomes otuput and this intermediate
-  // is ignored.
-  TfLiteTensor* hidden = micro_context->AllocateTempIntermediateTensor(node, 4);
-  TF_LITE_ENSURE(context, hidden->quantization.type != kTfLiteNoQuantization);
-  auto* hidden_params =
-      static_cast<TfLiteAffineQuantization*>(hidden->quantization.params);
-  intermediate_scale[4] = hidden_params->scale->data[0];
-  intermediate_zp[4] = hidden_params->zero_point->data[0];
-  if (hidden != nullptr) {
-    micro_context->DeallocateTempTfLiteTensor(hidden);
-  }
+  // Create input gate parameters
+  cmsis_nn_lstm_gate gate_input{
+      params_ref->input_gate_parameters.input_fc_params.output_multiplier,
+      params_ref->input_gate_parameters.input_fc_params.output_shift,
+      input_to_input_weights,
+      input_data_kernel_sum,
+      params_ref->input_gate_parameters.recurrent_fc_params.output_multiplier,
+      params_ref->input_gate_parameters.recurrent_fc_params.output_shift,
+      recurrent_to_input_weights,
+      input_hidden_kernel_sum,
+      input_gate_bias,
+      ARM_SIGMOID};
 
-  // Scales.
-  const float default_scale = 1.0;
-  float input_scale = default_scale;
-  float input_to_input_weight_scale = default_scale;
-  float recurrent_to_input_weight_scale = default_scale;
-  float input_to_forget_weight_scale = default_scale;
-  float recurrent_to_forget_weight_scale = default_scale;
-  float input_to_cell_weight_scale = default_scale;
-  float recurrent_to_cell_weight_scale = default_scale;
-  float input_to_output_weight_scale = default_scale;
-  float recurrent_to_output_weight_scale = default_scale;
-  float output_state_scale = default_scale;
-  int cell_scale = 1;
+  // Create forget gate parameters
+  cmsis_nn_lstm_gate gate_forget{
+      params_ref->forget_gate_parameters.input_fc_params.output_multiplier,
+      params_ref->forget_gate_parameters.input_fc_params.output_shift,
+      input_to_forget_weights,
+      forget_data_kernel_sum,
+      params_ref->forget_gate_parameters.recurrent_fc_params.output_multiplier,
+      params_ref->forget_gate_parameters.recurrent_fc_params.output_shift,
+      recurrent_to_forget_weights,
+      forget_hidden_kernel_sum,
+      forget_gate_bias,
+      ARM_SIGMOID};
 
-  // Effective scales.
-  float effective_input_to_input_scale = default_scale;
-  float effective_recurrent_to_input_scale = default_scale;
-  float effective_cell_to_input_scale = default_scale;
-  float effective_input_to_forget_scale = default_scale;
-  float effective_recurrent_to_forget_scale = default_scale;
-  float effective_cell_to_forget_scale = default_scale;
-  float effective_input_to_cell_scale = default_scale;
-  float effective_recurrent_to_cell_scale = default_scale;
-  float effective_input_to_output_scale = default_scale;
-  float effective_recurrent_to_output_scale = default_scale;
-  float effective_cell_to_output_scale = default_scale;
-  float effective_hidden_scale = default_scale;
+  auto cell_gate_nonlinear_type =
+      (params_ref->cell_gate_nonlinear_type == kTfLiteActTanh) ? ARM_TANH
+                                                               : ARM_SIGMOID;
+  // Create cell gate parameters
+  cmsis_nn_lstm_gate gate_cell{
+      params_ref->cell_gate_parameters.input_fc_params.output_multiplier,
+      params_ref->cell_gate_parameters.input_fc_params.output_shift,
+      input_to_cell_weights,
+      cell_data_kernel_sum,
+      params_ref->cell_gate_parameters.recurrent_fc_params.output_multiplier,
+      params_ref->cell_gate_parameters.recurrent_fc_params.output_shift,
+      recurrent_to_cell_weights,
+      cell_hidden_kernel_sum,
+      cell_gate_bias,
+      cell_gate_nonlinear_type};
 
-  // Populate scales.
-  input_to_input_weight_scale = input_to_input_weights->params.scale;
-  recurrent_to_input_weight_scale = recurrent_to_input_weights->params.scale;
+  // Create output gate parameters
+  cmsis_nn_lstm_gate gate_output{
+      params_ref->output_gate_parameters.input_fc_params.output_multiplier,
+      params_ref->output_gate_parameters.input_fc_params.output_shift,
+      input_to_output_weights,
+      output_data_kernel_sum,
+      params_ref->output_gate_parameters.recurrent_fc_params.output_multiplier,
+      params_ref->output_gate_parameters.recurrent_fc_params.output_shift,
+      recurrent_to_output_weights,
+      output_hidden_kernel_sum,
+      output_gate_bias,
+      ARM_SIGMOID};
 
-  output_state_scale = output_state->params.scale;
-
-  input_to_forget_weight_scale = input_to_forget_weights->params.scale;
-  input_to_cell_weight_scale = input_to_cell_weights->params.scale;
-  input_to_output_weight_scale = input_to_output_weights->params.scale;
-  recurrent_to_forget_weight_scale = recurrent_to_forget_weights->params.scale;
-  recurrent_to_cell_weight_scale = recurrent_to_cell_weights->params.scale;
-  recurrent_to_output_weight_scale = recurrent_to_output_weights->params.scale;
-
-  // Check cell state (already used above)
-  TF_LITE_ENSURE(context, CheckedLog2(cell_state->params.scale, &cell_scale));
-  TF_LITE_ENSURE(context, cell_scale <= -9);
-
-  op_data->params_cmsis_nn.cell_state_shift = cell_scale;
-  input_scale = input->params.scale;
-
-  // Calculate effective scales.
-  effective_input_to_input_scale =
-      input_to_input_weight_scale * input_scale / intermediate_scale[0];
-  effective_recurrent_to_input_scale = recurrent_to_input_weight_scale *
-                                       output_state_scale /
-                                       intermediate_scale[0];
-
-  effective_input_to_forget_scale =
-      input_to_forget_weight_scale * input_scale / intermediate_scale[1];
-  effective_recurrent_to_forget_scale = recurrent_to_forget_weight_scale *
-                                        output_state_scale /
-                                        intermediate_scale[1];
-
-  effective_input_to_cell_scale =
-      input_to_cell_weight_scale * input_scale / intermediate_scale[2];
-  effective_recurrent_to_cell_scale = recurrent_to_cell_weight_scale *
-                                      output_state_scale /
-                                      intermediate_scale[2];
-
-  effective_input_to_output_scale =
-      input_to_output_weight_scale * input_scale / intermediate_scale[3];
-  effective_recurrent_to_output_scale = recurrent_to_output_weight_scale *
-                                        output_state_scale /
-                                        intermediate_scale[3];
-
-  effective_hidden_scale =
-      std::pow(2.0f, -15.0f) / intermediate_scale[4] * std::pow(2.0f, -15.0f);
-
-  // Decompose scales.
-  int shift_output;
-  QuantizeMultiplier(
-      static_cast<double>(effective_input_to_input_scale),
-      &op_data->params_cmsis_nn.input_to_input_scaling.multiplier,
-      &shift_output);
-  op_data->params_cmsis_nn.input_to_input_scaling.shift =
-      static_cast<int32_t>(shift_output);
-
-  QuantizeMultiplier(
-      static_cast<double>(effective_recurrent_to_input_scale),
-      &op_data->params_cmsis_nn.recurrent_to_input_scaling.multiplier,
-      &shift_output);
-  op_data->params_cmsis_nn.recurrent_to_input_scaling.shift =
-      static_cast<int32_t>(shift_output);
-  QuantizeMultiplier(static_cast<double>(effective_cell_to_input_scale),
-                     &op_data->params_cmsis_nn.cell_to_input_scaling.multiplier,
-                     &shift_output);
-  op_data->params_cmsis_nn.cell_to_input_scaling.shift =
-      static_cast<int32_t>(shift_output);
-  QuantizeMultiplier(
-      static_cast<double>(effective_input_to_forget_scale),
-      &op_data->params_cmsis_nn.input_to_forget_scaling.multiplier,
-      &shift_output);
-  op_data->params_cmsis_nn.input_to_forget_scaling.shift =
-      static_cast<int32_t>(shift_output);
-  QuantizeMultiplier(
-      static_cast<double>(effective_recurrent_to_forget_scale),
-      &op_data->params_cmsis_nn.recurrent_to_forget_scaling.multiplier,
-      &shift_output);
-  op_data->params_cmsis_nn.recurrent_to_forget_scaling.shift =
-      static_cast<int32_t>(shift_output);
-  QuantizeMultiplier(
-      static_cast<double>(effective_cell_to_forget_scale),
-      &op_data->params_cmsis_nn.cell_to_forget_scaling.multiplier,
-      &shift_output);
-  // ok
-  op_data->params_cmsis_nn.cell_to_forget_scaling.shift =
-      static_cast<int32_t>(shift_output);
-  QuantizeMultiplier(static_cast<double>(effective_input_to_cell_scale),
-                     &op_data->params_cmsis_nn.input_to_cell_scaling.multiplier,
-                     &shift_output);
-  op_data->params_cmsis_nn.input_to_cell_scaling.shift =
-      static_cast<int32_t>(shift_output);
-  QuantizeMultiplier(
-      static_cast<double>(effective_recurrent_to_cell_scale),
-      &op_data->params_cmsis_nn.recurrent_to_cell_scaling.multiplier,
-      &shift_output);
-  op_data->params_cmsis_nn.recurrent_to_cell_scaling.shift =
-      static_cast<int32_t>(shift_output);
-  QuantizeMultiplier(
-      static_cast<double>(effective_input_to_output_scale),
-      &op_data->params_cmsis_nn.input_to_output_scaling.multiplier,
-      &shift_output);
-  op_data->params_cmsis_nn.input_to_output_scaling.shift =
-      static_cast<int32_t>(shift_output);
-  QuantizeMultiplier(
-      static_cast<double>(effective_recurrent_to_output_scale),
-      &op_data->params_cmsis_nn.recurrent_to_output_scaling.multiplier,
-      &shift_output);
-  op_data->params_cmsis_nn.recurrent_to_output_scaling.shift =
-      static_cast<int32_t>(shift_output);
-  QuantizeMultiplier(
-      static_cast<double>(effective_cell_to_output_scale),
-      &op_data->params_cmsis_nn.cell_to_output_scaling.multiplier,
-      &shift_output);
-  op_data->params_cmsis_nn.cell_to_output_scaling.shift =
-      static_cast<int32_t>(shift_output);
-
-  op_data->params_cmsis_nn.projection_scaling.shift =
-      static_cast<int32_t>(shift_output);
-
-  QuantizeMultiplier(static_cast<double>(effective_hidden_scale),
-                     &op_data->params_cmsis_nn.hidden_scaling.multiplier,
-                     &shift_output);
-  op_data->params_cmsis_nn.hidden_scaling.shift =
-      static_cast<int32_t>(shift_output);
-
-  op_data->params_cmsis_nn.hidden_offset = intermediate_zp[4];
-
-  op_data->params_cmsis_nn.activation.min = std::numeric_limits<int16_t>::min();
-  op_data->params_cmsis_nn.activation.max = std::numeric_limits<int16_t>::max();
+  // Create the complete lstm data struct
+  *params_cmsis_nn = {
+      params_ref->size_info.time_major,
+      params_ref->size_info.batch_size,
+      params_ref->size_info.time_steps,
+      params_ref->size_info.input_dimension,
+      params_ref->size_info.state_dimension,
+      params_ref->forget_gate_parameters.input_fc_params.input_offset,
+      params_ref->inter_gate_parameters.forget_cell_mul_params
+          .output_multiplier,
+      params_ref->inter_gate_parameters.forget_cell_mul_params.output_shift,
+      params_ref->inter_gate_parameters.input_mul_params.output_multiplier,
+      params_ref->inter_gate_parameters.input_mul_params.output_shift,
+      params_ref->cell_state_info.quantized_cell_clip,
+      params_ref->cell_state_info.cell_state_scale_power,
+      params_ref->inter_gate_parameters.output_mul_params.output_multiplier,
+      params_ref->inter_gate_parameters.output_mul_params.output_shift,
+      params_ref->inter_gate_parameters.output_mul_params.output_offset,
+      gate_forget,
+      gate_input,
+      gate_cell,
+      gate_output};
 
   return kTfLiteOk;
 }
 
-template <typename CellType>
 TfLiteStatus CMSIS_NN_EvalInteger8x8_16Lstm(
     const OpData& op_data, const LSTMKernelContents& kernel_content,
-    const LSTMBuffers<CellType>& buffers) {
-  const OpDataLSTM& op_data_lstm = op_data.params_ref;
-  const TfLiteEvalTensor* input =
-      kernel_content.GetInternalTensor(tflite::kLstmInputTensor);
-  const TfLiteEvalTensor* input_gate_bias =
-      kernel_content.GetInternalTensor(tflite::kLstmInputGateBiasTensor);
-  const TfLiteEvalTensor* forget_gate_bias =
-      kernel_content.GetInternalTensor(tflite::kLstmForgetGateBiasTensor);
-  const TfLiteEvalTensor* cell_gate_bias =
-      kernel_content.GetInternalTensor(tflite::kLstmCellGateBiasTensor);
-  const TfLiteEvalTensor* output_gate_bias =
-      kernel_content.GetInternalTensor(tflite::kLstmOutputGateBiasTensor);
-  const TfLiteEvalTensor* input_to_output_weights =
-      kernel_content.GetInternalTensor(tflite::kLstmInputToOutputWeightsTensor);
-  const TfLiteEvalTensor* recurrent_to_output_weights =
-      kernel_content.GetInternalTensor(
-          tflite::kLstmRecurrentToOutputWeightsTensor);
-  const TfLiteEvalTensor* input_to_input_weights =
-      kernel_content.GetInternalTensor(tflite::kLstmInputToInputWeightsTensor);
-  const TfLiteEvalTensor* input_to_forget_weights =
-      kernel_content.GetInternalTensor(tflite::kLstmInputToForgetWeightsTensor);
-  const TfLiteEvalTensor* input_to_cell_weights =
-      kernel_content.GetInternalTensor(tflite::kLstmInputToCellWeightsTensor);
-  const TfLiteEvalTensor* recurrent_to_input_weights =
-      kernel_content.GetInternalTensor(
-          tflite::kLstmRecurrentToInputWeightsTensor);
-  const TfLiteEvalTensor* recurrent_to_forget_weights =
-      kernel_content.GetInternalTensor(
-          tflite::kLstmRecurrentToForgetWeightsTensor);
-  const TfLiteEvalTensor* recurrent_to_cell_weights =
-      kernel_content.GetInternalTensor(
-          tflite::kLstmRecurrentToCellWeightsTensor);
-  const TfLiteEvalTensor* cell_to_input_weights =
-      kernel_content.GetInternalTensor(tflite::kLstmCellToInputWeightsTensor);
-  const TfLiteEvalTensor* cell_to_forget_weights =
-      kernel_content.GetInternalTensor(tflite::kLstmCellToForgetWeightsTensor);
-  const TfLiteEvalTensor* cell_to_output_weights =
-      kernel_content.GetInternalTensor(tflite::kLstmCellToOutputWeightsTensor);
-  const TfLiteEvalTensor* cell_state =
-      kernel_content.GetInternalTensor(tflite::kLstmCellStateTensor);
-  const TfLiteEvalTensor* output_state =
-      kernel_content.GetInternalTensor(tflite::kLstmOutputStateTensor);
-  const TfLiteEvalTensor* output = kernel_content.output_tensor;
+    const LSTMBuffers<int16_t>& buffers) {
+  TFLITE_DCHECK(
+      kernel_content.GetInternalTensor(tflite::kLstmInputTensor)->dims->size >=
+          2 &&
+      kernel_content.GetInternalTensor(tflite::kLstmInputTensor)->dims->size <=
+          3);
 
-  TFLITE_DCHECK(input->dims->size >= 2 && input->dims->size <= 3);
+  const int8_t* input = tflite::micro::GetOptionalTensorData<int8_t>(
+      kernel_content.GetInternalTensor(tflite::kLstmInputTensor));
+  int8_t* output =
+      tflite::micro::GetTensorData<int8_t>(kernel_content.output_tensor);
 
-  cmsis_nn_lstm_context scratch_buffers;
-  scratch_buffers.input_gate = reinterpret_cast<int16_t*>(buffers.buffer0);
-  scratch_buffers.forget_gate = reinterpret_cast<int16_t*>(buffers.buffer1);
-  scratch_buffers.cell_gate = reinterpret_cast<int16_t*>(buffers.buffer2);
-  scratch_buffers.output_gate = reinterpret_cast<int16_t*>(buffers.buffer3);
+  // Create lstm buffer struct
+  cmsis_nn_lstm_context cmsis_buffers;
+  cmsis_buffers.temp1 = reinterpret_cast<int16_t*>(buffers.buffer0);
+  cmsis_buffers.temp2 = reinterpret_cast<int16_t*>(buffers.buffer1);
+  cmsis_buffers.cell_state = reinterpret_cast<int16_t*>(buffers.buffer2);
 
-  cmsis_nn_lstm_params cmsis_lstm_params = op_data.params_cmsis_nn;
-  cmsis_lstm_params.time_major = op_data_lstm.size_info.time_major;
-  cmsis_lstm_params.clip.cell =
-      op_data_lstm.cell_state_info.quantized_cell_clip;
+  arm_lstm_unidirectional_s8(input, output, &op_data.params_cmsis_nn,
+                             &cmsis_buffers);
 
-  cmsis_lstm_params.input_gate_bias = const_cast<int32_t*>(
-      tflite::micro::GetOptionalTensorData<int32_t>(input_gate_bias));
-  cmsis_lstm_params.forget_gate_bias = const_cast<int32_t*>(
-      tflite::micro::GetOptionalTensorData<int32_t>(forget_gate_bias));
-  cmsis_lstm_params.cell_gate_bias = const_cast<int32_t*>(
-      tflite::micro::GetOptionalTensorData<int32_t>(cell_gate_bias));
-  cmsis_lstm_params.output_gate_bias = const_cast<int32_t*>(
-      tflite::micro::GetOptionalTensorData<int32_t>(output_gate_bias));
+  return kTfLiteOk;
+}
 
-  const bool time_major = op_data_lstm.size_info.time_major;
-  const int n_input = input->dims->data[input->dims->size - 1];
-  const int n_output = recurrent_to_output_weights->dims->data[1];
+TfLiteStatus CMSIS_NN_EvalInteger16x8_16Lstm(
+    const OpData& op_data, const LSTMKernelContents& kernel_content,
+    const LSTMBuffers<int16_t>& buffers) {
+  TFLITE_DCHECK(
+      kernel_content.GetInternalTensor(tflite::kLstmInputTensor)->dims->size >=
+          2 &&
+      kernel_content.GetInternalTensor(tflite::kLstmInputTensor)->dims->size <=
+          3);
 
-  int max_time, n_batch;
-  if (input->dims->size == 2) {
-    max_time = 1;
-    n_batch = input->dims->data[0];
-  } else {
-    max_time = (time_major) ? input->dims->data[0] : input->dims->data[1];
-    n_batch = (time_major) ? input->dims->data[1] : input->dims->data[0];
-  }
+  const int16_t* input = tflite::micro::GetOptionalTensorData<int16_t>(
+      kernel_content.GetInternalTensor(tflite::kLstmInputTensor));
+  int16_t* output =
+      tflite::micro::GetTensorData<int16_t>(kernel_content.output_tensor);
 
-  cmsis_nn_lstm_dims lstm_dims;
-  lstm_dims.num_inputs = n_input;
-  lstm_dims.num_outputs = n_output;
-  lstm_dims.num_batches = n_batch;
-  lstm_dims.max_time = max_time;
+  // Create lstm buffer struct
+  cmsis_nn_lstm_context cmsis_buffers;
+  cmsis_buffers.temp1 = reinterpret_cast<int16_t*>(buffers.buffer0);
+  cmsis_buffers.temp2 = reinterpret_cast<int16_t*>(buffers.buffer1);
+  cmsis_buffers.cell_state = reinterpret_cast<int16_t*>(buffers.buffer2);
 
-  arm_lstm_unidirectional_s16_s8(
-      &scratch_buffers,
-      const_cast<int8_t*>(tflite::micro::GetTensorData<int8_t>(input)),
-      &lstm_dims,
-      const_cast<int8_t*>(
-          tflite::micro::GetOptionalTensorData<int8_t>(input_to_input_weights)),
-      const_cast<int8_t*>(tflite::micro::GetOptionalTensorData<int8_t>(
-          input_to_forget_weights)),
-      const_cast<int8_t*>(
-          tflite::micro::GetOptionalTensorData<int8_t>(input_to_cell_weights)),
-      const_cast<int8_t*>(tflite::micro::GetOptionalTensorData<int8_t>(
-          input_to_output_weights)),
-      const_cast<int8_t*>(tflite::micro::GetOptionalTensorData<int8_t>(
-          recurrent_to_input_weights)),
-      const_cast<int8_t*>(tflite::micro::GetOptionalTensorData<int8_t>(
-          recurrent_to_forget_weights)),
-      const_cast<int8_t*>(tflite::micro::GetOptionalTensorData<int8_t>(
-          recurrent_to_cell_weights)),
-      const_cast<int8_t*>(tflite::micro::GetOptionalTensorData<int8_t>(
-          recurrent_to_output_weights)),
-      const_cast<int16_t*>(
-          tflite::micro::GetOptionalTensorData<int16_t>(cell_to_input_weights)),
-      const_cast<int16_t*>(tflite::micro::GetOptionalTensorData<int16_t>(
-          cell_to_forget_weights)),
-      const_cast<int16_t*>(tflite::micro::GetOptionalTensorData<int16_t>(
-          cell_to_output_weights)),
-      nullptr, &cmsis_lstm_params,
-      const_cast<int8_t*>(tflite::micro::GetTensorData<int8_t>(output_state)),
-      const_cast<int16_t*>(tflite::micro::GetTensorData<int16_t>(cell_state)),
-      const_cast<int8_t*>(tflite::micro::GetTensorData<int8_t>(output)));
+  arm_lstm_unidirectional_s16(input, output, &op_data.params_cmsis_nn,
+                              &cmsis_buffers);
 
   return kTfLiteOk;
 }
 
 /*Kernel functions*/
-
 void* UnidirectionalSequenceLstmInit(TfLiteContext* context, const char* buffer,
                                      size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
@@ -531,15 +354,9 @@
   const TfLiteTensor* input = lstm_tensors.GetInternalTensor(kLstmInputTensor);
   const auto activation_type = input->type;
 
-  if (kTfLiteInt8 == activation_type) {
-    TF_LITE_ENSURE_STATUS(
-        CalculateOpData(context, node, lstm_tensors, op_data));
-  }
-
   TF_LITE_ENSURE_OK(context, ValidateTensorSize(context, lstm_tensors,
                                                 op_data_lstm->size_info));
 
-  // Create cell state information and gate parameters (Fully Connected and Mul)
   auto cell_state_type =
       lstm_tensors.GetInternalTensor(kLstmCellStateTensor)->type;
   if (cell_state_type == kTfLiteFloat32) {
@@ -559,8 +376,24 @@
         TfLiteTypeGetName(cell_state_type), cell_state_type);
     return kTfLiteError;
   }
-  // request buffers (four buffers)
-  for (size_t i = 0; i < 4; i++) {
+
+  size_t number_of_buffers;
+  if (activation_type == kTfLiteInt8 && cell_state_type == kTfLiteInt16) {
+    auto kernel_content = CreateLSTMKernelContent(context, node);
+    number_of_buffers = 3;
+    CMSIS_NN_PortOpData<int32_t>(context, op_data_lstm, kernel_content,
+                                 &op_data->params_cmsis_nn);
+  } else if (activation_type == kTfLiteInt16 &&
+             cell_state_type == kTfLiteInt16) {
+    auto kernel_content = CreateLSTMKernelContent(context, node);
+    number_of_buffers = 3;
+    CMSIS_NN_PortOpData<int64_t>(context, op_data_lstm, kernel_content,
+                                 &op_data->params_cmsis_nn);
+  } else {
+    number_of_buffers = 4;
+  }
+
+  for (size_t i = 0; i < number_of_buffers; i++) {
     TF_LITE_ENSURE_OK(context, context->RequestScratchBufferInArena(
                                    context,
                                    op_data_lstm->size_info.batch_size *
@@ -598,9 +431,8 @@
         case kTfLiteInt8: {
           // 8(activation)x8(weight)->16(cell) LSTM with 32 bits bias
           LSTMBuffers<int16_t> buffers =
-              CreateLSTMBuffers<int16_t>(context, op_data_lstm.buffer_indices);
-          return CMSIS_NN_EvalInteger8x8_16Lstm<int16_t>(
-              op_data, kernel_content, buffers);
+              CMSIS_NN_CreateLSTMBuffers(context, op_data_lstm.buffer_indices);
+          CMSIS_NN_EvalInteger8x8_16Lstm(op_data, kernel_content, buffers);
           break;
         }
         default: {
@@ -616,9 +448,8 @@
         case kTfLiteInt8: {
           // 16(activation)x8(weight)->16(cell) LSTM with 64 bits bias
           LSTMBuffers<int16_t> buffers =
-              CreateLSTMBuffers<int16_t>(context, op_data_lstm.buffer_indices);
-          EvalLstm<int16_t, int8_t, int16_t, int64_t>(op_data_lstm,
-                                                      kernel_content, buffers);
+              CMSIS_NN_CreateLSTMBuffers(context, op_data_lstm.buffer_indices);
+          CMSIS_NN_EvalInteger16x8_16Lstm(op_data, kernel_content, buffers);
           break;
         }
         default: {
@@ -654,10 +485,36 @@
 
   if (activation_type == kTfLiteInt8) {
     LSTMBuffers<int16_t> buffers =
-        CreateLSTMBuffers<int16_t>(context, op_data_lstm.buffer_indices);
+        CMSIS_NN_CreateLSTMBuffers(context, op_data_lstm.buffer_indices);
 
-    return CMSIS_NN_EvalInteger8x8_16Lstm<int16_t>(op_data, kernel_content,
-                                                   buffers);
+    return CMSIS_NN_EvalInteger8x8_16Lstm(op_data, kernel_content, buffers);
+  } else {
+    MicroPrintf("Input type %s (%d) not supported.",
+                TfLiteTypeGetName(activation_type), activation_type);
+    return kTfLiteError;
+  }
+  return kTfLiteOk;
+}
+
+TfLiteStatus UnidirectionalSequenceLstmEvalInt16(TfLiteContext* context,
+                                                 TfLiteNode* node) {
+  TFLITE_DCHECK(node->user_data != nullptr);
+  const OpData& op_data = *reinterpret_cast<const OpData*>(node->user_data);
+  const OpDataLSTM& op_data_lstm = op_data.params_ref;
+  auto kernel_content = CreateLSTMKernelContent(context, node);
+  const auto activation_type =
+      kernel_content.internal_tensors[kLstmInputTensor]->type;
+  const auto weight_type =
+      kernel_content.internal_tensors[kLstmInputToInputWeightsTensor]->type;
+
+  TFLITE_DCHECK(weight_type == kTfLiteInt16 &&
+                "Only int16 filter type supported.");
+
+  if (activation_type == kTfLiteInt16) {
+    LSTMBuffers<int16_t> buffers =
+        CMSIS_NN_CreateLSTMBuffers(context, op_data_lstm.buffer_indices);
+
+    return CMSIS_NN_EvalInteger16x8_16Lstm(op_data, kernel_content, buffers);
   } else {
     MicroPrintf("Input type %s (%d) not supported.",
                 TfLiteTypeGetName(activation_type), activation_type);
@@ -680,4 +537,10 @@
                                    UnidirectionalSequenceLstmEvalInt8);
 }
 
+TFLMRegistration Register_UNIDIRECTIONAL_SEQUENCE_LSTM_INT16() {
+  return tflite::micro::RegisterOp(UnidirectionalSequenceLstmInit,
+                                   UnidirectionalSequenceLstmPrepare,
+                                   UnidirectionalSequenceLstmEvalInt16);
+}
+
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/comparisons.cc b/tensorflow/lite/micro/kernels/comparisons.cc
index 4056316..69b3c61 100644
--- a/tensorflow/lite/micro/kernels/comparisons.cc
+++ b/tensorflow/lite/micro/kernels/comparisons.cc
@@ -533,7 +533,7 @@
   return context->AllocatePersistentBuffer(context, sizeof(OpData));
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus ComparisonsPrepare(TfLiteContext* context, TfLiteNode* node) {
   TFLITE_DCHECK(node->user_data != nullptr);
   OpData* data = static_cast<OpData*>(node->user_data);
 
@@ -580,27 +580,27 @@
 }  // namespace
 
 TFLMRegistration Register_EQUAL() {
-  return tflite::micro::RegisterOp(Init, Prepare, EqualEval);
+  return tflite::micro::RegisterOp(Init, ComparisonsPrepare, EqualEval);
 }
 
 TFLMRegistration Register_NOT_EQUAL() {
-  return tflite::micro::RegisterOp(Init, Prepare, NotEqualEval);
+  return tflite::micro::RegisterOp(Init, ComparisonsPrepare, NotEqualEval);
 }
 
 TFLMRegistration Register_GREATER() {
-  return tflite::micro::RegisterOp(Init, Prepare, GreaterEval);
+  return tflite::micro::RegisterOp(Init, ComparisonsPrepare, GreaterEval);
 }
 
 TFLMRegistration Register_GREATER_EQUAL() {
-  return tflite::micro::RegisterOp(Init, Prepare, GreaterEqualEval);
+  return tflite::micro::RegisterOp(Init, ComparisonsPrepare, GreaterEqualEval);
 }
 
 TFLMRegistration Register_LESS() {
-  return tflite::micro::RegisterOp(Init, Prepare, LessEval);
+  return tflite::micro::RegisterOp(Init, ComparisonsPrepare, LessEval);
 }
 
 TFLMRegistration Register_LESS_EQUAL() {
-  return tflite::micro::RegisterOp(Init, Prepare, LessEqualEval);
+  return tflite::micro::RegisterOp(Init, ComparisonsPrepare, LessEqualEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/concatenation.cc b/tensorflow/lite/micro/kernels/concatenation.cc
index b4a838f..57d63a9 100644
--- a/tensorflow/lite/micro/kernels/concatenation.cc
+++ b/tensorflow/lite/micro/kernels/concatenation.cc
@@ -103,12 +103,13 @@
                                tflite::micro::GetTensorData<data_type>(output));
 }
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* ConcatenationInit(TfLiteContext* context, const char* buffer,
+                        size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
   return context->AllocatePersistentBuffer(context, sizeof(OpData));
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus ConcatenationPrepare(TfLiteContext* context, TfLiteNode* node) {
   // This function only checks the types. Additional shape validations are
   // performed in the reference implementation called during Eval().
   const TfLiteConcatenationParams* params =
@@ -214,7 +215,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus ConcatenationEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* output_tensor =
       tflite::micro::GetEvalOutput(context, node, kOutputTensor);
   TF_LITE_ENSURE(context, output_tensor != nullptr);
@@ -252,7 +253,8 @@
 }  // namespace
 
 TFLMRegistration Register_CONCATENATION() {
-  return tflite::micro::RegisterOp(Init, Prepare, Eval);
+  return tflite::micro::RegisterOp(ConcatenationInit, ConcatenationPrepare,
+                                   ConcatenationEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/conv.cc b/tensorflow/lite/micro/kernels/conv.cc
index 550f5b0..0df35fc 100644
--- a/tensorflow/lite/micro/kernels/conv.cc
+++ b/tensorflow/lite/micro/kernels/conv.cc
@@ -1,4 +1,4 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -27,12 +27,7 @@
 namespace tflite {
 namespace {
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
-  return context->AllocatePersistentBuffer(context, sizeof(OpDataConv));
-}
-
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus ConvEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* input =
       tflite::micro::GetEvalInput(context, node, kConvInputTensor);
   const TfLiteEvalTensor* filter =
@@ -50,14 +45,6 @@
   TFLITE_DCHECK(node->user_data != nullptr);
   const auto& data = *(static_cast<const OpDataConv*>(node->user_data));
 
-  TF_LITE_ENSURE_EQ(context, input->type, output->type);
-  TF_LITE_ENSURE_MSG(
-      context,
-      input->type == filter->type ||
-          (input->type == kTfLiteInt16 && filter->type == kTfLiteInt8) ||
-          (input->type == kTfLiteInt8 && filter->type == kTfLiteInt4),
-      "Hybrid models are not supported on TFLite Micro.");
-
   switch (input->type) {  // Already know in/out types are same.
     case kTfLiteFloat32: {
       tflite::reference_ops::Conv(
@@ -73,39 +60,34 @@
       break;
     }
     case kTfLiteInt16: {
-      switch (bias->type) {
-        case kTfLiteInt32: {
-          reference_integer_ops::ConvPerChannel(
-              ConvParamsQuantized(params, data),
-              data.per_channel_output_multiplier, data.per_channel_output_shift,
-              tflite::micro::GetTensorShape(input),
-              tflite::micro::GetTensorData<int16_t>(input),
-              tflite::micro::GetTensorShape(filter),
-              tflite::micro::GetTensorData<int8_t>(filter),
-              tflite::micro::GetTensorShape(bias),
-              tflite::micro::GetOptionalTensorData<std::int32_t>(bias),
-              tflite::micro::GetTensorShape(output),
-              tflite::micro::GetTensorData<int16_t>(output));
-          break;
-        }
-        case kTfLiteInt64: {
-          reference_integer_ops::ConvPerChannel(
-              ConvParamsQuantized(params, data),
-              data.per_channel_output_multiplier, data.per_channel_output_shift,
-              tflite::micro::GetTensorShape(input),
-              tflite::micro::GetTensorData<int16_t>(input),
-              tflite::micro::GetTensorShape(filter),
-              tflite::micro::GetTensorData<int8_t>(filter),
-              tflite::micro::GetTensorShape(bias),
-              tflite::micro::GetOptionalTensorData<std::int64_t>(bias),
-              tflite::micro::GetTensorShape(output),
-              tflite::micro::GetTensorData<int16_t>(output));
-          break;
-        }
-        default:
-          MicroPrintf("Bias type %s (%d) not supported.",
-                      TfLiteTypeGetName(bias->type), bias->type);
-          return kTfLiteError;
+      if (bias == nullptr || bias->type == kTfLiteInt32) {
+        reference_integer_ops::ConvPerChannel(
+            ConvParamsQuantized(params, data),
+            data.per_channel_output_multiplier, data.per_channel_output_shift,
+            tflite::micro::GetTensorShape(input),
+            tflite::micro::GetTensorData<int16_t>(input),
+            tflite::micro::GetTensorShape(filter),
+            tflite::micro::GetTensorData<int8_t>(filter),
+            tflite::micro::GetTensorShape(bias),
+            tflite::micro::GetOptionalTensorData<std::int32_t>(bias),
+            tflite::micro::GetTensorShape(output),
+            tflite::micro::GetTensorData<int16_t>(output));
+      } else if (bias->type == kTfLiteInt64) {
+        reference_integer_ops::ConvPerChannel(
+            ConvParamsQuantized(params, data),
+            data.per_channel_output_multiplier, data.per_channel_output_shift,
+            tflite::micro::GetTensorShape(input),
+            tflite::micro::GetTensorData<int16_t>(input),
+            tflite::micro::GetTensorShape(filter),
+            tflite::micro::GetTensorData<int8_t>(filter),
+            tflite::micro::GetTensorShape(bias),
+            tflite::micro::GetOptionalTensorData<std::int64_t>(bias),
+            tflite::micro::GetTensorShape(output),
+            tflite::micro::GetTensorData<int16_t>(output));
+      } else {
+        MicroPrintf("Bias type %s (%d) not supported.",
+                    TfLiteTypeGetName(bias->type), bias->type);
+        return kTfLiteError;
       }
       break;
     }
@@ -162,7 +144,7 @@
 }  // namespace
 
 TFLMRegistration Register_CONV_2D() {
-  return tflite::micro::RegisterOp(Init, ConvPrepare, Eval);
+  return tflite::micro::RegisterOp(ConvInit, ConvPrepare, ConvEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/conv.h b/tensorflow/lite/micro/kernels/conv.h
index 3b122ad..0c8073f 100644
--- a/tensorflow/lite/micro/kernels/conv.h
+++ b/tensorflow/lite/micro/kernels/conv.h
@@ -1,4 +1,4 @@
-/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -74,6 +74,8 @@
                                  int out_height, const TfLiteType data_type,
                                  OpDataConv* data);
 
+void* ConvInit(TfLiteContext* context, const char* buffer, size_t length);
+
 TfLiteStatus ConvPrepare(TfLiteContext* context, TfLiteNode* node);
 
 // This is the most generic TFLMRegistration. The actual supported types
@@ -86,14 +88,24 @@
 // int8 activations and int8 weights and always calls the reference
 // implementation.
 TFLMRegistration Register_CONV_2D_INT8REF();
+
 #else
 inline TFLMRegistration Register_CONV_2D_INT8REF() {
   return Register_CONV_2D();
 }
-#endif
+#endif  // defined(XTENSA)
 
 #if defined(CMSIS_NN)
 // Returns a TFLMRegistration struct for kernel variant that only supports
+// int8 activations and int4 weights and uses the latency optimized
+// implementations.
+TFLMRegistration Register_CONV_2D_INT4();
+#else
+inline TFLMRegistration Register_CONV_2D_INT4() { return Register_CONV_2D(); }
+#endif  // defined(CMSIS_NN)
+
+#if defined(CMSIS_NN) || defined(XTENSA)
+// Returns a TFLMRegistration struct for kernel variant that only supports
 // int8 activations and int8 weights and uses the latency optimized
 // implementations.
 TFLMRegistration Register_CONV_2D_INT8();
@@ -107,7 +119,7 @@
 inline TFLMRegistration Register_CONV_2D_INT8() { return Register_CONV_2D(); }
 
 inline TFLMRegistration Register_CONV_2D_INT16() { return Register_CONV_2D(); }
-#endif
+#endif  // defined(CMSIS_NN) || defined(XTENSA)
 
 }  // namespace tflite
 
diff --git a/tensorflow/lite/micro/kernels/conv_common.cc b/tensorflow/lite/micro/kernels/conv_common.cc
index c548c93..51c7a6f 100644
--- a/tensorflow/lite/micro/kernels/conv_common.cc
+++ b/tensorflow/lite/micro/kernels/conv_common.cc
@@ -1,4 +1,4 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -71,6 +71,11 @@
   return op_params;
 }
 
+void* ConvInit(TfLiteContext* context, const char* buffer, size_t length) {
+  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
+  return context->AllocatePersistentBuffer(context, sizeof(OpDataConv));
+}
+
 TfLiteStatus CalculateOpDataConv(TfLiteContext* context, TfLiteNode* node,
                                  const TfLiteConvParams& params, int width,
                                  int height, int filter_width,
@@ -120,10 +125,12 @@
   data->filter_zero_point = filter->params.zero_point;
   data->output_zero_point = output->params.zero_point;
 
+  micro_context->DeallocateTempTfLiteTensor(output);
   micro_context->DeallocateTempTfLiteTensor(input);
   micro_context->DeallocateTempTfLiteTensor(filter);
-  micro_context->DeallocateTempTfLiteTensor(output);
-  micro_context->DeallocateTempTfLiteTensor(bias);
+  if (bias != nullptr) {
+    micro_context->DeallocateTempTfLiteTensor(bias);
+  }
 
   return kTfLiteOk;
 }
@@ -147,6 +154,15 @@
       micro_context->AllocateTempInputTensor(node, kConvWeightsTensor);
   TF_LITE_ENSURE(context, filter != nullptr);
 
+  TF_LITE_ENSURE_EQ(context, input->type, output->type);
+  TF_LITE_ENSURE_MSG(
+      context,
+      (input->type == kTfLiteFloat32 && filter->type == kTfLiteFloat32) ||
+          (input->type == kTfLiteInt16 && filter->type == kTfLiteInt8) ||
+          (input->type == kTfLiteInt8 &&
+           (filter->type == kTfLiteInt4 || filter->type == kTfLiteInt8)),
+      "Hybrid models are not supported on TFLite Micro.");
+
   const int input_width = input->dims->data[2];
   const int input_height = input->dims->data[1];
   const int filter_width = filter->dims->data[2];
@@ -196,7 +212,6 @@
   micro_context->DeallocateTempTfLiteTensor(filter);
   micro_context->DeallocateTempTfLiteTensor(input);
   micro_context->DeallocateTempTfLiteTensor(output);
-
   return kTfLiteOk;
 }
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/conv_test.cc b/tensorflow/lite/micro/kernels/conv_test.cc
index 98c2615..0fb9411 100644
--- a/tensorflow/lite/micro/kernels/conv_test.cc
+++ b/tensorflow/lite/micro/kernels/conv_test.cc
@@ -1,4 +1,4 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -53,6 +53,7 @@
     kTfLiteActNone,       // activation
     1,                    // dilation_width_factor
     1,                    // dilation_height_factor
+    kTfLiteNoType         // quantized_bias_type
 };
 
 }  // namespace
@@ -61,10 +62,6 @@
 
 TF_LITE_MICRO_TESTS_BEGIN
 
-#if !defined(VISION_P6)  // TODO(b/270720625): disabled int8 and int4 test for
-// conv for fully connected vision p6 kernels, because vision p6 conv doesn't
-// work with per channel quantization
-
 TF_LITE_MICRO_TEST(SimpleTestQuantized4bitPerChannel) {
   const int output_dims_count = 12;
   int8_t output_data[output_dims_count];
@@ -125,12 +122,6 @@
           output_data));
 }
 
-#endif  // !defined(VISION_P6)
-
-#if !defined(XTENSA)  // TODO(b/170321206): xtensa kernels are less general than
-                      // reference kernels and we ifdef out test cases that are
-                      // currently known to fail.
-
 TF_LITE_MICRO_TEST(SimpleTestFloat) {
   float output_data[tflite::testing::kOutputElements];
 
@@ -255,7 +246,6 @@
           output_data));
 }
 
-#if !defined(CMSIS_NN)
 TF_LITE_MICRO_TEST(SimpleTestQuantized16x8PerChannel32bBias) {
   const int output_dims_count = 12;
   int16_t output_data[output_dims_count];
@@ -285,7 +275,6 @@
           &tflite::testing::common_conv_params, tflite::Register_CONV_2D(),
           output_data));
 }
-#endif
 
 TF_LITE_MICRO_TEST(SimpleTestDilatedQuantizedPerChannel) {
   const int output_dims_count = 24;
@@ -396,7 +385,6 @@
           &conv_params, tflite::Register_CONV_2D(), output_data));
 }
 
-#if !defined(CMSIS_NN)
 TF_LITE_MICRO_TEST(SimpleTestQuantized16x8PerChannelRelu632bBias) {
   const int output_dims_count = 12;
   int16_t output_data[output_dims_count];
@@ -429,13 +417,12 @@
           golden_data, golden_quantized, output_scale, output_zero_point,
           &conv_params, tflite::Register_CONV_2D(), output_data));
 }
-#endif
 
 TF_LITE_MICRO_TEST(Kernel1x1QuantizedPerChannel) {
   // conv params:
   // padding, stride_<width,height>, activation, dilation_<width, height>
-  TfLiteConvParams conv_params = {kTfLitePaddingValid, 1, 1,
-                                  kTfLiteActNone,      1, 1};
+  TfLiteConvParams conv_params = {
+      kTfLitePaddingValid, 1, 1, kTfLiteActNone, 1, 1, kTfLiteNoType};
 
   int input_shape[] = {4, 1, 2, 2, 4};  // [len,N,H,W,C]
   constexpr int input_elements =
@@ -487,8 +474,8 @@
 TF_LITE_MICRO_TEST(Kernel1x1QuantizedPerChannelRelu6) {
   // conv params:
   // padding, stride_<width,height>, activation, dilation_<width, height>
-  TfLiteConvParams conv_params = {kTfLitePaddingValid, 1, 1,
-                                  kTfLiteActRelu6,     1, 1};
+  TfLiteConvParams conv_params = {
+      kTfLitePaddingValid, 1, 1, kTfLiteActRelu6, 1, 1, kTfLiteNoType};
 
   int input_shape[] = {4, 1, 2, 2, 4};  // [len,N,H,W,C]
   constexpr int input_elements =
@@ -540,8 +527,8 @@
 TF_LITE_MICRO_TEST(Kernel1x1Quantized16x8PerChannelRelu6) {
   // conv params:
   // padding, stride_<width,height>, activation, dilation_<width, height>
-  TfLiteConvParams conv_params = {kTfLitePaddingValid, 1, 1,
-                                  kTfLiteActRelu6,     1, 1};
+  TfLiteConvParams conv_params = {
+      kTfLitePaddingValid, 1, 1, kTfLiteActRelu6, 1, 1, kTfLiteNoType};
 
   int input_shape[] = {4, 1, 2, 2, 4};  // [len,N,H,W,C]
   const int input_elements = 1 * 2 * 2 * 4;
@@ -673,8 +660,6 @@
                      tflite::Register_CONV_2D(), output_data));
 }
 
-#endif  // !defined(XTENSA)
-
 TF_LITE_MICRO_TEST(Int8Filter1x3x3x1ShouldMatchGoldenEvenInputPaddingSame) {
   using tflite::ElementCount;
   using tflite::kConvFilter1x3x3x1;
diff --git a/tensorflow/lite/micro/kernels/conv_test.h b/tensorflow/lite/micro/kernels/conv_test.h
index 39d3fa7..c655f04 100644
--- a/tensorflow/lite/micro/kernels/conv_test.h
+++ b/tensorflow/lite/micro/kernels/conv_test.h
@@ -34,10 +34,6 @@
                         int output_length, TfLiteConvParams* conv_params,
                         TFLMRegistration registration, int8_t* output_data);
 
-TfLiteStatus InvokeConv(TfLiteTensor* tensors, int tensors_size,
-                        int output_length, TfLiteConvParams* conv_params,
-                        TFLMRegistration registration, uint8_t* output_data);
-
 TfLiteStatus ValidateConvGoldens(TfLiteTensor* tensors, int tensors_size,
                                  const float* expected_output_data,
                                  int output_length,
@@ -52,13 +48,6 @@
                                  TFLMRegistration registration,
                                  int8_t* output_data, float tolerance = 1e-5);
 
-TfLiteStatus ValidateConvGoldens(TfLiteTensor* tensors, int tensors_size,
-                                 const uint8_t* expected_output_data,
-                                 int output_length,
-                                 TfLiteConvParams* conv_params,
-                                 TFLMRegistration registration,
-                                 uint8_t* output_data, float tolerance = 1e-5);
-
 TfLiteStatus TestConvFloat(int* input_dims_data, const float* input_data,
                            int* filter_dims_data, const float* filter_data,
                            int* bias_dims_data, const float* bias_data,
@@ -67,15 +56,6 @@
                            TfLiteConvParams* conv_params,
                            TFLMRegistration registration, float* output_data);
 
-TfLiteStatus TestConvQuantizedPerLayer(
-    int* input_dims_data, const float* input_data, uint8_t* input_quantized,
-    float input_scale, int* filter_dims_data, const float* filter_data,
-    uint8_t* filter_quantized, float filter_scale, int* bias_dims_data,
-    const float* bias_data, int32_t* bias_quantized, int* output_dims_data,
-    const float* expected_output_data, uint8_t* expected_output_quantized,
-    float output_scale, TfLiteConvParams* conv_params,
-    TFLMRegistration registration, uint8_t* output_data);
-
 TfLiteStatus TestConvQuantizedPerChannel(
     int* input_dims_data, const float* input_data, int8_t* input_quantized,
     float input_scale, int input_zero_point, int* filter_dims_data,
diff --git a/tensorflow/lite/micro/kernels/conv_test_common.cc b/tensorflow/lite/micro/kernels/conv_test_common.cc
index bdc9466..a0f733b 100644
--- a/tensorflow/lite/micro/kernels/conv_test_common.cc
+++ b/tensorflow/lite/micro/kernels/conv_test_common.cc
@@ -1,4 +1,4 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -145,8 +145,8 @@
       input_data, input_quantized, input_dims, input_scale, input_zero_point);
   TfLiteTensor filter_tensor = CreateSymmetricPerChannelQuantizedTensor(
       filter_data, filter_data_quantized, filter_dims, filter_scales,
-      filter_zero_points, &filter_quant, 0, false,
-      tensor_weight_type /* quantized dimension */);
+      filter_zero_points, &filter_quant, 0 /* quantized dimension */, false,
+      tensor_weight_type);
   TfLiteTensor bias_tensor = CreatePerChannelQuantizedBiasTensor(
       bias_data, bias_data_quantized, bias_dims, input_scale, &filter_scales[1],
       bias_scales, bias_zero_points, &bias_quant, 0 /* quantized dimension */);
diff --git a/tensorflow/lite/micro/kernels/cumsum.cc b/tensorflow/lite/micro/kernels/cumsum.cc
index f62f2a5..258cf8d 100644
--- a/tensorflow/lite/micro/kernels/cumsum.cc
+++ b/tensorflow/lite/micro/kernels/cumsum.cc
@@ -104,11 +104,11 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus CumSumPrepare(TfLiteContext* context, TfLiteNode* node) {
   return CalculateOpData(context, node);
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus CumSumEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* input =
       tflite::micro::GetEvalInput(context, node, kInputTensor);
   const TfLiteEvalTensor* axis_tensor =
@@ -169,7 +169,7 @@
 }  // namespace
 
 TFLMRegistration Register_CUMSUM() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  return tflite::micro::RegisterOp(nullptr, CumSumPrepare, CumSumEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/depth_to_space.cc b/tensorflow/lite/micro/kernels/depth_to_space.cc
index 7e0a8fa..d4faf7c 100644
--- a/tensorflow/lite/micro/kernels/depth_to_space.cc
+++ b/tensorflow/lite/micro/kernels/depth_to_space.cc
@@ -93,11 +93,11 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus DepthToSpacePrepare(TfLiteContext* context, TfLiteNode* node) {
   return CalculateOpData(context, node);
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus DepthToSpaceEval(TfLiteContext* context, TfLiteNode* node) {
   auto* params =
       reinterpret_cast<TfLiteDepthToSpaceParams*>(node->builtin_data);
 
@@ -136,7 +136,8 @@
 }  // namespace
 
 TFLMRegistration Register_DEPTH_TO_SPACE() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  return tflite::micro::RegisterOp(nullptr, DepthToSpacePrepare,
+                                   DepthToSpaceEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/depthwise_conv.cc b/tensorflow/lite/micro/kernels/depthwise_conv.cc
index 398f8cd..fa55a70 100644
--- a/tensorflow/lite/micro/kernels/depthwise_conv.cc
+++ b/tensorflow/lite/micro/kernels/depthwise_conv.cc
@@ -27,12 +27,13 @@
 namespace tflite {
 namespace {
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* DepthwiseConvInit(TfLiteContext* context, const char* buffer,
+                        size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
   return context->AllocatePersistentBuffer(context, sizeof(OpDataConv));
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus DepthwiseConvEval(TfLiteContext* context, TfLiteNode* node) {
   TFLITE_DCHECK(node->user_data != nullptr);
   TFLITE_DCHECK(node->builtin_data != nullptr);
 
@@ -143,7 +144,8 @@
 }  // namespace
 
 TFLMRegistration Register_DEPTHWISE_CONV_2D() {
-  return tflite::micro::RegisterOp(Init, DepthwiseConvPrepare, Eval);
+  return tflite::micro::RegisterOp(DepthwiseConvInit, DepthwiseConvPrepare,
+                                   DepthwiseConvEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/depthwise_conv.h b/tensorflow/lite/micro/kernels/depthwise_conv.h
index d8cc78d..5f2d87e 100644
--- a/tensorflow/lite/micro/kernels/depthwise_conv.h
+++ b/tensorflow/lite/micro/kernels/depthwise_conv.h
@@ -1,4 +1,4 @@
-/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -65,6 +65,11 @@
 // implementations.
 TFLMRegistration Register_DEPTHWISE_CONV_2D_INT16();
 
+// Returns a TFLMRegistration struct for kernel variant that only supports
+// int8 activations and int4 weights and uses the latency optimized
+// implementations.
+TFLMRegistration Register_DEPTHWISE_CONV_2D_INT4();
+
 #else
 inline TFLMRegistration Register_DEPTHWISE_CONV_2D_INT8() {
   return Register_DEPTHWISE_CONV_2D();
@@ -73,6 +78,11 @@
 inline TFLMRegistration Register_DEPTHWISE_CONV_2D_INT16() {
   return Register_DEPTHWISE_CONV_2D();
 }
+
+inline TFLMRegistration Register_DEPTHWISE_CONV_2D_INT4() {
+  return Register_DEPTHWISE_CONV_2D();
+}
+
 #endif
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/detection_postprocess.cc b/tensorflow/lite/micro/kernels/detection_postprocess.cc
index e807f35..fa2d4ca 100644
--- a/tensorflow/lite/micro/kernels/detection_postprocess.cc
+++ b/tensorflow/lite/micro/kernels/detection_postprocess.cc
@@ -117,7 +117,8 @@
   TfLiteQuantizationParams input_anchors;
 };
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* DetectionPostProcessInit(TfLiteContext* context, const char* buffer,
+                               size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
   OpData* op_data = nullptr;
 
@@ -149,7 +150,8 @@
   return op_data;
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus DetectionPostProcessPrepare(TfLiteContext* context,
+                                         TfLiteNode* node) {
   auto* op_data = static_cast<OpData*>(node->user_data);
 
   MicroContext* micro_context = GetMicroContext(context);
@@ -774,7 +776,8 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus DetectionPostProcessEval(TfLiteContext* context,
+                                      TfLiteNode* node) {
   TF_LITE_ENSURE(context, (kBatchSize == 1));
   auto* op_data = static_cast<OpData*>(node->user_data);
 
@@ -800,7 +803,9 @@
 }  // namespace
 
 TFLMRegistration* Register_DETECTION_POSTPROCESS() {
-  static TFLMRegistration r = tflite::micro::RegisterOp(Init, Prepare, Eval);
+  static TFLMRegistration r = tflite::micro::RegisterOp(
+      DetectionPostProcessInit, DetectionPostProcessPrepare,
+      DetectionPostProcessEval);
   return &r;
 }
 
diff --git a/tensorflow/lite/micro/kernels/div.cc b/tensorflow/lite/micro/kernels/div.cc
index cc90e22..a80b3f2 100644
--- a/tensorflow/lite/micro/kernels/div.cc
+++ b/tensorflow/lite/micro/kernels/div.cc
@@ -1,4 +1,4 @@
-/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -65,12 +65,12 @@
   return kTfLiteOk;
 }
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* DivInit(TfLiteContext* context, const char* buffer, size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
   return context->AllocatePersistentBuffer(context, sizeof(OpDataDiv));
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus DivPrepare(TfLiteContext* context, TfLiteNode* node) {
   TFLITE_DCHECK(node->user_data != nullptr);
   TFLITE_DCHECK(node->builtin_data != nullptr);
 
@@ -91,12 +91,21 @@
   TF_LITE_ENSURE_STATUS(
       CalculateOpDataDiv(context, input1, input2, output, params, data));
 
+  if (output->type == kTfLiteInt32) {
+    // Only support int32 unquantized DIV for now.
+    TF_LITE_ENSURE_EQ(context, input1->quantization.type,
+                      kTfLiteNoQuantization);
+    TF_LITE_ENSURE_EQ(context, input2->quantization.type,
+                      kTfLiteNoQuantization);
+  }
+
   micro_context->DeallocateTempTfLiteTensor(input1);
   micro_context->DeallocateTempTfLiteTensor(input2);
   micro_context->DeallocateTempTfLiteTensor(output);
   return kTfLiteOk;
 }
 
+template <typename T>
 void EvalDiv(TfLiteContext* context, TfLiteNode* node, TfLiteDivParams* params,
              const OpDataDiv* data, const TfLiteEvalTensor* input1,
              const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) {
@@ -120,9 +129,9 @@
       tflite::micro::GetTensorShape(input2), &op_params);
 
   if (requires_broadcast) {
-    TF_LITE_DIV(reference_ops, BroadcastDivSlow, float);
+    TF_LITE_DIV(reference_ops, BroadcastDivSlow, T);
   } else {
-    TF_LITE_DIV(reference_ops, Div, float);
+    TF_LITE_DIV(reference_ops, Div, T);
   }
 #undef TF_LITE_DIV
 }
@@ -170,7 +179,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus DivEval(TfLiteContext* context, TfLiteNode* node) {
   TFLITE_DCHECK(node->builtin_data != nullptr);
   auto* params = static_cast<TfLiteDivParams*>(node->builtin_data);
   TFLITE_DCHECK(node->user_data != nullptr);
@@ -184,13 +193,15 @@
       tflite::micro::GetEvalOutput(context, node, kOutputTensor);
 
   if (output->type == kTfLiteFloat32) {
-    EvalDiv(context, node, params, data, input1, input2, output);
+    EvalDiv<float>(context, node, params, data, input1, input2, output);
+  } else if (output->type == kTfLiteInt32) {
+    EvalDiv<int32_t>(context, node, params, data, input1, input2, output);
   } else if (output->type == kTfLiteInt8) {
     TF_LITE_ENSURE_OK(context, EvalQuantized(context, node, params, data,
                                              input1, input2, output));
   } else {
     MicroPrintf(
-        "DIV only supports FLOAT32, quantized INT8 "
+        "DIV only supports FLOAT32, INT32, quantized INT8 "
         "now, got type %s (%d).",
         TfLiteTypeGetName(output->type), output->type);
     return kTfLiteError;
@@ -202,7 +213,7 @@
 }  // namespace
 
 TFLMRegistration Register_DIV() {
-  return tflite::micro::RegisterOp(Init, Prepare, Eval);
+  return tflite::micro::RegisterOp(DivInit, DivPrepare, DivEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/div_test.cc b/tensorflow/lite/micro/kernels/div_test.cc
index e020255..ef35d0c 100644
--- a/tensorflow/lite/micro/kernels/div_test.cc
+++ b/tensorflow/lite/micro/kernels/div_test.cc
@@ -1,4 +1,4 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -374,4 +374,65 @@
                                               kTfLiteActNone, &params);
 }
 
+TF_LITE_MICRO_TEST(IntegerDivOpTestNoActivation) {
+  int kDims[] = {4, 1, 2, 2, 1};
+  constexpr int32_t kInput1[] = {-2, 2, -15, 8};
+  constexpr int32_t kInput2[] = {5, -2, -3, 5};
+  constexpr int32_t kExpect[] = {0, -1, 5, 1};
+  constexpr int kOutputCount = std::extent<decltype(kExpect)>::value;
+  int32_t output_data[kOutputCount];
+
+  tflite::testing::TestDiv(kDims, kInput1, kDims, kInput2, kDims, kExpect,
+                           output_data, kTfLiteActNone);
+}
+
+TF_LITE_MICRO_TEST(IntegerDivOpTestActivationRELU_N1_TO_1) {
+  int kDims[] = {4, 1, 2, 2, 1};
+  constexpr int32_t kInput1[] = {-2, 2, -12, 8};
+  constexpr int32_t kInput2[] = {1, 2, -15, 5};
+  constexpr int32_t kExpect[] = {-1, 1, 0, 1};
+  constexpr int kOutputCount = std::extent<decltype(kExpect)>::value;
+  int32_t output_data[kOutputCount];
+
+  tflite::testing::TestDiv(kDims, kInput1, kDims, kInput2, kDims, kExpect,
+                           output_data, kTfLiteActReluN1To1);
+}
+
+TF_LITE_MICRO_TEST(IntegerDivOpTestVariousInputShapes) {
+  int kShape1[] = {1, 6};
+  int kShape2[] = {2, 2, 3};
+  int kShape3[] = {3, 2, 1, 3};
+  int kShape4[] = {4, 1, 3, 1, 2};
+  int* kDims[] = {kShape1, kShape2, kShape3, kShape4};
+  constexpr int kDimsCount = std::extent<decltype(kDims)>::value;
+
+  constexpr int32_t kInput1[] = {-20, 2, 3, 8, 11, -20};
+  constexpr int32_t kInput2[] = {1, 2, 6, 5, -11, -1};
+  constexpr int32_t kExpect[] = {-20, 1, 0, 1, -1, 20};
+  constexpr int kOutputCount = std::extent<decltype(kExpect)>::value;
+  int32_t output_data[kOutputCount];
+
+  tflite::testing::TestDivMultiShape(kDims, kDimsCount, kInput1, kInput2,
+                                     kExpect, output_data, kTfLiteActNone);
+}
+
+TF_LITE_MICRO_TEST(IntegerDivOpTestWithBroadcast) {
+  int kShape1[] = {1, 8};
+  int kShape2[] = {2, 2, 4};
+  int kShape3[] = {3, 2, 1, 4};
+  int kShape4[] = {4, 1, 4, 1, 2};
+  int kShape5[] = {5, 1, 2, 1, 2, 2};
+  int* kDims[] = {kShape1, kShape2, kShape3, kShape4, kShape5};
+  constexpr int kDimsCount = std::extent<decltype(kDims)>::value;
+
+  constexpr int32_t kInput1[] = {-20, 21, 7, 8, 11, -123, -42, -48};
+  constexpr int32_t kInput2[] = {3};
+  constexpr int32_t kExpect[] = {-6, 7, 2, 2, 3, -41, -14, -16};
+  constexpr int kOutputCount = std::extent<decltype(kExpect)>::value;
+  int32_t output_data[kOutputCount];
+
+  tflite::testing::TestDivMultiBroadcast(kDims, kDimsCount, kInput1, kInput2,
+                                         kExpect, output_data, kTfLiteActNone);
+}
+
 TF_LITE_MICRO_TESTS_END
diff --git a/tensorflow/lite/micro/kernels/embedding_lookup.cc b/tensorflow/lite/micro/kernels/embedding_lookup.cc
index 77ac0e0..6a4be87 100644
--- a/tensorflow/lite/micro/kernels/embedding_lookup.cc
+++ b/tensorflow/lite/micro/kernels/embedding_lookup.cc
@@ -65,7 +65,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus EmbeddingLookUpPrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
@@ -178,7 +178,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus EmbeddingLookUpEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* lookup =
       tflite::micro::GetEvalInput(context, node, kInputTensor_0);
   const TfLiteEvalTensor* value =
@@ -207,7 +207,8 @@
 }  // namespace
 
 TFLMRegistration Register_EMBEDDING_LOOKUP() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  return tflite::micro::RegisterOp(nullptr, EmbeddingLookUpPrepare,
+                                   EmbeddingLookUpEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/exp.cc b/tensorflow/lite/micro/kernels/exp.cc
index 1a2e00c..8d1da8f 100644
--- a/tensorflow/lite/micro/kernels/exp.cc
+++ b/tensorflow/lite/micro/kernels/exp.cc
@@ -27,7 +27,7 @@
 constexpr int kInputTensor = 0;
 constexpr int kOutputTensor = 0;
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus ExpPrepare(TfLiteContext* context, TfLiteNode* node) {
   MicroContext* micro_context = GetMicroContext(context);
 
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
@@ -51,7 +51,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus ExpEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* input =
       tflite::micro::GetEvalInput(context, node, kInputTensor);
   TfLiteEvalTensor* output =
@@ -73,7 +73,7 @@
 }  // namespace
 
 TFLMRegistration Register_EXP() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  return tflite::micro::RegisterOp(nullptr, ExpPrepare, ExpEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/expand_dims.cc b/tensorflow/lite/micro/kernels/expand_dims.cc
index 0c4c6ff..d47b42c 100644
--- a/tensorflow/lite/micro/kernels/expand_dims.cc
+++ b/tensorflow/lite/micro/kernels/expand_dims.cc
@@ -13,6 +13,8 @@
 limitations under the License.
 ==============================================================================*/
 
+#include <cstdint>
+
 #include "tensorflow/lite/c/common.h"
 #include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
 #include "tensorflow/lite/kernels/kernel_util.h"
@@ -82,7 +84,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus ExpandDimsPrepare(TfLiteContext* context, TfLiteNode* node) {
   MicroContext* micro_context = GetMicroContext(context);
 
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
@@ -116,7 +118,7 @@
   }
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus ExpandDimsEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* input =
       tflite::micro::GetEvalInput(context, node, kInputTensor);
   TfLiteEvalTensor* output =
@@ -128,13 +130,18 @@
       memCopyN(tflite::micro::GetTensorData<float>(output),
                tflite::micro::GetTensorData<float>(input), flat_size);
     } break;
+    case kTfLiteInt16: {
+      memCopyN(tflite::micro::GetTensorData<int16_t>(output),
+               tflite::micro::GetTensorData<int16_t>(input), flat_size);
+    } break;
     case kTfLiteInt8: {
       memCopyN(tflite::micro::GetTensorData<int8_t>(output),
                tflite::micro::GetTensorData<int8_t>(input), flat_size);
     } break;
     default:
       MicroPrintf(
-          "Expand_Dims only currently supports int8 and float32, got %d.",
+          "Expand_Dims only currently supports int8, int16 and float32, got "
+          "%d.",
           input->type);
       return kTfLiteError;
   }
@@ -143,7 +150,7 @@
 }  // namespace
 
 TFLMRegistration Register_EXPAND_DIMS() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  return tflite::micro::RegisterOp(nullptr, ExpandDimsPrepare, ExpandDimsEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/expand_dims_test.cc b/tensorflow/lite/micro/kernels/expand_dims_test.cc
index d8e217e..39a83b5 100644
--- a/tensorflow/lite/micro/kernels/expand_dims_test.cc
+++ b/tensorflow/lite/micro/kernels/expand_dims_test.cc
@@ -13,6 +13,8 @@
 limitations under the License.
 ==============================================================================*/
 
+#include <cstdint>
+
 #include "tensorflow/lite/c/builtin_op_data.h"
 #include "tensorflow/lite/c/common.h"
 #include "tensorflow/lite/micro/kernels/kernel_runner.h"
@@ -138,6 +140,20 @@
                                           golden_data, output_data);
 }
 
+TF_LITE_MICRO_TEST(ExpandDimsPositiveAxisTest3) {
+  int16_t output_data[6];
+  int input_dims[] = {3, 3, 1, 2};
+  const int16_t input_data[] = {-1, 1, 2, -2, 0, 3};
+  const int16_t golden_data[] = {-1, 1, 2, -2, 0, 3};
+  int axis_dims[] = {1, 1};
+  const int32_t axis_data[] = {3};
+  int golden_dims[] = {1, 3, 1, 2};
+  int output_dims[] = {4, 3, 1, 2, 1};
+  tflite::testing::TestExpandDims<int16_t>(input_dims, input_data, axis_dims,
+                                           axis_data, golden_dims, output_dims,
+                                           golden_data, output_data);
+}
+
 TF_LITE_MICRO_TEST(ExpandDimsNegativeAxisTest4) {
   int8_t output_data[6];
   int input_dims[] = {3, 3, 1, 2};
diff --git a/tensorflow/lite/micro/kernels/fill.cc b/tensorflow/lite/micro/kernels/fill.cc
index b1b366e..1486fcb 100644
--- a/tensorflow/lite/micro/kernels/fill.cc
+++ b/tensorflow/lite/micro/kernels/fill.cc
@@ -64,7 +64,7 @@
 constexpr int kValueTensor = 1;
 constexpr int kOutputTensor = 0;
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus FillPrepare(TfLiteContext* context, TfLiteNode* node) {
   MicroContext* micro_context = GetMicroContext(context);
 
   // Ensure inputs and outputs exist.
@@ -107,7 +107,7 @@
       micro::GetTensorShape(output), micro::GetTensorData<T>(output));
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus FillEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* value =
       micro::GetEvalInput(context, node, kValueTensor);
   TfLiteEvalTensor* output = micro::GetEvalOutput(context, node, kOutputTensor);
@@ -134,7 +134,7 @@
 }  // namespace
 
 TFLMRegistration Register_FILL() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  return tflite::micro::RegisterOp(nullptr, FillPrepare, FillEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/floor.cc b/tensorflow/lite/micro/kernels/floor.cc
index 094c8b5..f92b7e0 100644
--- a/tensorflow/lite/micro/kernels/floor.cc
+++ b/tensorflow/lite/micro/kernels/floor.cc
@@ -26,7 +26,7 @@
 constexpr int kInputTensor = 0;
 constexpr int kOutputTensor = 0;
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus FloorEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* input =
       tflite::micro::GetEvalInput(context, node, kInputTensor);
   TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
@@ -42,7 +42,7 @@
 }  // namespace
 
 TFLMRegistration Register_FLOOR() {
-  return tflite::micro::RegisterOp(nullptr, nullptr, Eval);
+  return tflite::micro::RegisterOp(nullptr, nullptr, FloorEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/floor_div.cc b/tensorflow/lite/micro/kernels/floor_div.cc
index 5c00808..9adf614 100644
--- a/tensorflow/lite/micro/kernels/floor_div.cc
+++ b/tensorflow/lite/micro/kernels/floor_div.cc
@@ -57,11 +57,11 @@
   return kTfLiteOk;
 }
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* FloorDivInit(TfLiteContext* context, const char* buffer, size_t length) {
   return nullptr;
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus FloorDivPrepare(TfLiteContext* context, TfLiteNode* node) {
   return CalculateOpData(context, node);
 }
 
@@ -101,7 +101,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus FloorDivEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* input1 =
       tflite::micro::GetEvalInput(context, node, kInputTensor1);
   const TfLiteEvalTensor* input2 =
@@ -124,7 +124,7 @@
 }  // namespace
 
 TFLMRegistration Register_FLOOR_DIV() {
-  return tflite::micro::RegisterOp(Init, Prepare, Eval);
+  return tflite::micro::RegisterOp(FloorDivInit, FloorDivPrepare, FloorDivEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/floor_mod.cc b/tensorflow/lite/micro/kernels/floor_mod.cc
index f459892..da2a7c9 100644
--- a/tensorflow/lite/micro/kernels/floor_mod.cc
+++ b/tensorflow/lite/micro/kernels/floor_mod.cc
@@ -62,11 +62,11 @@
   return kTfLiteOk;
 }
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* FloorModInit(TfLiteContext* context, const char* buffer, size_t length) {
   return nullptr;
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus FloorModPrepare(TfLiteContext* context, TfLiteNode* node) {
   return CalculateOpData(context, node);
 }
 
@@ -96,7 +96,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus FloorModEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* input1 =
       tflite::micro::GetEvalInput(context, node, kInputTensor1);
   const TfLiteEvalTensor* input2 =
@@ -122,7 +122,7 @@
 }  // namespace
 
 TFLMRegistration Register_FLOOR_MOD() {
-  return tflite::micro::RegisterOp(Init, Prepare, Eval);
+  return tflite::micro::RegisterOp(FloorModInit, FloorModPrepare, FloorModEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/fully_connected.cc b/tensorflow/lite/micro/kernels/fully_connected.cc
index f732b29..65c8379 100644
--- a/tensorflow/lite/micro/kernels/fully_connected.cc
+++ b/tensorflow/lite/micro/kernels/fully_connected.cc
@@ -26,13 +26,14 @@
 namespace tflite {
 namespace {
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* FullyConnectedInit(TfLiteContext* context, const char* buffer,
+                         size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
   return context->AllocatePersistentBuffer(context,
                                            sizeof(OpDataFullyConnected));
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus FullyConnectedPrepare(TfLiteContext* context, TfLiteNode* node) {
   MicroContext* micro_context = GetMicroContext(context);
 
   TFLITE_DCHECK(node->user_data != nullptr);
@@ -87,7 +88,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus FullyConnectedEval(TfLiteContext* context, TfLiteNode* node) {
   TFLITE_DCHECK(node->builtin_data != nullptr);
   const auto* params =
       static_cast<const TfLiteFullyConnectedParams*>(node->builtin_data);
@@ -200,7 +201,12 @@
 }  // namespace
 
 TFLMRegistration Register_FULLY_CONNECTED() {
-  return tflite::micro::RegisterOp(Init, Prepare, Eval);
+  return tflite::micro::RegisterOp(FullyConnectedInit, FullyConnectedPrepare,
+                                   FullyConnectedEval);
+}
+
+TFLMInferenceRegistration RegisterInference_FULLY_CONNECTED() {
+  return tflite::micro::RegisterOp(FullyConnectedEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/fully_connected.h b/tensorflow/lite/micro/kernels/fully_connected.h
index 3fa6060..8308838 100644
--- a/tensorflow/lite/micro/kernels/fully_connected.h
+++ b/tensorflow/lite/micro/kernels/fully_connected.h
@@ -1,4 +1,4 @@
-/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -95,6 +95,10 @@
 // int16.
 TFLMRegistration Register_FULLY_CONNECTED_INT16();
 
+// Returns a TFLMRegistration struct for kernel variant that only supports
+// int8 and int4 packed kernels.
+TFLMRegistration Register_FULLY_CONNECTED_INT4();
+
 #else
 // Note that while this block gets used for both reference and optimized kernels
 // that do not have any specialized implementations, the only goal here is to
@@ -105,6 +109,10 @@
   return Register_FULLY_CONNECTED();
 }
 
+inline TFLMRegistration Register_FULLY_CONNECTED_INT4() {
+  return Register_FULLY_CONNECTED();
+}
+
 #endif
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/fully_connected_common.cc b/tensorflow/lite/micro/kernels/fully_connected_common.cc
index 5a8d312..66f8787 100644
--- a/tensorflow/lite/micro/kernels/fully_connected_common.cc
+++ b/tensorflow/lite/micro/kernels/fully_connected_common.cc
@@ -57,6 +57,24 @@
     TfLiteType data_type, const TfLiteTensor* input, const TfLiteTensor* filter,
     const TfLiteTensor* bias, TfLiteTensor* output,
     OpDataFullyConnected* data) {
+  // TODO(b/324385802): Support per-channel quantization for FullyConnected.
+  // If you have hit this failure message, you will need to disable this
+  // behavior. This can be done by setting the following flag to true:
+  // TfLiteConverter._experimental_disable_per_channel_quantization_for_dense_layers
+  // https://github.com/tensorflow/tensorflow/blob/377f47694fa790e98db6665b9adecde00b5e0d68/tensorflow/lite/python/lite.py#L674
+  if (filter->quantization.type == kTfLiteAffineQuantization &&
+      filter->quantization.params != nullptr) {
+    TfLiteAffineQuantization* affine_quantization =
+        reinterpret_cast<TfLiteAffineQuantization*>(
+            filter->quantization.params);
+    TF_LITE_ENSURE(context, affine_quantization->scale);
+    TF_LITE_ENSURE_MSG(
+        context, affine_quantization->scale->size == 1,
+        "FullyConnected per-channel quantization not yet supported. Please set "
+        "converter._experimental_disable_per_channel_quantization_for_dense_"
+        "layers = True.");
+  }
+
   if (data_type != kTfLiteFloat32) {
     double real_multiplier = 0.0;
     TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler(
diff --git a/tensorflow/lite/micro/kernels/fully_connected_test.cc b/tensorflow/lite/micro/kernels/fully_connected_test.cc
index 2e9206a..2ad1320 100644
--- a/tensorflow/lite/micro/kernels/fully_connected_test.cc
+++ b/tensorflow/lite/micro/kernels/fully_connected_test.cc
@@ -247,7 +247,8 @@
     const TfLiteFusedActivation activation, const float tolerance,
     const int output_len, const T* golden, T* output_data) {
   TfLiteFullyConnectedParams builtin_data = {
-      activation, kTfLiteFullyConnectedWeightsFormatDefault, false, false};
+      activation, kTfLiteFullyConnectedWeightsFormatDefault, false, false,
+      kTfLiteNoType};
 
   // Avoid variable length array warning.
   constexpr int inputs_array_len = 4;
diff --git a/tensorflow/lite/micro/kernels/gather.cc b/tensorflow/lite/micro/kernels/gather.cc
index 9955601..a0af4c0 100644
--- a/tensorflow/lite/micro/kernels/gather.cc
+++ b/tensorflow/lite/micro/kernels/gather.cc
@@ -97,7 +97,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus GatherPrepare(TfLiteContext* context, TfLiteNode* node) {
   MicroContext* micro_context = GetMicroContext(context);
 
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
@@ -188,7 +188,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus GatherEval(TfLiteContext* context, TfLiteNode* node) {
   const auto* params =
       reinterpret_cast<const TfLiteGatherParams*>(node->builtin_data);
   const TfLiteEvalTensor* input =
@@ -218,7 +218,7 @@
 }  // namespace
 
 TFLMRegistration Register_GATHER() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  return tflite::micro::RegisterOp(nullptr, GatherPrepare, GatherEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/gather_nd.cc b/tensorflow/lite/micro/kernels/gather_nd.cc
index 3774ddd..d01af7c 100644
--- a/tensorflow/lite/micro/kernels/gather_nd.cc
+++ b/tensorflow/lite/micro/kernels/gather_nd.cc
@@ -28,7 +28,7 @@
 constexpr int kOutputTensor = 0;
 constexpr int MAX_INDICES_ND = 5;
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus GatherNdPrepare(TfLiteContext* context, TfLiteNode* node) {
   MicroContext* micro_context = GetMicroContext(context);
 
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
@@ -185,7 +185,7 @@
   return status;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus GatherNdEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* params =
       tflite::micro::GetEvalInput(context, node, kParams);
   const TfLiteEvalTensor* indices =
@@ -206,7 +206,7 @@
 }  // namespace
 
 TFLMRegistration Register_GATHER_ND() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  return tflite::micro::RegisterOp(nullptr, GatherNdPrepare, GatherNdEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/if.cc b/tensorflow/lite/micro/kernels/if.cc
index 9143c9c..029846b 100644
--- a/tensorflow/lite/micro/kernels/if.cc
+++ b/tensorflow/lite/micro/kernels/if.cc
@@ -38,12 +38,12 @@
   int else_subgraph_index;
 };
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* IfInit(TfLiteContext* context, const char* buffer, size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
   return context->AllocatePersistentBuffer(context, sizeof(OpData));
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus IfPrepare(TfLiteContext* context, TfLiteNode* node) {
   OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
   const auto* params =
       reinterpret_cast<const TfLiteIfParams*>(node->builtin_data);
@@ -67,7 +67,7 @@
   // passed to the branch subgraphs. Therefore, the number of subgraph inputs
   // will be the number of node inputs - 1.
   size_t num_inputs = node->inputs->size - 1;
-  size_t num_outputs = node->outputs->size;
+  size_t num_outputs = NumOutputs(node);
 
   MicroGraph& graph_info = micro_context->graph();
 
@@ -85,7 +85,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus IfEval(TfLiteContext* context, TfLiteNode* node) {
   const OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
 
   tflite::MicroContext* micro_context = tflite::GetMicroContext(context);
@@ -117,7 +117,7 @@
 }  // namespace.
 
 TFLMRegistration Register_IF() {
-  return tflite::micro::RegisterOp(Init, Prepare, Eval);
+  return tflite::micro::RegisterOp(IfInit, IfPrepare, IfEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/kernel_runner.cc b/tensorflow/lite/micro/kernels/kernel_runner.cc
index d5112a1..79824ef 100644
--- a/tensorflow/lite/micro/kernels/kernel_runner.cc
+++ b/tensorflow/lite/micro/kernels/kernel_runner.cc
@@ -1,4 +1,4 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -18,7 +18,6 @@
 #include "tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h"
 #include "tensorflow/lite/micro/micro_arena_constants.h"
 #include "tensorflow/lite/micro/micro_log.h"
-#include "tensorflow/lite/micro/test_helpers.h"
 
 namespace tflite {
 namespace micro {
@@ -37,12 +36,23 @@
 KernelRunner::KernelRunner(const TFLMRegistration& registration,
                            TfLiteTensor* tensors, int tensors_size,
                            TfLiteIntArray* inputs, TfLiteIntArray* outputs,
-                           void* builtin_data, TfLiteIntArray* intermediates)
+                           const void* builtin_data,
+                           TfLiteIntArray* intermediates
+#ifdef USE_TFLM_COMPRESSION
+                           ,
+                           const CompressedTensorList* compressed_tensors
+#endif  // USE_TFLM_COMPRESSION
+                           )
     : registration_(registration),
       allocator_(SingleArenaBufferAllocator::Create(kKernelRunnerBuffer_,
                                                     kKernelRunnerBufferSize_)),
       mock_micro_graph_(allocator_),
-      fake_micro_context_(tensors, allocator_, &mock_micro_graph_) {
+      fake_micro_context_(tensors, allocator_, &mock_micro_graph_
+#ifdef USE_TFLM_COMPRESSION
+                          ,
+                          compressed_tensors
+#endif  // USE_TFLM_COMPRESSION
+      ) {
   // Prepare TfLiteContext:
   context_.impl_ = static_cast<void*>(&fake_micro_context_);
   context_.ReportError = MicroContextReportOpError;
@@ -57,7 +67,7 @@
   // Prepare TfLiteNode:
   node_.inputs = inputs;
   node_.outputs = outputs;
-  node_.builtin_data = builtin_data;
+  node_.builtin_data = const_cast<void*>(builtin_data);
   node_.intermediates = intermediates;
 }
 
diff --git a/tensorflow/lite/micro/kernels/kernel_runner.h b/tensorflow/lite/micro/kernels/kernel_runner.h
index d617c44..8dbd7f8 100644
--- a/tensorflow/lite/micro/kernels/kernel_runner.h
+++ b/tensorflow/lite/micro/kernels/kernel_runner.h
@@ -1,4 +1,4 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -35,8 +35,13 @@
  public:
   KernelRunner(const TFLMRegistration& registration, TfLiteTensor* tensors,
                int tensors_size, TfLiteIntArray* inputs,
-               TfLiteIntArray* outputs, void* builtin_data,
-               TfLiteIntArray* intermediates = nullptr);
+               TfLiteIntArray* outputs, const void* builtin_data,
+               TfLiteIntArray* intermediates = nullptr
+#ifdef USE_TFLM_COMPRESSION
+               ,
+               const CompressedTensorList* compressed_tensors = nullptr
+#endif  // USE_TFLM_COMPRESSION
+  );
 
   // Calls init and prepare on the kernel (i.e. TFLMRegistration) struct.
   // Any exceptions will be DebugLog'd and returned as a status code.
diff --git a/tensorflow/lite/micro/kernels/kernel_util.cc b/tensorflow/lite/micro/kernels/kernel_util.cc
index ffffa08..a509f5d 100644
--- a/tensorflow/lite/micro/kernels/kernel_util.cc
+++ b/tensorflow/lite/micro/kernels/kernel_util.cc
@@ -53,6 +53,15 @@
           /*custom_name=*/nullptr};
 }
 
+TFLMInferenceRegistration RegisterOp(
+    TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node),
+    void (*reset)(TfLiteContext* context, void* buffer)) {
+  return {
+      /*invoke=*/invoke,
+      /*reset*/ reset,
+  };
+}
+
 // Returns a mutable tensor for a given input index. is_variable must be checked
 // during prepare when the full TfLiteTensor is available.
 TfLiteEvalTensor* GetMutableEvalInput(const TfLiteContext* context,
@@ -243,6 +252,7 @@
                                             TfLiteNode* node,
                                             MicroGraph* graph_info,
                                             int subgraph_idx) {
+  if (graph_info->NumSubgraphOutputs(subgraph_idx) == 0) return kTfLiteOk;
   TF_LITE_ENSURE(context, static_cast<size_t>(node->outputs->size) ==
                               graph_info->NumSubgraphOutputs(subgraph_idx));
   for (int i = 0; i < node->outputs->size; i++) {
diff --git a/tensorflow/lite/micro/kernels/kernel_util.h b/tensorflow/lite/micro/kernels/kernel_util.h
index 080a0b3..977ed95 100644
--- a/tensorflow/lite/micro/kernels/kernel_util.h
+++ b/tensorflow/lite/micro/kernels/kernel_util.h
@@ -1,4 +1,4 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -35,6 +35,10 @@
     void (*free)(TfLiteContext* context, void* buffer) = nullptr,
     void (*reset)(TfLiteContext* context, void* buffer) = nullptr);
 
+TFLMInferenceRegistration RegisterOp(
+    TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node),
+    void (*reset)(TfLiteContext* context, void* buffer) = nullptr);
+
 // Prints out n bytes in a int8_t buffer as hex
 void PrintNBytes(const int8_t* tensor_data, int n_bytes,
                  const char* prefix = nullptr);
@@ -87,6 +91,31 @@
                            : reinterpret_cast<const T*>(tensor->data.raw);
 }
 
+#ifdef USE_TFLM_COMPRESSION
+
+// Overloads existing GetTensorData. If not compressed, this will return
+// tensor->data.
+//
+// TODO(ddavis-2015): make micro_context a const pointer
+template <typename T>
+const T* GetTensorData(MicroContext* micro_context,
+                       const TfLiteEvalTensor* tensor,
+                       const CompressionTensorData* compression_data,
+                       int scratch_buffer_handle) {
+  if (tensor == nullptr) {
+    return nullptr;
+  }
+  if (compression_data == nullptr) {
+    return reinterpret_cast<const T*>(tensor->data.data);
+  }
+
+  void* uncompressed_data = micro_context->DecompressTensorToScratchBuffer(
+      *tensor, *compression_data, scratch_buffer_handle);
+  return reinterpret_cast<const T*>(uncompressed_data);
+}
+
+#endif  // USE_TFLM_COMPRESSION
+
 // Returns the shape of a TfLiteEvalTensor struct.
 const RuntimeShape GetTensorShape(const TfLiteEvalTensor* tensor);
 
diff --git a/tensorflow/lite/micro/kernels/l2norm.cc b/tensorflow/lite/micro/kernels/l2norm.cc
index fa3601b..bde38de 100644
--- a/tensorflow/lite/micro/kernels/l2norm.cc
+++ b/tensorflow/lite/micro/kernels/l2norm.cc
@@ -33,7 +33,7 @@
 constexpr int kInputTensor = 0;
 constexpr int kOutputTensor = 0;
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus L2NormPrepare(TfLiteContext* context, TfLiteNode* node) {
   TFLITE_DCHECK(node->user_data != nullptr);
   TFLITE_DCHECK(node->builtin_data != nullptr);
 
@@ -72,13 +72,13 @@
   return kTfLiteOk;
 }
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* L2NormInit(TfLiteContext* context, const char* buffer, size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
   return context->AllocatePersistentBuffer(context,
                                            sizeof(L2NormalizationParams));
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus L2NormEval(TfLiteContext* context, TfLiteNode* node) {
   TFLITE_DCHECK(node->user_data != nullptr);
   const L2NormalizationParams& data =
       *(static_cast<const L2NormalizationParams*>(node->user_data));
@@ -132,7 +132,7 @@
 }  // namespace
 
 TFLMRegistration Register_L2NORM_REF() {
-  return tflite::micro::RegisterOp(Init, Prepare, Eval);
+  return tflite::micro::RegisterOp(L2NormInit, L2NormPrepare, L2NormEval);
 }
 
 TFLMRegistration Register_L2_NORMALIZATION() { return Register_L2NORM_REF(); }
diff --git a/tensorflow/lite/micro/kernels/lstm_eval_test.cc b/tensorflow/lite/micro/kernels/lstm_eval_test.cc
index 53c0d7c..eaba2c4 100644
--- a/tensorflow/lite/micro/kernels/lstm_eval_test.cc
+++ b/tensorflow/lite/micro/kernels/lstm_eval_test.cc
@@ -454,6 +454,6 @@
                                        cell_state_tolerance,
                                        int16_node_contents);
 }
-
 #endif  // !defined(XTENSA)
+
 TF_LITE_MICRO_TESTS_END
diff --git a/tensorflow/lite/micro/kernels/micro_ops.h b/tensorflow/lite/micro/kernels/micro_ops.h
index 5bffa09..2e33a67 100644
--- a/tensorflow/lite/micro/kernels/micro_ops.h
+++ b/tensorflow/lite/micro/kernels/micro_ops.h
@@ -40,6 +40,7 @@
 TFLMRegistration Register_ARG_MIN();
 TFLMRegistration Register_ASSIGN_VARIABLE();
 TFLMRegistration Register_AVERAGE_POOL_2D();
+TFLMRegistration Register_BATCH_MATMUL();
 TFLMRegistration Register_BATCH_TO_SPACE_ND();
 TFLMRegistration Register_BROADCAST_ARGS();
 TFLMRegistration Register_BROADCAST_TO();
@@ -133,6 +134,9 @@
 TFLMRegistration Register_WHILE();
 TFLMRegistration Register_ZEROS_LIKE();
 
+// TODO(b/295174388): Add the rest of inference only registration functions.
+TFLMInferenceRegistration RegisterInference_FULLY_CONNECTED();
+
 // TODO(b/160234179): Change custom OPs to also return by value.
 namespace tflm_signal {
 TFLMRegistration* Register_DELAY();
@@ -144,6 +148,7 @@
 TFLMRegistration* Register_ENERGY();
 TFLMRegistration* Register_FRAMER();
 TFLMRegistration* Register_OVERLAP_ADD();
+TFLMRegistration* Register_PCAN();
 TFLMRegistration* Register_STACKER();
 TFLMRegistration* Register_WINDOW();
 }  // namespace tflm_signal
diff --git a/tensorflow/lite/micro/kernels/mirror_pad.cc b/tensorflow/lite/micro/kernels/mirror_pad.cc
index 4cbaf52..aa94e1b 100644
--- a/tensorflow/lite/micro/kernels/mirror_pad.cc
+++ b/tensorflow/lite/micro/kernels/mirror_pad.cc
@@ -100,7 +100,7 @@
   }
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus MirrorPadEval(TfLiteContext* context, TfLiteNode* node) {
   TFLITE_DCHECK(node->user_data != nullptr);
   TfLiteStatus status = kTfLiteOk;
   const OpDataMirrorPad* data =
@@ -161,12 +161,12 @@
   return status;
 }
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* MirrorPadInit(TfLiteContext* context, const char* buffer, size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
   return context->AllocatePersistentBuffer(context, sizeof(OpDataMirrorPad));
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus MirrorPadPrepare(TfLiteContext* context, TfLiteNode* node) {
   MicroContext* micro_context = GetMicroContext(context);
 
   TFLITE_DCHECK(node->user_data != nullptr);
@@ -209,7 +209,8 @@
 }  // namespace
 
 TFLMRegistration Register_MIRROR_PAD() {
-  return tflite::micro::RegisterOp(Init, Prepare, Eval);
+  return tflite::micro::RegisterOp(MirrorPadInit, MirrorPadPrepare,
+                                   MirrorPadEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/neg.cc b/tensorflow/lite/micro/kernels/neg.cc
index c80a809..a76ac01 100644
--- a/tensorflow/lite/micro/kernels/neg.cc
+++ b/tensorflow/lite/micro/kernels/neg.cc
@@ -27,7 +27,7 @@
 constexpr int kInputTensor = 0;
 constexpr int kOutputTensor = 0;
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus NegEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* input =
       tflite::micro::GetEvalInput(context, node, kInputTensor);
   TfLiteEvalTensor* output =
@@ -51,7 +51,7 @@
 }  // namespace
 
 TFLMRegistration Register_NEG() {
-  return tflite::micro::RegisterOp(nullptr, nullptr, Eval);
+  return tflite::micro::RegisterOp(nullptr, nullptr, NegEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/pack.cc b/tensorflow/lite/micro/kernels/pack.cc
index 5ee2759..0cfd91b 100644
--- a/tensorflow/lite/micro/kernels/pack.cc
+++ b/tensorflow/lite/micro/kernels/pack.cc
@@ -69,7 +69,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus PackEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLitePackParams* data =
       reinterpret_cast<TfLitePackParams*>(node->builtin_data);
 
@@ -109,7 +109,7 @@
 }  // namespace
 
 TFLMRegistration Register_PACK() {
-  return tflite::micro::RegisterOp(nullptr, nullptr, Eval);
+  return tflite::micro::RegisterOp(nullptr, nullptr, PackEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/pad.cc b/tensorflow/lite/micro/kernels/pad.cc
index f8d40ad..29f08fa 100644
--- a/tensorflow/lite/micro/kernels/pad.cc
+++ b/tensorflow/lite/micro/kernels/pad.cc
@@ -32,12 +32,12 @@
   int32_t output_zero_point;
 };
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* PadInit(TfLiteContext* context, const char* buffer, size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
   return context->AllocatePersistentBuffer(context, sizeof(OpData));
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus PadEval(TfLiteContext* context, TfLiteNode* node) {
   TFLITE_DCHECK(node->user_data != nullptr);
   const OpData* data = static_cast<const OpData*>(node->user_data);
 
@@ -218,12 +218,12 @@
 }
 
 TFLMRegistration Register_PAD() {
-  return tflite::micro::RegisterOp(Init, PadPrepare, Eval);
+  return tflite::micro::RegisterOp(PadInit, PadPrepare, PadEval);
 }
 
 // Also register Pad as PadV2.
 TFLMRegistration Register_PADV2() {
-  return tflite::micro::RegisterOp(Init, PadPrepare, Eval);
+  return tflite::micro::RegisterOp(PadInit, PadPrepare, PadEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/pooling.cc b/tensorflow/lite/micro/kernels/pooling.cc
index e03f72e..c178d9b 100644
--- a/tensorflow/lite/micro/kernels/pooling.cc
+++ b/tensorflow/lite/micro/kernels/pooling.cc
@@ -91,7 +91,7 @@
   return kTfLiteOk;
 }
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* PoolInit(TfLiteContext* context, const char* buffer, size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
   return context->AllocatePersistentBuffer(context, sizeof(OpDataPooling));
 }
@@ -99,11 +99,11 @@
 }  // namespace
 
 TFLMRegistration Register_AVERAGE_POOL_2D() {
-  return tflite::micro::RegisterOp(Init, PoolingPrepare, AverageEval);
+  return tflite::micro::RegisterOp(PoolInit, PoolingPrepare, AverageEval);
 }
 
 TFLMRegistration Register_MAX_POOL_2D() {
-  return tflite::micro::RegisterOp(Init, PoolingPrepare, MaxEval);
+  return tflite::micro::RegisterOp(PoolInit, PoolingPrepare, MaxEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/quantize.cc b/tensorflow/lite/micro/kernels/quantize.cc
index 1ac6942..ba11f19 100644
--- a/tensorflow/lite/micro/kernels/quantize.cc
+++ b/tensorflow/lite/micro/kernels/quantize.cc
@@ -25,7 +25,8 @@
 namespace tflite {
 namespace {
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* InitQuantizeReference(TfLiteContext* context, const char* buffer,
+                            size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
   return context->AllocatePersistentBuffer(context,
                                            sizeof(OpDataQuantizeReference));
@@ -34,8 +35,8 @@
 }  // namespace
 
 TFLMRegistration Register_QUANTIZE() {
-  return tflite::micro::RegisterOp(Init, PrepareQuantizeReference,
-                                   EvalQuantizeReference);
+  return tflite::micro::RegisterOp(
+      InitQuantizeReference, PrepareQuantizeReference, EvalQuantizeReference);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/reduce.cc b/tensorflow/lite/micro/kernels/reduce.cc
index ab24a82..a689d3b 100644
--- a/tensorflow/lite/micro/kernels/reduce.cc
+++ b/tensorflow/lite/micro/kernels/reduce.cc
@@ -29,7 +29,9 @@
 namespace tflite {
 
 void* InitReduce(TfLiteContext* context, const char* buffer, size_t length) {
-  return context->AllocatePersistentBuffer(context, sizeof(OpDataReduce));
+  void* op_data =
+      context->AllocatePersistentBuffer(context, sizeof(OpDataReduce));
+  return new (op_data) OpDataReduce();
 }
 
 TfLiteStatus PrepareMax(TfLiteContext* context, TfLiteNode* node) {
diff --git a/tensorflow/lite/micro/kernels/reduce_common.cc b/tensorflow/lite/micro/kernels/reduce_common.cc
index 0dab49c..2c1a92a 100644
--- a/tensorflow/lite/micro/kernels/reduce_common.cc
+++ b/tensorflow/lite/micro/kernels/reduce_common.cc
@@ -74,7 +74,9 @@
   TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0);
   TfLiteTensor* axis = micro_context->AllocateTempInputTensor(node, 1);
 
+  op_data->input_zp = input->params.zero_point;
   op_data->input_scale = input->params.scale;
+  op_data->output_zp = output->params.zero_point;
   op_data->output_scale = output->params.scale;
   op_data->num_output_elements = NumElements(output);
 
diff --git a/tensorflow/lite/micro/kernels/reshape.cc b/tensorflow/lite/micro/kernels/reshape.cc
index 4e6c530..5527798 100644
--- a/tensorflow/lite/micro/kernels/reshape.cc
+++ b/tensorflow/lite/micro/kernels/reshape.cc
@@ -29,7 +29,7 @@
 namespace tflite {
 namespace {
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus EvalReshapeReference(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* input =
       tflite::micro::GetEvalInput(context, node, kReshapeInputTensor);
   TfLiteEvalTensor* output =
@@ -52,7 +52,8 @@
 }  // namespace
 
 TFLMRegistration Register_RESHAPE() {
-  return tflite::micro::RegisterOp(nullptr, PrepareReshapeReference, Eval);
+  return tflite::micro::RegisterOp(nullptr, PrepareReshapeReference,
+                                   EvalReshapeReference);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/resize_bilinear.cc b/tensorflow/lite/micro/kernels/resize_bilinear.cc
index e701e03..ab54e81 100644
--- a/tensorflow/lite/micro/kernels/resize_bilinear.cc
+++ b/tensorflow/lite/micro/kernels/resize_bilinear.cc
@@ -30,7 +30,7 @@
 constexpr int kSizeTensor = 1;
 constexpr int kOutputTensor = 0;
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus ResizeBilinearPrepare(TfLiteContext* context, TfLiteNode* node) {
   MicroContext* micro_context = GetMicroContext(context);
 
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
@@ -66,7 +66,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus ResizeBilinearEval(TfLiteContext* context, TfLiteNode* node) {
   auto* params =
       reinterpret_cast<TfLiteResizeBilinearParams*>(node->builtin_data);
 
@@ -110,7 +110,8 @@
 }  // namespace
 
 TFLMRegistration Register_RESIZE_BILINEAR() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  return tflite::micro::RegisterOp(nullptr, ResizeBilinearPrepare,
+                                   ResizeBilinearEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cc b/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cc
index 46b6ea1..ef2d35d 100644
--- a/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cc
+++ b/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cc
@@ -31,7 +31,8 @@
 constexpr int kSizeTensor = 1;
 constexpr int kOutputTensor = 0;
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus ResizeNearestNeighborPrepare(TfLiteContext* context,
+                                          TfLiteNode* node) {
   MicroContext* micro_context = GetMicroContext(context);
 
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
@@ -65,7 +66,8 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus ResizeNearestNeighborEval(TfLiteContext* context,
+                                       TfLiteNode* node) {
   auto* params =
       reinterpret_cast<TfLiteResizeNearestNeighborParams*>(node->builtin_data);
 
@@ -117,7 +119,8 @@
 }  // namespace
 
 TFLMRegistration Register_RESIZE_NEAREST_NEIGHBOR() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  return tflite::micro::RegisterOp(nullptr, ResizeNearestNeighborPrepare,
+                                   ResizeNearestNeighborEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/round.cc b/tensorflow/lite/micro/kernels/round.cc
index a42349e..ae8e354 100644
--- a/tensorflow/lite/micro/kernels/round.cc
+++ b/tensorflow/lite/micro/kernels/round.cc
@@ -26,7 +26,7 @@
 constexpr int kInputTensor = 0;
 constexpr int kOutputTensor = 0;
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus RoundPrepare(TfLiteContext* context, TfLiteNode* node) {
   MicroContext* micro_context = GetMicroContext(context);
 
   TfLiteTensor* input =
@@ -50,7 +50,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus RoundEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* input =
       tflite::micro::GetEvalInput(context, node, kInputTensor);
   TfLiteEvalTensor* output =
@@ -66,7 +66,7 @@
 }  // namespace
 
 TFLMRegistration Register_ROUND() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  return tflite::micro::RegisterOp(nullptr, RoundPrepare, RoundEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/shape.cc b/tensorflow/lite/micro/kernels/shape.cc
index a39bfc0..d95e450 100644
--- a/tensorflow/lite/micro/kernels/shape.cc
+++ b/tensorflow/lite/micro/kernels/shape.cc
@@ -35,14 +35,14 @@
   }
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus ShapePrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus ShapeEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* input =
       tflite::micro::GetEvalInput(context, node, kInputTensor);
   TfLiteEvalTensor* output =
@@ -61,7 +61,7 @@
 }  // namespace
 
 TFLMRegistration Register_SHAPE() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  return tflite::micro::RegisterOp(nullptr, ShapePrepare, ShapeEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/slice.cc b/tensorflow/lite/micro/kernels/slice.cc
index 973da18..7963425 100644
--- a/tensorflow/lite/micro/kernels/slice.cc
+++ b/tensorflow/lite/micro/kernels/slice.cc
@@ -44,7 +44,7 @@
   }
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus SlicePrepare(TfLiteContext* context, TfLiteNode* node) {
   MicroContext* micro_context = GetMicroContext(context);
 
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
@@ -81,7 +81,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus SliceEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* input =
       tflite::micro::GetEvalInput(context, node, kInputTensor);
   const TfLiteEvalTensor* begin =
@@ -158,7 +158,7 @@
 }  // namespace
 
 TFLMRegistration Register_SLICE() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  return tflite::micro::RegisterOp(nullptr, SlicePrepare, SliceEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/space_to_batch_nd.cc b/tensorflow/lite/micro/kernels/space_to_batch_nd.cc
index 6b536ee..f8df149 100644
--- a/tensorflow/lite/micro/kernels/space_to_batch_nd.cc
+++ b/tensorflow/lite/micro/kernels/space_to_batch_nd.cc
@@ -39,12 +39,13 @@
 const int kInputOutputMinDimensionNum = 3;
 const int kInputOutputMaxDimensionNum = 4;
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* SpaceToBatchNDInit(TfLiteContext* context, const char* buffer,
+                         size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
   return context->AllocatePersistentBuffer(context, sizeof(SpaceToBatchParams));
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus SpaceToBatchNDPrepare(TfLiteContext* context, TfLiteNode* node) {
   MicroContext* micro_context = GetMicroContext(context);
 
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
@@ -67,7 +68,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus SpaceToBatchNDEval(TfLiteContext* context, TfLiteNode* node) {
   TFLITE_DCHECK(node->user_data != nullptr);
   const SpaceToBatchParams& params =
       *(static_cast<const SpaceToBatchParams*>(node->user_data));
@@ -115,7 +116,8 @@
 }  // namespace.
 
 TFLMRegistration Register_SPACE_TO_BATCH_ND() {
-  return tflite::micro::RegisterOp(Init, Prepare, Eval);
+  return tflite::micro::RegisterOp(SpaceToBatchNDInit, SpaceToBatchNDPrepare,
+                                   SpaceToBatchNDEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/split.cc b/tensorflow/lite/micro/kernels/split.cc
index aa87720..cae7074 100644
--- a/tensorflow/lite/micro/kernels/split.cc
+++ b/tensorflow/lite/micro/kernels/split.cc
@@ -67,7 +67,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus SplitPrepare(TfLiteContext* context, TfLiteNode* node) {
   MicroContext* micro_context = GetMicroContext(context);
   TfLiteTensor* axis = micro_context->AllocateTempInputTensor(node, 0);
   TF_LITE_ENSURE(context, axis != nullptr);
@@ -82,7 +82,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus SplitEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 0);
   const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 1);
 
@@ -119,7 +119,7 @@
 }  // namespace
 
 TFLMRegistration Register_SPLIT() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  return tflite::micro::RegisterOp(nullptr, SplitPrepare, SplitEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/split_v.cc b/tensorflow/lite/micro/kernels/split_v.cc
index 6aed6f7..ad96a20 100644
--- a/tensorflow/lite/micro/kernels/split_v.cc
+++ b/tensorflow/lite/micro/kernels/split_v.cc
@@ -71,7 +71,7 @@
   return kTfLiteOk;

 }

 

-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {

+TfLiteStatus SplitVPrepare(TfLiteContext* context, TfLiteNode* node) {

   TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);

 

   MicroContext* micro_context = GetMicroContext(context);

@@ -85,7 +85,7 @@
   return kTfLiteOk;

 }

 

-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {

+TfLiteStatus SplitVEval(TfLiteContext* context, TfLiteNode* node) {

   const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0);

   const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 2);

 

@@ -121,7 +121,7 @@
 }  // namespace

 

 TFLMRegistration Register_SPLIT_V() {

-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);

+  return tflite::micro::RegisterOp(nullptr, SplitVPrepare, SplitVEval);

 }

 

 }  // namespace tflite

diff --git a/tensorflow/lite/micro/kernels/squeeze.cc b/tensorflow/lite/micro/kernels/squeeze.cc
index e52ccab..3df1363 100644
--- a/tensorflow/lite/micro/kernels/squeeze.cc
+++ b/tensorflow/lite/micro/kernels/squeeze.cc
@@ -44,7 +44,7 @@
   TfLiteTensor* output;
 };
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus SqueezePrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
@@ -55,7 +55,7 @@
   // Determines number of dimensions of output tensor after squeeze.
   const TfLiteIntArray* input_dims = op_context.input->dims;
   const TfLiteIntArray* output_dims = op_context.output->dims;
-  const int* squeeze_dims = op_context.params->squeeze_dims;
+  const int32_t* squeeze_dims = op_context.params->squeeze_dims;
 
   constexpr int max_squeeze_dims = 8;
   TF_LITE_ENSURE(context, input_num_dims <= max_squeeze_dims);
@@ -87,7 +87,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus SqueezeEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0);
 
   if (input->type == kTfLiteString) {
@@ -112,7 +112,7 @@
 }  // namespace
 
 TFLMRegistration Register_SQUEEZE() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  return tflite::micro::RegisterOp(nullptr, SqueezePrepare, SqueezeEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/strided_slice.cc b/tensorflow/lite/micro/kernels/strided_slice.cc
index 4e60e6b..78507a7 100644
--- a/tensorflow/lite/micro/kernels/strided_slice.cc
+++ b/tensorflow/lite/micro/kernels/strided_slice.cc
@@ -14,146 +14,30 @@
 ==============================================================================*/
 #include "tensorflow/lite/kernels/internal/reference/strided_slice.h"
 
-#include <cmath>
+#include <cstdint>
 #include <cstring>
 
-#include "tensorflow/lite/c/builtin_op_data.h"
 #include "tensorflow/lite/c/common.h"
 #include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
 #include "tensorflow/lite/kernels/kernel_util.h"
 #include "tensorflow/lite/kernels/op_macros.h"
 #include "tensorflow/lite/micro/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/kernels/strided_slice.h"
 #include "tensorflow/lite/micro/micro_log.h"
 
 namespace tflite {
 
 namespace {
 
-constexpr int kInputTensor = 0;
-constexpr int kBeginTensor = 1;
-constexpr int kEndTensor = 2;
-constexpr int kStridesTensor = 3;
-constexpr int kOutputTensor = 0;
-
-struct StridedSliceContext {
-  StridedSliceContext(TfLiteContext* context, TfLiteNode* node) {
-    params = reinterpret_cast<TfLiteStridedSliceParams*>(node->builtin_data);
-    micro_context = GetMicroContext(context);
-    input = micro_context->AllocateTempInputTensor(node, kInputTensor);
-    begin = micro_context->AllocateTempInputTensor(node, kBeginTensor);
-    end = micro_context->AllocateTempInputTensor(node, kEndTensor);
-    strides = micro_context->AllocateTempInputTensor(node, kStridesTensor);
-    output = micro_context->AllocateTempOutputTensor(node, kOutputTensor);
-    dims = NumDimensions(input);
-  }
-  ~StridedSliceContext() {
-    micro_context->DeallocateTempTfLiteTensor(input);
-    micro_context->DeallocateTempTfLiteTensor(begin);
-    micro_context->DeallocateTempTfLiteTensor(end);
-    micro_context->DeallocateTempTfLiteTensor(strides);
-    micro_context->DeallocateTempTfLiteTensor(output);
-  }
-  const TfLiteStridedSliceParams* params;
-  MicroContext* micro_context;
-  TfLiteTensor* input;
-  TfLiteTensor* begin;
-  TfLiteTensor* end;
-  TfLiteTensor* strides;
-  TfLiteTensor* output;
-  int dims;
-};
-
-// This Op only supports 1-4D cases and since we use the reference 4D
-// implementation, the 1-3D tensors are mapped to 4D.
-const int kMaxDim = 4;
-
-tflite::StridedSliceParams BuildStridedSliceParams(
-    StridedSliceContext* op_context) {
-  tflite::StridedSliceParams op_params{};
-  op_params.start_indices_count = op_context->dims;
-  op_params.stop_indices_count = op_context->dims;
-  op_params.strides_count = op_context->dims;
-
-  for (int i = 0; i < op_context->dims; ++i) {
-    op_params.start_indices[i] = GetTensorData<int32_t>(op_context->begin)[i];
-    op_params.stop_indices[i] = GetTensorData<int32_t>(op_context->end)[i];
-    op_params.strides[i] = GetTensorData<int32_t>(op_context->strides)[i];
-  }
-
-  op_params.begin_mask = op_context->params->begin_mask;
-  op_params.ellipsis_mask = 0;
-  op_params.end_mask = op_context->params->end_mask;
-  op_params.new_axis_mask = 0;
-  op_params.shrink_axis_mask = op_context->params->shrink_axis_mask;
-  return op_params;
-}
-
-// Processes the indexing tensors (begin, end and strides) to resize the
-// output tensor. This function is callable from both Prepare() and Eval() as
-// long as the caller ensures the indexing tensors are present.
-TfLiteStatus CheckOutputSize(TfLiteContext* context,
-                             StridedSliceContext* op_context) {
-  using ::tflite::strided_slice::StartForAxis;
-  using ::tflite::strided_slice::StopForAxis;
-  TfLiteIntArray* output_shape = op_context->output->dims;
-  int shape_size = 0;
-  auto op_params = BuildStridedSliceParams(op_context);
-  auto input_shape = GetTensorShape(op_context->input);
-  for (int idx = 0; idx < op_context->dims; ++idx) {
-    int32_t stride = GetTensorData<int32_t>(op_context->strides)[idx];
-    TF_LITE_ENSURE_MSG(context, stride != 0, "stride value has to be non-zero");
-    int32_t begin = StartForAxis(op_params, input_shape, idx);
-    int32_t end = StopForAxis(op_params, input_shape, idx, begin);
-
-    // When shrinking an axis, the end position does not matter (and can be
-    // incorrect when negative indexing is used, see Issue #19260). Always use
-    // begin + 1 to generate a length 1 slice, since begin has
-    // already been adjusted for negative indices by StartForAxis.
-    const bool shrink_axis = op_context->params->shrink_axis_mask & (1 << idx);
-    if (shrink_axis) {
-      end = begin + 1;
-    }
-
-    // This is valid for both positive and negative strides
-    int32_t dim_shape = std::ceil((end - begin) / static_cast<float>(stride));
-    dim_shape = dim_shape < 0 ? 0 : dim_shape;
-    if (!shrink_axis) {
-      TF_LITE_ENSURE_EQ(context, output_shape->data[shape_size], dim_shape);
-      shape_size++;
-    }
-  }
-  TF_LITE_ENSURE_EQ(context, output_shape->size, shape_size);
-  return kTfLiteOk;
-}
-
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
-  return context->AllocatePersistentBuffer(context, sizeof(StridedSliceParams));
-}
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  TFLITE_DCHECK(node->user_data != nullptr);
-  StridedSliceParams* op_params =
-      static_cast<StridedSliceParams*>(node->user_data);
-  TF_LITE_ENSURE_EQ(context, NumInputs(node), 4);
-  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
-  StridedSliceContext op_context(context, node);
-  TF_LITE_ENSURE_MSG(context, op_context.dims <= kMaxDim,
-                     "input dim should not exceed 4");
-  auto params = BuildStridedSliceParams(&op_context);
-  memcpy(op_params, &params, sizeof(StridedSliceParams));
-  return CheckOutputSize(context, &op_context);
-}
-
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus StridedSliceEval(TfLiteContext* context, TfLiteNode* node) {
   TFLITE_DCHECK(node->user_data != nullptr);
   const StridedSliceParams& op_params =
       *(static_cast<const StridedSliceParams*>(node->user_data));
 
   const TfLiteEvalTensor* input =
-      tflite::micro::GetEvalInput(context, node, kInputTensor);
+      tflite::micro::GetEvalInput(context, node, kStridedSliceInputTensor);
   TfLiteEvalTensor* output =
-      tflite::micro::GetEvalOutput(context, node, kOutputTensor);
+      tflite::micro::GetEvalOutput(context, node, kStridedSliceOutputTensor);
   switch (output->type) {
     case kTfLiteFloat32:
       reference_ops::StridedSlice(op_params,
@@ -201,7 +85,8 @@
 }  // namespace
 
 TFLMRegistration Register_STRIDED_SLICE() {
-  return tflite::micro::RegisterOp(Init, Prepare, Eval);
+  return tflite::micro::RegisterOp(StridedSliceInit, StridedSlicePrepare,
+                                   StridedSliceEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/strided_slice.h b/tensorflow/lite/micro/kernels/strided_slice.h
new file mode 100644
index 0000000..ea9413f
--- /dev/null
+++ b/tensorflow/lite/micro/kernels/strided_slice.h
@@ -0,0 +1,40 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_LITE_MICRO_KERNELS_STRIDED_SLICE_H_
+#define TENSORFLOW_LITE_MICRO_KERNELS_STRIDED_SLICE_H_
+
+#include <cstdint>
+
+#include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/micro/micro_common.h"
+
+namespace tflite {
+
+constexpr int kStridedSliceInputTensor = 0;
+constexpr int kStridedSliceBeginTensor = 1;
+constexpr int kStridedSliceEndTensor = 2;
+constexpr int kStridedSliceStridesTensor = 3;
+constexpr int kStridedSliceOutputTensor = 0;
+
+void* StridedSliceInit(TfLiteContext* context, const char* buffer,
+                       size_t length);
+
+TfLiteStatus StridedSlicePrepare(TfLiteContext* context, TfLiteNode* node);
+
+}  // namespace tflite
+
+#endif  // TENSORFLOW_LITE_MICRO_KERNELS_STRIDED_SLICE_H_
diff --git a/tensorflow/lite/micro/kernels/strided_slice_common.cc b/tensorflow/lite/micro/kernels/strided_slice_common.cc
new file mode 100644
index 0000000..165e1f3
--- /dev/null
+++ b/tensorflow/lite/micro/kernels/strided_slice_common.cc
@@ -0,0 +1,149 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include <cmath>
+#include <cstring>
+
+#include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/kernels/internal/reference/strided_slice.h"
+#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
+#include "tensorflow/lite/kernels/kernel_util.h"
+#include "tensorflow/lite/kernels/op_macros.h"
+#include "tensorflow/lite/micro/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/kernels/strided_slice.h"
+#include "tensorflow/lite/micro/micro_log.h"
+
+namespace tflite {
+
+namespace {
+
+struct StridedSliceContext {
+  StridedSliceContext(TfLiteContext* context, TfLiteNode* node) {
+    params = reinterpret_cast<TfLiteStridedSliceParams*>(node->builtin_data);
+    micro_context = GetMicroContext(context);
+    input =
+        micro_context->AllocateTempInputTensor(node, kStridedSliceInputTensor);
+    begin =
+        micro_context->AllocateTempInputTensor(node, kStridedSliceBeginTensor);
+    end = micro_context->AllocateTempInputTensor(node, kStridedSliceEndTensor);
+    strides = micro_context->AllocateTempInputTensor(
+        node, kStridedSliceStridesTensor);
+    output = micro_context->AllocateTempOutputTensor(node,
+                                                     kStridedSliceOutputTensor);
+    dims = NumDimensions(input);
+  }
+  ~StridedSliceContext() {
+    micro_context->DeallocateTempTfLiteTensor(input);
+    micro_context->DeallocateTempTfLiteTensor(begin);
+    micro_context->DeallocateTempTfLiteTensor(end);
+    micro_context->DeallocateTempTfLiteTensor(strides);
+    micro_context->DeallocateTempTfLiteTensor(output);
+  }
+  const TfLiteStridedSliceParams* params;
+  MicroContext* micro_context;
+  TfLiteTensor* input;
+  TfLiteTensor* begin;
+  TfLiteTensor* end;
+  TfLiteTensor* strides;
+  TfLiteTensor* output;
+  int dims;
+};
+
+// This Op only supports 1-4D cases and since we use the reference 4D
+// implementation, the 1-3D tensors are mapped to 4D.
+const int kMaxDim = 4;
+
+tflite::StridedSliceParams BuildStridedSliceParams(
+    StridedSliceContext* op_context) {
+  tflite::StridedSliceParams op_params{};
+  op_params.start_indices_count = op_context->dims;
+  op_params.stop_indices_count = op_context->dims;
+  op_params.strides_count = op_context->dims;
+
+  for (int i = 0; i < op_context->dims; ++i) {
+    op_params.start_indices[i] = GetTensorData<int32_t>(op_context->begin)[i];
+    op_params.stop_indices[i] = GetTensorData<int32_t>(op_context->end)[i];
+    op_params.strides[i] = GetTensorData<int32_t>(op_context->strides)[i];
+  }
+
+  op_params.begin_mask = op_context->params->begin_mask;
+  op_params.ellipsis_mask = 0;
+  op_params.end_mask = op_context->params->end_mask;
+  op_params.new_axis_mask = 0;
+  op_params.shrink_axis_mask = op_context->params->shrink_axis_mask;
+  return op_params;
+}
+
+// Processes the indexing tensors (begin, end and strides) to resize the
+// output tensor. This function is callable from both Prepare() and Eval() as
+// long as the caller ensures the indexing tensors are present.
+TfLiteStatus CheckOutputSize(TfLiteContext* context,
+                             StridedSliceContext* op_context) {
+  using ::tflite::strided_slice::StartForAxis;
+  using ::tflite::strided_slice::StopForAxis;
+  TfLiteIntArray* output_shape = op_context->output->dims;
+  int shape_size = 0;
+  auto op_params = BuildStridedSliceParams(op_context);
+  auto input_shape = GetTensorShape(op_context->input);
+  for (int idx = 0; idx < op_context->dims; ++idx) {
+    int32_t stride = GetTensorData<int32_t>(op_context->strides)[idx];
+    TF_LITE_ENSURE_MSG(context, stride != 0, "stride value has to be non-zero");
+    int32_t begin = StartForAxis(op_params, input_shape, idx);
+    int32_t end = StopForAxis(op_params, input_shape, idx, begin);
+
+    // When shrinking an axis, the end position does not matter (and can be
+    // incorrect when negative indexing is used, see Issue #19260). Always use
+    // begin + 1 to generate a length 1 slice, since begin has
+    // already been adjusted for negative indices by StartForAxis.
+    const bool shrink_axis = op_context->params->shrink_axis_mask & (1 << idx);
+    if (shrink_axis) {
+      end = begin + 1;
+    }
+
+    // This is valid for both positive and negative strides
+    int32_t dim_shape = std::ceil((end - begin) / static_cast<float>(stride));
+    dim_shape = dim_shape < 0 ? 0 : dim_shape;
+    if (!shrink_axis) {
+      TF_LITE_ENSURE_EQ(context, output_shape->data[shape_size], dim_shape);
+      shape_size++;
+    }
+  }
+  TF_LITE_ENSURE_EQ(context, output_shape->size, shape_size);
+  return kTfLiteOk;
+}
+
+}  // namespace
+
+void* StridedSliceInit(TfLiteContext* context, const char* buffer,
+                       size_t length) {
+  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
+  return context->AllocatePersistentBuffer(context, sizeof(StridedSliceParams));
+}
+
+TfLiteStatus StridedSlicePrepare(TfLiteContext* context, TfLiteNode* node) {
+  TFLITE_DCHECK(node->user_data != nullptr);
+  StridedSliceParams* op_params =
+      static_cast<StridedSliceParams*>(node->user_data);
+  TF_LITE_ENSURE_EQ(context, NumInputs(node), 4);
+  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
+  StridedSliceContext op_context(context, node);
+  TF_LITE_ENSURE_MSG(context, op_context.dims <= kMaxDim,
+                     "input dim should not exceed 4");
+  auto params = BuildStridedSliceParams(&op_context);
+  memcpy(op_params, &params, sizeof(StridedSliceParams));
+  return CheckOutputSize(context, &op_context);
+}
+
+}  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/svdf.cc b/tensorflow/lite/micro/kernels/svdf.cc
index 0ffb4b0..9e85c6b 100644
--- a/tensorflow/lite/micro/kernels/svdf.cc
+++ b/tensorflow/lite/micro/kernels/svdf.cc
@@ -32,12 +32,12 @@
 namespace tflite {
 namespace {
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* InitSvdf(TfLiteContext* context, const char* buffer, size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
   return context->AllocatePersistentBuffer(context, sizeof(OpDataSvdf));
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus EvalSvdf(TfLiteContext* context, TfLiteNode* node) {
   auto* params = reinterpret_cast<TfLiteSVDFParams*>(node->builtin_data);
   TFLITE_DCHECK(node->user_data != nullptr);
   const OpDataSvdf& data = *(static_cast<const OpDataSvdf*>(node->user_data));
@@ -99,7 +99,7 @@
 }  // namespace
 
 TFLMRegistration Register_SVDF() {
-  return tflite::micro::RegisterOp(Init, PrepareSvdf, Eval);
+  return tflite::micro::RegisterOp(InitSvdf, PrepareSvdf, EvalSvdf);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/testdata/BUILD b/tensorflow/lite/micro/kernels/testdata/BUILD
index 16df712..c93bc7d 100644
--- a/tensorflow/lite/micro/kernels/testdata/BUILD
+++ b/tensorflow/lite/micro/kernels/testdata/BUILD
@@ -1,3 +1,4 @@
+load("@rules_python//python:defs.bzl", "py_binary", "py_test")
 load("@tflm_pip_deps//:requirements.bzl", "requirement")
 
 package(
@@ -45,7 +46,7 @@
     deps = [
         "@absl_py//absl:app",
         requirement("numpy"),
-        requirement("tensorflow-cpu"),
+        requirement("tensorflow"),
     ],
 )
 
diff --git a/tensorflow/lite/micro/kernels/testdata/lstm_test_data.cc b/tensorflow/lite/micro/kernels/testdata/lstm_test_data.cc
index 4d7d9d9..0fc010b 100644
--- a/tensorflow/lite/micro/kernels/testdata/lstm_test_data.cc
+++ b/tensorflow/lite/micro/kernels/testdata/lstm_test_data.cc
@@ -251,11 +251,12 @@
 
   // state quantization parameters
   quantization_settings.input = {/*scale=*/3.0518044e-5, /*zp=*/0,
-                                 /*symmetry=*/false};
-  quantization_settings.output = {/*scale=*/1.8310826e-5, /*zp=*/-5461,
-                                  /*symmetry=*/false};
-  quantization_settings.hidden_state = {/*scale=*/1.8310826e-5, /*zp=*/-5461,
-                                        /*symmetry=*/false};
+                                 /*symmetry=*/true};
+  quantization_settings.output = {/*scale=*/2.1362956633198035e-05, /*zp=*/0,
+                                  /*symmetry=*/true};
+  quantization_settings.hidden_state = {/*scale=*/2.1362956633198035e-05,
+                                        /*zp=*/0,
+                                        /*symmetry=*/true};
   quantization_settings.cell_state = {/*scale=*/0.00024414062, /*zp=*/0,
                                       /*symmetry=*/true};
 
diff --git a/tensorflow/lite/micro/kernels/testdata/lstm_test_data.h b/tensorflow/lite/micro/kernels/testdata/lstm_test_data.h
index 3edf420..932b832 100644
--- a/tensorflow/lite/micro/kernels/testdata/lstm_test_data.h
+++ b/tensorflow/lite/micro/kernels/testdata/lstm_test_data.h
@@ -390,9 +390,9 @@
   int state_size_[3] = {2, batch_size, state_dimension};
 
   // see lstm_shared.h for tensor names, the last tensor is the output tensor
-  TfLiteTensor tensors_[24 + 1];
+  TfLiteTensor tensors_[24 + 1] = {};
   // Use for internel kernel testing
-  TfLiteEvalTensor eval_tensors_[24 + 1];
+  TfLiteEvalTensor eval_tensors_[24 + 1] = {};
   // indices for the tensors inside the node (required by kernel runner)
   int input_tensor_indices_[1 + 24] = {};
   // single output (last in the tensors array)
diff --git a/tensorflow/lite/micro/kernels/testdata/lstm_test_data_generator.py b/tensorflow/lite/micro/kernels/testdata/lstm_test_data_generator.py
index 97c8798..c6553fe 100644
--- a/tensorflow/lite/micro/kernels/testdata/lstm_test_data_generator.py
+++ b/tensorflow/lite/micro/kernels/testdata/lstm_test_data_generator.py
@@ -17,15 +17,15 @@
 2. Print the intermediate step outputs inside the LSTM for a single step LSTM invocation (Get2X2GateOutputCheckData in .cc)
 3. Print the outputs for multi-step LSTM invocation (Get2X2LstmEvalCheckData in .cc)
 
-Every invocation gives three types information: 
-1. Quantized output: kernel output in integer 
+Every invocation gives three types of information:
+1. Quantized output: kernel output in integer
 2. Dequantized output: Quantized output in floating point representation
 3. Float output: output from the floating point computation (i.e., float kernel)
 
-Note: 
+Note:
 1. Change quantization settings in _KERNEL_CONFIG to see the outcomes from various quantization schema (e.g., 8x8 Vs. 16x8)
 2. Only single batch inference is supporte here. Change _GATE_TEST_DATA or _MULTISTEP_TEST_DATA to see kernel outputs on different input data
-3. The quantization computation here is not the exact as the c++ implementation. The integer calculation is mimiced here using floating point. 
+3. The quantization computation here is not exactly the same as the c++ implementation. The integer calculation is emulated here using floating point.
 No fixed point math is implemented here. The purpose is to illustrate the computation procedure and possible quantization error accumulation, not for bit exactness.
 """
 from absl import app
@@ -88,7 +88,7 @@
 _MULTISTEP_TEST_DATA = {
     'init_hidden_state_vals': [0, 0],
     'init_cell_state_vals': [0, 0],
-    'input_data': [0.2, 0.3, 0.2, 0.3, 0.2, 0.3],  # three time steps 
+    'input_data': [0.2, 0.3, 0.2, 0.3, 0.2, 0.3],  # three time steps
     'hidden_state_range': (-0.5, 0.7),
     'cell_state_range': [-8, 8],
     'input_data_range': [-1, 1]
diff --git a/tensorflow/lite/micro/kernels/testdata/lstm_test_data_utils.py b/tensorflow/lite/micro/kernels/testdata/lstm_test_data_utils.py
index 345b143..142a58c 100644
--- a/tensorflow/lite/micro/kernels/testdata/lstm_test_data_utils.py
+++ b/tensorflow/lite/micro/kernels/testdata/lstm_test_data_utils.py
@@ -92,14 +92,14 @@
 
 
 def quantized_sigmoid(input, input_scale, output_scale, num_bits=16):
-  """Sigmoid (interger)"""
+  """Sigmoid (integer)"""
   float_input = input * input_scale
   float_result = sigmoid(float_input)
   return quantize_data(float_result, output_scale, bit_width=num_bits)
 
 
 def quantized_tanh(input, input_scale, output_scale, num_bits=16):
-  """Tanh (interger)"""
+  """Tanh (integer)"""
   float_input = input * input_scale
   float_result = np.tanh(float_input)
   return quantize_data(float_result, output_scale, bit_width=num_bits)
diff --git a/tensorflow/lite/micro/kernels/transpose.cc b/tensorflow/lite/micro/kernels/transpose.cc
index c57812b..915def5 100644
--- a/tensorflow/lite/micro/kernels/transpose.cc
+++ b/tensorflow/lite/micro/kernels/transpose.cc
@@ -46,7 +46,7 @@
   TfLiteTensor* output;
 };
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus TransposePrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
@@ -72,7 +72,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus TransposeEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* perm_tensor =
       tflite::micro::GetEvalInput(context, node, kPermTensor);
   const int32_t* perm_data = perm_tensor->data.i32;
@@ -123,6 +123,6 @@
 }  // namespace
 
 TFLMRegistration Register_TRANSPOSE() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  return tflite::micro::RegisterOp(nullptr, TransposePrepare, TransposeEval);
 }
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/transpose_conv.cc b/tensorflow/lite/micro/kernels/transpose_conv.cc
index a2ac2b4..ea0efae 100644
--- a/tensorflow/lite/micro/kernels/transpose_conv.cc
+++ b/tensorflow/lite/micro/kernels/transpose_conv.cc
@@ -15,6 +15,9 @@
 
 #include "tensorflow/lite/kernels/internal/reference/transpose_conv.h"
 
+#include <cstddef>
+#include <cstdint>
+
 #include "tensorflow/lite/c/builtin_op_data.h"
 #include "tensorflow/lite/c/common.h"
 #include "tensorflow/lite/kernels/internal/common.h"
@@ -48,8 +51,9 @@
   // A scratch buffer is required for quantized implementations.
   int scratch_buffer_index;
 
-  // TODO(b/192090531): Remove this once all 8x16 transpose conv models use
-  // 64-bit biases.
+  // Index to the converted 64-bit bias buffer from 16-bit bias. This is
+  // required to handle 16x8 transpose convolutions where a 16-bit bias is
+  // provided, whereas the kernel expects 64-bit biases.
   int bias_converted_buffer_index;
 
   // Multiplier and shift arrays are required for the int8 implementation.
@@ -123,7 +127,9 @@
     if (input->type == kTfLiteInt16) {
       TFLITE_DCHECK(filter->type == kTfLiteInt8);
       TFLITE_DCHECK(output->type == kTfLiteInt16);
-      if (bias->type == kTfLiteInt16) {
+      // Handle the case where the bias is 16 bits for 16x8 transpose
+      // convolution where the kernel actually expects 64-bit biases.
+      if (bias != nullptr && bias->type == kTfLiteInt16) {
         TFLITE_DCHECK(
             context->RequestScratchBufferInArena(
                 context, GetTensorShape(bias).FlatSize() * sizeof(std::int64_t),
@@ -141,12 +147,13 @@
   return kTfLiteOk;
 }
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* TransposeConvInit(TfLiteContext* context, const char* buffer,
+                        size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
   return context->AllocatePersistentBuffer(context, sizeof(OpData));
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus TransposeConvPrepare(TfLiteContext* context, TfLiteNode* node) {
   TFLITE_DCHECK(node->user_data != nullptr);
   TFLITE_DCHECK(node->builtin_data != nullptr);
 
@@ -243,7 +250,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus TransposeConvEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* input =
       tflite::micro::GetEvalInput(context, node, kInputTensor);
   const TfLiteEvalTensor* filter =
@@ -298,12 +305,10 @@
       break;
     }
     case kTfLiteInt16: {
-      std::int64_t* scratch_buffer = static_cast<int64_t*>(
+      auto* scratch_buffer = static_cast<int64_t*>(
           context->GetScratchBuffer(context, data.scratch_buffer_index));
-      // TODO(b/192090531): Remove this once all 8x16 transpose conv models use
-      // 64-bit biases.
       if (bias != nullptr && bias->type == kTfLiteInt16) {
-        std::int64_t* bias_converted_buffer =
+        auto* bias_converted_buffer =
             static_cast<int64_t*>(context->GetScratchBuffer(
                 context, data.bias_converted_buffer_index));
         for (int i = 0; i < tflite::micro::GetTensorShape(bias).FlatSize();
@@ -346,7 +351,8 @@
 }  // namespace
 
 TFLMRegistration Register_TRANSPOSE_CONV() {
-  return tflite::micro::RegisterOp(Init, Prepare, Eval);
+  return tflite::micro::RegisterOp(TransposeConvInit, TransposeConvPrepare,
+                                   TransposeConvEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/transpose_conv.h b/tensorflow/lite/micro/kernels/transpose_conv.h
new file mode 100644
index 0000000..3a99ccb
--- /dev/null
+++ b/tensorflow/lite/micro/kernels/transpose_conv.h
@@ -0,0 +1,50 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_MICRO_KERNELS_TRANSPOSE_CONV_H_
+#define TENSORFLOW_LITE_MICRO_KERNELS_TRANSPOSE_CONV_H_
+
+#include <cstdint>
+
+#include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/kernels/internal/types.h"
+#include "tensorflow/lite/micro/micro_common.h"
+
+namespace tflite {
+
+// This is the most generic TFLMRegistration. The actual supported types
+// may still be target dependent. The only requirement is that every
+// implementation (reference or optimized) must define this function.
+TFLMRegistration Register_TRANSPOSE_CONV();
+
+#if defined(CMSIS_NN)
+// Returns a TFLMRegistration struct for kernel variant that only supports
+// int8.
+TFLMRegistration Register_TRANSPOSE_CONV_INT8();
+
+#else
+// Note that while this block gets used for both reference and optimized kernels
+// that do not have any specialized implementations, the only goal here is to
+// define fallback implementations that allow reference kernels to still be used
+// from applications that call a more specific kernel variant.
+
+inline TFLMRegistration Register_TRANSPOSE_CONV_INT8() {
+  return Register_TRANSPOSE_CONV();
+}
+
+#endif
+
+}  // namespace tflite
+
+#endif  // TENSORFLOW_LITE_MICRO_KERNELS_TRANSPOSE_CONV_H_
diff --git a/tensorflow/lite/micro/kernels/transpose_conv_test.cc b/tensorflow/lite/micro/kernels/transpose_conv_test.cc
index 0ddb3b2..49d2c90 100644
--- a/tensorflow/lite/micro/kernels/transpose_conv_test.cc
+++ b/tensorflow/lite/micro/kernels/transpose_conv_test.cc
@@ -53,7 +53,8 @@
                                               1,  // stride_height
                                               kTfLiteActNone,
                                               1,
-                                              1};
+                                              1,
+                                              kTfLiteNoType};
 
 template <typename T>
 TfLiteStatus InvokeTransposeConv(TfLiteTensor* tensors, int tensors_size,
@@ -253,7 +254,8 @@
                                   1,                   // stride_height
                                   kTfLiteActRelu,
                                   1,
-                                  1};
+                                  1,
+                                  kTfLiteNoType};
 
   TF_LITE_MICRO_EXPECT_EQ(
       kTfLiteOk, tflite::testing::TestTransposeConvFloat(
@@ -276,7 +278,8 @@
                                   3,                   // stride_height
                                   kTfLiteActRelu,
                                   1,
-                                  1};
+                                  1,
+                                  kTfLiteNoType};
 
   TF_LITE_MICRO_EXPECT_EQ(
       kTfLiteOk, tflite::testing::TestTransposeConvFloat(
@@ -304,7 +307,8 @@
                                   2,                    // stride_height
                                   kTfLiteActRelu,
                                   1,
-                                  1};
+                                  1,
+                                  kTfLiteNoType};
 
   TF_LITE_MICRO_EXPECT_EQ(
       kTfLiteOk,
diff --git a/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.h b/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.h
index 16aa23b..46f6b2d 100644
--- a/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.h
+++ b/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.h
@@ -1,4 +1,4 @@
-/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -36,10 +36,19 @@
 // implementations.
 TFLMRegistration Register_UNIDIRECTIONAL_SEQUENCE_LSTM_INT8();
 
+// Returns a TFLMRegistration struct for kernel variant that only supports
+// int16 activations and int8 weights and uses the latency optimized
+// implementations.
+TFLMRegistration Register_UNIDIRECTIONAL_SEQUENCE_LSTM_INT16();
+
 #else
 inline TFLMRegistration Register_UNIDIRECTIONAL_SEQUENCE_LSTM_INT8() {
   return Register_UNIDIRECTIONAL_SEQUENCE_LSTM();
 }
+
+inline TFLMRegistration Register_UNIDIRECTIONAL_SEQUENCE_LSTM_INT16() {
+  return Register_UNIDIRECTIONAL_SEQUENCE_LSTM();
+}
 #endif
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm_test.cc b/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm_test.cc
index c85e56f..1e5a868 100644
--- a/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm_test.cc
+++ b/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm_test.cc
@@ -1,4 +1,4 @@
-/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -28,7 +28,6 @@
 namespace {
 
 constexpr int kLstmMaxNumInputOutputTensors = 24 + 1;
-constexpr int kLstmIntermediateTensorBase = kLstmMaxNumInputOutputTensors + 1;
 
 // Validate the output result array with golden values
 template <typename T>
@@ -50,42 +49,20 @@
     LstmNodeContent<ActivationType, WeightType, BiasType, CellType, batch_size,
                     time_steps, input_dimension, state_dimension>&
         node_contents) {
-  TfLiteTensor tensors[kLstmMaxNumInputOutputTensors + 1 + 5];
-  memcpy(tensors, node_contents.GetTensors(),
-         kLstmMaxNumInputOutputTensors * sizeof(TfLiteTensor));
-
-  // Provide also intermediate tensors needed by older LSTM implementations
-  int intermediate_array_data[6] = {5,
-                                    kLstmIntermediateTensorBase,
-                                    kLstmIntermediateTensorBase + 1,
-                                    kLstmIntermediateTensorBase + 2,
-                                    kLstmIntermediateTensorBase + 3,
-                                    kLstmIntermediateTensorBase + 4};
-  int input_zero_points[2] = {1, -21};
-  float input_scales[2] = {1, 0.004705882165580988};
-  TfLiteAffineQuantization input_quant = {
-      tflite::testing::FloatArrayFromFloats(input_scales),
-      tflite::testing::IntArrayFromInts(input_zero_points), 0};
-  int intermediate_dim[2] = {1, 0};
-  for (int i = 0; i < 5; ++i) {
-    tensors[kLstmIntermediateTensorBase + i] =
-        CreateTensor<int16_t>(nullptr, IntArrayFromInts(intermediate_dim));
-    tensors[kLstmIntermediateTensorBase + i].quantization = {
-        kTfLiteAffineQuantization, &input_quant};
-  }
-
   const TFLMRegistration registration = Register_UNIDIRECTIONAL_SEQUENCE_LSTM();
   auto buildin_data = node_contents.BuiltinData();
   micro::KernelRunner runner(
-      registration, tensors, kLstmMaxNumInputOutputTensors + 1 + 5,
+      registration, node_contents.GetTensors(), kLstmMaxNumInputOutputTensors,
       node_contents.KernelInputs(), node_contents.KernelOutputs(),
-      reinterpret_cast<void*>(&buildin_data),
-      IntArrayFromInts(intermediate_array_data));
+      reinterpret_cast<void*>(&buildin_data));
   TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
   TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());
 
   const auto& quantization_settings = node_contents.QuantizationSettings();
 
+// CMSIS-NN does not use the hidden state and cell state tensors so these tests
+// fail.
+#if !defined(CMSIS_NN)
   float dequantized_hidden_state[batch_size * state_dimension] = {};
   Dequantize(node_contents.GetHiddenStateData(), batch_size * state_dimension,
              quantization_settings.hidden_state.scale,
@@ -104,6 +81,7 @@
   ValidateResultGoldens(eval_check_data.expected_cell_state,
                         dequantized_cell_state, batch_size * state_dimension,
                         cell_state_tolerance);
+#endif
 
   float dequantized_output[batch_size * state_dimension * time_steps] = {};
   Dequantize(node_contents.GetOutputData(),
@@ -150,7 +128,6 @@
 TF_LITE_MICRO_TESTS_BEGIN
 // TODO(b/230666079) enable below tests for xtensa when the xtensa
 // kernel is reconciled with reference kernel
-#if !defined(XTENSA)
 TF_LITE_MICRO_TEST(TestUnidirectionalLSTMFloat) {
   const tflite::testing::LstmEvalCheckData<12, 4, 12> kernel_eval_data =
       tflite::testing::Get2X2LstmEvalCheckData();
@@ -193,5 +170,4 @@
       kernel_eval_data, hidden_state_tolerance, cell_state_tolerance,
       int16_node_contents);
 }
-#endif  // !defined(XTENSA)
 TF_LITE_MICRO_TESTS_END
diff --git a/tensorflow/lite/micro/kernels/unpack.cc b/tensorflow/lite/micro/kernels/unpack.cc
index 3ce4c33..9ce1683 100644
--- a/tensorflow/lite/micro/kernels/unpack.cc
+++ b/tensorflow/lite/micro/kernels/unpack.cc
@@ -72,7 +72,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus UnpackEval(TfLiteContext* context, TfLiteNode* node) {
   TfLiteUnpackParams* data =
       reinterpret_cast<TfLiteUnpackParams*>(node->builtin_data);
 
@@ -102,7 +102,7 @@
 }  // namespace
 
 TFLMRegistration Register_UNPACK() {
-  return tflite::micro::RegisterOp(nullptr, nullptr, Eval);
+  return tflite::micro::RegisterOp(nullptr, nullptr, UnpackEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/var_handle.cc b/tensorflow/lite/micro/kernels/var_handle.cc
index 06087f7..0efb28c 100644
--- a/tensorflow/lite/micro/kernels/var_handle.cc
+++ b/tensorflow/lite/micro/kernels/var_handle.cc
@@ -36,12 +36,12 @@
   int32_t resource_id;
 };
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* VarHandleInit(TfLiteContext* context, const char* buffer, size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
   return context->AllocatePersistentBuffer(context, sizeof(OpData));
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus VarHandlePrepare(TfLiteContext* context, TfLiteNode* node) {
   OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
   const auto* params =
       reinterpret_cast<const TfLiteVarHandleParams*>(node->builtin_data);
@@ -72,7 +72,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus VarHandleEval(TfLiteContext* context, TfLiteNode* node) {
   OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
 
   TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0);
@@ -87,7 +87,8 @@
 }  // namespace.
 
 TFLMRegistration Register_VAR_HANDLE() {
-  return tflite::micro::RegisterOp(Init, Prepare, Eval);
+  return tflite::micro::RegisterOp(VarHandleInit, VarHandlePrepare,
+                                   VarHandleEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/while.cc b/tensorflow/lite/micro/kernels/while.cc
index 097a342..a11adeb 100644
--- a/tensorflow/lite/micro/kernels/while.cc
+++ b/tensorflow/lite/micro/kernels/while.cc
@@ -36,12 +36,12 @@
   int body_subgraph_index;
 };
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* WhileInit(TfLiteContext* context, const char* buffer, size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
   return context->AllocatePersistentBuffer(context, sizeof(OpData));
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus WhilePrepare(TfLiteContext* context, TfLiteNode* node) {
   OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
   const auto* params =
       reinterpret_cast<const TfLiteWhileParams*>(node->builtin_data);
@@ -74,7 +74,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus WhileEval(TfLiteContext* context, TfLiteNode* node) {
   const OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
 
   tflite::MicroContext* micro_context = tflite::GetMicroContext(context);
@@ -127,7 +127,7 @@
 }  // namespace.
 
 TFLMRegistration Register_WHILE() {
-  return tflite::micro::RegisterOp(Init, Prepare, Eval);
+  return tflite::micro::RegisterOp(WhileInit, WhilePrepare, WhileEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/xtensa/add.cc b/tensorflow/lite/micro/kernels/xtensa/add.cc
index 4e4f805..d3c2371 100644
--- a/tensorflow/lite/micro/kernels/xtensa/add.cc
+++ b/tensorflow/lite/micro/kernels/xtensa/add.cc
@@ -113,11 +113,11 @@
   op_params.output_shift = data->output_shift;
   SetActivationParams(data->output_activation_min, data->output_activation_max,
                       &op_params);
-#if !(defined(HIFI4))
+#if !(defined(HIFI3) || defined(HIFI4) || defined(HIFI5))
   bool need_broadcast = reference_ops::ProcessBroadcastShapes(
       tflite::micro::GetTensorShape(input1),
       tflite::micro::GetTensorShape(input2), &op_params);
-#endif  // !defined(HIFI4)
+#endif  // !defined(HIFI3) && !defined(HIFI4) && !defined(HIFI5)
 
   switch (output->type) {
     case kTfLiteInt8: {
@@ -126,7 +126,7 @@
           *(reinterpret_cast<XtensaAddOpData*>(node->user_data));
       AddEvalQuantizedVision(context, node, *params, op_data, input1, input2,
                              output);
-#elif defined(HIFI4)  // defined(VISION_P6)
+#elif defined(HIFI3) || defined(HIFI4) || defined(HIFI5)  // defined(VISION_P6)
       int err;
       const RuntimeShape extended_input1_shape =
           RuntimeShape::ExtendedShape(4, tflite::micro::GetTensorShape(input1));
@@ -150,7 +150,7 @@
           op_params.left_shift);
 
       TF_LITE_ENSURE(context, err == 0);
-#else                 // defined(VISION_P6)
+#else                                                     // defined(VISION_P6)
       if (need_broadcast) {
         reference_integer_ops::BroadcastAdd4DSlow(
             op_params, tflite::micro::GetTensorShape(input1),
@@ -168,11 +168,11 @@
             tflite::micro::GetTensorShape(output),
             tflite::micro::GetTensorData<int8_t>(output));
       }
-#endif                // defined(VISION_P6)
+#endif                                                    // defined(VISION_P6)
       break;
     }
     case kTfLiteInt16: {
-#if defined(HIFI4)
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
       int err;
       const RuntimeShape extended_input1_shape =
           RuntimeShape::ExtendedShape(4, tflite::micro::GetTensorShape(input1));
@@ -196,7 +196,7 @@
           op_params.left_shift);
 
       TF_LITE_ENSURE(context, err == 0);
-#else   // defined(HIFI4)
+#else   // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
       if (need_broadcast) {
         reference_ops::BroadcastAdd4DSlow(
             op_params, tflite::micro::GetTensorShape(input1),
@@ -214,7 +214,7 @@
                            tflite::micro::GetTensorData<int16_t>(output),
                            false);
       }
-#endif  // defined(HIFI4)
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
       break;
     }
     default:
diff --git a/tensorflow/lite/micro/kernels/xtensa/conv.cc b/tensorflow/lite/micro/kernels/xtensa/conv.cc
index 59e576c..384dba9 100644
--- a/tensorflow/lite/micro/kernels/xtensa/conv.cc
+++ b/tensorflow/lite/micro/kernels/xtensa/conv.cc
@@ -1,4 +1,4 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -32,31 +32,6 @@
 namespace tflite {
 namespace {
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
-  void* data =
-      context->AllocatePersistentBuffer(context, sizeof(XtensaConvOpData));
-#if defined(VISION_P6)
-  if (InitXtensaContext()) {
-    return nullptr;
-  }
-#endif  // defined(VISION_P6)
-
-  return data;
-}
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  TF_LITE_ENSURE_OK(context, ConvPrepare(context, node));
-
-#if defined(HIFI4) || defined(HIFI5)
-  TF_LITE_ENSURE_OK(context, ConvPrepareHifi(context, node));
-#endif
-#if defined(VISION_P6)
-  TF_LITE_ENSURE_OK(context, ConvPrepareVision(context, node));
-#endif  // VISION_P6
-  return kTfLiteOk;
-}
-
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   TFLITE_DCHECK(node->user_data != nullptr);
   TFLITE_DCHECK(node->builtin_data != nullptr);
@@ -73,68 +48,76 @@
   const TfLiteEvalTensor* filter =
       tflite::micro::GetEvalInput(context, node, kConvWeightsTensor);
   const TfLiteEvalTensor* bias =
-      (NumInputs(node) == 3)
-          ? tflite::micro::GetEvalInput(context, node, kConvBiasTensor)
-          : nullptr;
-
-  TfLiteEvalTensor filter_int8 = tflite::micro::MakeUnpackedInt4Tensor(
-      context, op_data.reference_op_data.filter_buffer_index, filter);
+      tflite::micro::GetEvalInput(context, node, kConvBiasTensor);
 
   switch (input->type) {
+    case kTfLiteFloat32: {
+      tflite::reference_ops::Conv(
+          ConvParamsFloat(params, op_data.reference_op_data),
+          tflite::micro::GetTensorShape(input),
+          tflite::micro::GetTensorData<float>(input),
+          tflite::micro::GetTensorShape(filter),
+          tflite::micro::GetTensorData<float>(filter),
+          tflite::micro::GetTensorShape(bias),
+          tflite::micro::GetOptionalTensorData<float>(bias),
+          tflite::micro::GetTensorShape(output),
+          tflite::micro::GetTensorData<float>(output),
+          tflite::micro::GetTensorShape(nullptr), nullptr);
+      break;
+    }
     case kTfLiteInt8: {
-      switch (filter_int8.type) {
-        case kTfLiteInt8: {
-#if defined(HIFI4) || defined(HIFI5)
-          ConvEvalHifi(context, node, params, op_data, input, &filter_int8,
-                       bias, output);
-#elif defined(VISION_P6)
-          return ConvEvalVision(context, node, params, op_data, input,
-                                &filter_int8, bias, output);
-#else
-          reference_integer_ops::ConvPerChannel(
-              ConvParamsQuantized(params, op_data.reference_op_data),
-              op_data.reference_op_data.per_channel_output_multiplier,
-              op_data.reference_op_data.per_channel_output_shift,
-              tflite::micro::GetTensorShape(input),
-              tflite::micro::GetTensorData<int8_t>(input),
-              tflite::micro::GetTensorShape(filter),
-              tflite::micro::GetTensorData<int8_t>(&filter_int8),
-              tflite::micro::GetTensorShape(bias),
-              tflite::micro::GetOptionalTensorData<int32_t>(bias),
-              tflite::micro::GetTensorShape(output),
-              tflite::micro::GetTensorData<int8_t>(output));
-          return kTfLiteOk;
-#endif
-          break;
-        }
-
-        default:
-          MicroPrintf("Filter type %s (%d) not supported.",
-                      TfLiteTypeGetName(filter->type), filter->type);
-          return kTfLiteError;
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
+      if (params.dilation_width_factor == 1 &&
+          params.dilation_height_factor == 1) {
+        return ConvEvalHifiInt8(context, node, params, op_data, input, filter,
+                                bias, output);
+      } else {
+        return ConvReferenceEvalInt8(context, node);
       }
-      return kTfLiteOk;
+#elif defined(VISION_P6)
+      // At this time the optimized implementation is failing the unit tests
+      // for reasons that are not entirely clear. For now, we have identified some
+      // of the problem cases and are manually inserting a reference fallback.
+      // See http://b/270720625 for more details.
+      if (op_data.is_per_channel_quantized ||
+          input->dims->data[1] != input->dims->data[2]) {
+        return ConvReferenceEvalInt8(context, node);
+      } else {
+        return ConvEvalVision(context, node, params, op_data, input, filter,
+                              bias, output);
+      }
+#else
+      return ConvReferenceEvalInt8(context, node);
+#endif
     }
     case kTfLiteInt16: {
-#if defined(HIFI4)
-      ConvEvalHifi16(context, node, params, op_data, input, filter, bias,
-                     output);
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
+      // Note that int32 bias is not widely supported and might be risky (e.g.
+      // http://b/262003750). As such, while we have a fallback to the reference
+      // implementation, production use-cases should only have int64 bias.
+      if (bias->type == kTfLiteInt32) {
+        return ConvReferenceEvalInt16(context, node);
+      } else {
+        return ConvEvalHifiInt16(context, node, params, op_data, input, filter,
+                                 bias, output);
+      }
 #else
       return ConvReferenceEvalInt16(context, node);
-#endif  // defined(HIFI4)
-      break;
+#endif
     }
     default:
       MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type),
                   input->type);
       return kTfLiteError;
   }
+
   return kTfLiteOk;
 }
+
 }  // namespace
 
 TFLMRegistration Register_CONV_2D() {
-  return tflite::micro::RegisterOp(Init, Prepare, Eval);
+  return tflite::micro::RegisterOp(ConvInitXtensa, ConvPrepareXtensa, Eval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/xtensa/conv_common_xtensa.cc b/tensorflow/lite/micro/kernels/xtensa/conv_common_xtensa.cc
new file mode 100644
index 0000000..3063e77
--- /dev/null
+++ b/tensorflow/lite/micro/kernels/xtensa/conv_common_xtensa.cc
@@ -0,0 +1,56 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/kernels/internal/common.h"
+#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
+#include "tensorflow/lite/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/kernels/conv.h"
+#include "tensorflow/lite/micro/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/kernels/xtensa/xtensa.h"
+#include "tensorflow/lite/micro/kernels/xtensa/xtensa_conv.h"
+
+namespace tflite {
+
+void* ConvInitXtensa(TfLiteContext* context, const char* buffer,
+                     size_t length) {
+  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
+  void* data =
+      context->AllocatePersistentBuffer(context, sizeof(XtensaConvOpData));
+#if defined(VISION_P6)
+  if (InitXtensaContext()) {
+    return nullptr;
+  }
+#endif  // defined(VISION_P6)
+
+  return data;
+}
+
+TfLiteStatus ConvPrepareXtensa(TfLiteContext* context, TfLiteNode* node) {
+  TF_LITE_ENSURE_OK(context, ConvPrepare(context, node));
+
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
+  TF_LITE_ENSURE_OK(context, ConvPrepareHifi(context, node));
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
+
+#if defined(VISION_P6)
+  TF_LITE_ENSURE_OK(context, ConvPrepareVision(context, node));
+#endif  // defined(VISION_P6)
+
+  return kTfLiteOk;
+}
+
+}  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/xtensa/conv_hifi.cc b/tensorflow/lite/micro/kernels/xtensa/conv_hifi.cc
index 487c84a..1d2d7ec 100644
--- a/tensorflow/lite/micro/kernels/xtensa/conv_hifi.cc
+++ b/tensorflow/lite/micro/kernels/xtensa/conv_hifi.cc
@@ -1,4 +1,4 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -13,14 +13,14 @@
 limitations under the License.
 ==============================================================================*/
 
-#if defined(HIFI4) || defined(HIFI5)
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
 
 #include <cstdint>
 
 #include "tensorflow/lite/c/builtin_op_data.h"
 #include "tensorflow/lite/c/common.h"
 #include "tensorflow/lite/kernels/internal/common.h"
-#include "tensorflow/lite/kernels/internal/reference/integer_ops/conv.h"
+#include "tensorflow/lite/kernels/internal/portable_tensor_utils.h"
 #include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
 #include "tensorflow/lite/kernels/kernel_util.h"
 #include "tensorflow/lite/micro/kernels/conv.h"
@@ -39,17 +39,43 @@
   // Calculate scratch memory requirements and request scratch buffer
   TfLiteTensor* output =
       micro_context->AllocateTempOutputTensor(node, kConvOutputTensor);
-  TF_LITE_ENSURE(context, output != nullptr);
   TfLiteTensor* input =
       micro_context->AllocateTempInputTensor(node, kConvInputTensor);
-  TF_LITE_ENSURE(context, input != nullptr);
   TfLiteTensor* filter =
       micro_context->AllocateTempInputTensor(node, kConvWeightsTensor);
-  TF_LITE_ENSURE(context, filter != nullptr);
+  TfLiteTensor* bias =
+      micro_context->AllocateTempInputTensor(node, kConvBiasTensor);
 
   const RuntimeShape& input_shape = GetTensorShape(input);
   const RuntimeShape& filter_shape = GetTensorShape(filter);
   const RuntimeShape& output_shape = GetTensorShape(output);
+
+  // Check if the Xtensa optimized code can be used
+  // HIFI4 and HIFI5 do not allow bias data pointer to be nullptr
+  /* TODO(b/277112516): Dilation is currently not supported on HiFi 4 NN Library
+   */
+  bool inputs_and_bias_ok = bias != nullptr;
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
+  inputs_and_bias_ok =
+      inputs_and_bias_ok &&
+      (input->type == kTfLiteInt8 ||
+       (input->type == kTfLiteInt16 && bias->type == kTfLiteInt64));
+#else
+  inputs_and_bias_ok = inputs_and_bias_ok && (input->type == kTfLiteInt8);
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
+  if (!(inputs_and_bias_ok && params->dilation_width_factor == 1 &&
+        params->dilation_height_factor == 1 &&
+        input_shape.Dims(1) >= filter_shape.Dims(1) &&
+        input_shape.Dims(2) >= filter_shape.Dims(2))) {
+    micro_context->DeallocateTempTfLiteTensor(input);
+    micro_context->DeallocateTempTfLiteTensor(filter);
+    micro_context->DeallocateTempTfLiteTensor(output);
+    if (bias != nullptr) {
+      micro_context->DeallocateTempTfLiteTensor(bias);
+    }
+    return kTfLiteOk;
+  }
+
   const int input_height = input_shape.Dims(1);
   const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3);
   const int filter_height = filter_shape.Dims(1);
@@ -60,7 +86,7 @@
   const int pad_height = data->reference_op_data.padding.height;
 
   int required_scratch = 0;
-  // Dilation is currently not supported on HiFi 4 NN Library
+  // TODO(b/277112516): Dilation is currently not supported on HiFi 4 NN Library
   if ((params->dilation_width_factor == 1) &&
       (params->dilation_height_factor == 1)) {
     if (input->type == kTfLiteInt8) {
@@ -83,245 +109,220 @@
   micro_context->DeallocateTempTfLiteTensor(input);
   micro_context->DeallocateTempTfLiteTensor(filter);
   micro_context->DeallocateTempTfLiteTensor(output);
+  if (bias != nullptr) {
+    micro_context->DeallocateTempTfLiteTensor(bias);
+  }
   return kTfLiteOk;
 }
 
-#if defined(HIFI4)
-TfLiteStatus ConvEvalHifi16(TfLiteContext* context, TfLiteNode* node,
-                            const TfLiteConvParams& params,
-                            const XtensaConvOpData& data,
-                            const TfLiteEvalTensor* input,
-                            const TfLiteEvalTensor* filter,
-                            const TfLiteEvalTensor* bias,
-                            TfLiteEvalTensor* output) {
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
+TfLiteStatus ConvEvalHifiInt16(TfLiteContext* context, TfLiteNode* node,
+                               const TfLiteConvParams& params,
+                               const XtensaConvOpData& data,
+                               const TfLiteEvalTensor* input,
+                               const TfLiteEvalTensor* filter,
+                               const TfLiteEvalTensor* bias,
+                               TfLiteEvalTensor* output) {
   const RuntimeShape& input_shape = tflite::micro::GetTensorShape(input);
   const RuntimeShape& filter_shape = tflite::micro::GetTensorShape(filter);
-  /* TODO(b/277112516):Dilation is currently not supported on HiFi 4 NN Library
-   */
-  if ((params.dilation_width_factor == 1) &&
-      (params.dilation_height_factor == 1) &&
-      input_shape.Dims(1) >= filter_shape.Dims(1) &&
-      input_shape.Dims(2) >= filter_shape.Dims(2)) {
-    const int stride_width = params.stride_width;
-    const int stride_height = params.stride_height;
-    const int pad_width = data.reference_op_data.padding.width;
-    const int pad_height = data.reference_op_data.padding.height;
-    const int32_t output_activation_min =
-        data.reference_op_data.output_activation_min;
-    const int32_t output_activation_max =
-        data.reference_op_data.output_activation_max;
+  const int stride_width = params.stride_width;
+  const int stride_height = params.stride_height;
+  const int pad_width = data.reference_op_data.padding.width;
+  const int pad_height = data.reference_op_data.padding.height;
+  const int32_t output_activation_min =
+      data.reference_op_data.output_activation_min;
+  const int32_t output_activation_max =
+      data.reference_op_data.output_activation_max;
 
-    const RuntimeShape& output_shape = tflite::micro::GetTensorShape(output);
-    const int batches = MatchingDim(input_shape, 0, output_shape, 0);
-    const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3);
-    const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3);
-    const int input_height = input_shape.Dims(1);
-    const int input_width = input_shape.Dims(2);
-    const int filter_height = filter_shape.Dims(1);
-    const int filter_width = filter_shape.Dims(2);
-    const int output_height = output_shape.Dims(1);
-    const int output_width = output_shape.Dims(2);
+  const RuntimeShape& output_shape = tflite::micro::GetTensorShape(output);
+  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
+  const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3);
+  const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3);
+  const int input_height = input_shape.Dims(1);
+  const int input_width = input_shape.Dims(2);
+  const int filter_height = filter_shape.Dims(1);
+  const int filter_width = filter_shape.Dims(2);
+  const int output_height = output_shape.Dims(1);
+  const int output_width = output_shape.Dims(2);
 
-    const int16_t* input_data = tflite::micro::GetTensorData<int16_t>(input);
-    const int8_t* filter_data = tflite::micro::GetTensorData<int8_t>(filter);
-    const int64_t* bias_data = tflite::micro::GetTensorData<int64_t>(bias);
-    int16_t* output_data = tflite::micro::GetTensorData<int16_t>(output);
+  const int16_t* input_data = tflite::micro::GetTensorData<int16_t>(input);
+  const int8_t* filter_data = tflite::micro::GetTensorData<int8_t>(filter);
+  const int64_t* bias_data = tflite::micro::GetTensorData<int64_t>(bias);
+  int16_t* output_data = tflite::micro::GetTensorData<int16_t>(output);
 
-    int output_data_format = 0;
-    int out_length = output_height * output_width * output_depth;
-    if (filter_height == 1 && filter_width == 1) {
-      for (int batch = 0; batch < batches; ++batch) {
-        int16_t* p_out_temp;
-        p_out_temp = &output_data[batch * out_length];
+  int output_data_format = 0;
+  int out_length = output_height * output_width * output_depth;
+  if (filter_height == 1 && filter_width == 1) {
+    for (int batch = 0; batch < batches; ++batch) {
+      int16_t* p_out_temp;
+      p_out_temp = &output_data[batch * out_length];
 
+      TF_LITE_ENSURE_EQ(
+          context,
+          xa_nn_conv2d_pointwise_per_chan_sym8sxsym16s(
+              p_out_temp, const_cast<WORD8*>(filter_data),
+              const_cast<WORD16*>(&input_data[batch * input_height *
+                                              input_width * input_depth]),
+              const_cast<WORD64*>(bias_data), input_height, input_width,
+              input_depth, output_depth, 0,
+              data.reference_op_data.per_channel_output_multiplier,
+              data.reference_op_data.per_channel_output_shift, 0,
+              output_data_format),
+          0);
+
+      TF_LITE_ENSURE_EQ(context,
+                        xa_nn_vec_activation_min_max_16_16(
+                            p_out_temp, p_out_temp, output_activation_min,
+                            output_activation_max, out_length),
+                        0);
+    }
+  } else {
+    void* p_scratch = static_cast<void*>(
+        context->GetScratchBuffer(context, data.scratch_tensor_index));
+
+    for (int batch = 0; batch < batches; ++batch) {
+      int16_t* p_out_temp;
+      p_out_temp = &output_data[batch * out_length];
+
+      {
         TF_LITE_ENSURE_EQ(
             context,
-            xa_nn_conv2d_pointwise_per_chan_sym8sxsym16s(
-                p_out_temp, const_cast<WORD8*>(filter_data),
-                const_cast<WORD16*>(&input_data[batch * input_height *
-                                                input_width * input_depth]),
-                const_cast<WORD64*>(bias_data), input_height, input_width,
-                input_depth, output_depth, 0,
+            xa_nn_conv2d_std_per_chan_sym8sxsym16s(
+                p_out_temp,
+                &input_data[batch * input_height * input_width * input_depth],
+                const_cast<int8_t*>(filter_data),  // filter_data,
+                bias_data, input_height, input_width, input_depth,
+                filter_height, filter_width, output_depth, stride_width,
+                stride_height, pad_width, pad_height, output_height,
+                output_width, 0,
                 data.reference_op_data.per_channel_output_multiplier,
                 data.reference_op_data.per_channel_output_shift, 0,
-                output_data_format),
+                output_data_format, static_cast<void*>(p_scratch)),
             0);
-
-        TF_LITE_ENSURE_EQ(context,
-                          xa_nn_vec_activation_min_max_16_16(
-                              p_out_temp, p_out_temp, output_activation_min,
-                              output_activation_max, out_length),
-                          0);
       }
-    } else {
-      void* p_scratch = static_cast<void*>(
-          context->GetScratchBuffer(context, data.scratch_tensor_index));
-
-      for (int batch = 0; batch < batches; ++batch) {
-        int16_t* p_out_temp;
-        p_out_temp = &output_data[batch * out_length];
-
-        {
-          TF_LITE_ENSURE_EQ(
-              context,
-              xa_nn_conv2d_std_per_chan_sym8sxsym16s(
-                  p_out_temp,
-                  &input_data[batch * input_height * input_width * input_depth],
-                  const_cast<int8_t*>(filter_data),  // filter_data,
-                  bias_data, input_height, input_width, input_depth,
-                  filter_height, filter_width, output_depth, stride_width,
-                  stride_height, pad_width, pad_height, output_height,
-                  output_width, 0,
-                  data.reference_op_data.per_channel_output_multiplier,
-                  data.reference_op_data.per_channel_output_shift, 0,
-                  output_data_format, static_cast<void*>(p_scratch)),
-              0);
-        }
-        TF_LITE_ENSURE_EQ(context,
-                          xa_nn_vec_activation_min_max_16_16(
-                              p_out_temp, p_out_temp, output_activation_min,
-                              output_activation_max, out_length),
-                          0);
-      }
+      TF_LITE_ENSURE_EQ(context,
+                        xa_nn_vec_activation_min_max_16_16(
+                            p_out_temp, p_out_temp, output_activation_min,
+                            output_activation_max, out_length),
+                        0);
     }
-    return kTfLiteOk;
   }
-  reference_integer_ops::ConvPerChannel(
-      ConvParamsQuantized(params, data.reference_op_data),
-      data.reference_op_data.per_channel_output_multiplier,
-      data.reference_op_data.per_channel_output_shift,
-      tflite::micro::GetTensorShape(input),
-      tflite::micro::GetTensorData<int16_t>(input),
-      tflite::micro::GetTensorShape(filter),
-      tflite::micro::GetTensorData<int8_t>(filter),
-      tflite::micro::GetTensorShape(bias),
-      tflite::micro::GetTensorData<int64_t>(bias),
-      tflite::micro::GetTensorShape(output),
-      tflite::micro::GetTensorData<int16_t>(output));
+
   return kTfLiteOk;
 }
-#endif  // defined(HIFI4)
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
 
-TfLiteStatus ConvEvalHifi(TfLiteContext* context, TfLiteNode* node,
-                          const TfLiteConvParams& params,
-                          const XtensaConvOpData& data,
-                          const TfLiteEvalTensor* input,
-                          const TfLiteEvalTensor* filter,
-                          const TfLiteEvalTensor* bias,
-                          TfLiteEvalTensor* output) {
+TfLiteStatus ConvEvalHifiInt8(TfLiteContext* context, TfLiteNode* node,
+                              const TfLiteConvParams& params,
+                              const XtensaConvOpData& data,
+                              const TfLiteEvalTensor* input,
+                              const TfLiteEvalTensor* filter,
+                              const TfLiteEvalTensor* bias,
+                              TfLiteEvalTensor* output) {
   const RuntimeShape& input_shape = tflite::micro::GetTensorShape(input);
   const RuntimeShape& filter_shape = tflite::micro::GetTensorShape(filter);
-  /* TODO(b/277112516):Dilation is currently not supported on HiFi 4 NN
-  Library */
-  if ((params.dilation_width_factor == 1) &&
-      (params.dilation_height_factor == 1) &&
-      input_shape.Dims(1) >= filter_shape.Dims(1) &&
-      input_shape.Dims(2) >= filter_shape.Dims(2)) {
-    const int32_t input_offset = -data.reference_op_data.input_zero_point;
-    const int32_t output_offset = data.reference_op_data.output_zero_point;
-    const int stride_width = params.stride_width;
-    const int stride_height = params.stride_height;
-    const int pad_width = data.reference_op_data.padding.width;
-    const int pad_height = data.reference_op_data.padding.height;
-    const int32_t output_activation_min =
-        data.reference_op_data.output_activation_min;
-    const int32_t output_activation_max =
-        data.reference_op_data.output_activation_max;
+  const int32_t input_offset = -data.reference_op_data.input_zero_point;
+  const int32_t output_offset = data.reference_op_data.output_zero_point;
+  const int stride_width = params.stride_width;
+  const int stride_height = params.stride_height;
+  const int pad_width = data.reference_op_data.padding.width;
+  const int pad_height = data.reference_op_data.padding.height;
+  const int32_t output_activation_min =
+      data.reference_op_data.output_activation_min;
+  const int32_t output_activation_max =
+      data.reference_op_data.output_activation_max;
 
-    const RuntimeShape& output_shape = tflite::micro::GetTensorShape(output);
-    const int batches = MatchingDim(input_shape, 0, output_shape, 0);
-    const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3);
-    const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3);
-    const int input_height = input_shape.Dims(1);
-    const int input_width = input_shape.Dims(2);
-    const int filter_height = filter_shape.Dims(1);
-    const int filter_width = filter_shape.Dims(2);
-    const int output_height = output_shape.Dims(1);
-    const int output_width = output_shape.Dims(2);
+  const RuntimeShape& output_shape = tflite::micro::GetTensorShape(output);
+  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
+  const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3);
+  const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3);
+  const int input_height = input_shape.Dims(1);
+  const int input_width = input_shape.Dims(2);
+  const int filter_height = filter_shape.Dims(1);
+  const int filter_width = filter_shape.Dims(2);
+  const int output_height = output_shape.Dims(1);
+  const int output_width = output_shape.Dims(2);
 
-    const int8_t* input_data = tflite::micro::GetTensorData<int8_t>(input);
-    const int8_t* filter_data = tflite::micro::GetTensorData<int8_t>(filter);
-    const int32_t* bias_data = tflite::micro::GetTensorData<int32_t>(bias);
-    int8_t* output_data = tflite::micro::GetTensorData<int8_t>(output);
+  const int8_t* input_data = tflite::micro::GetTensorData<int8_t>(input);
+  const int32_t* bias_data = tflite::micro::GetTensorData<int32_t>(bias);
+  int8_t* output_data = tflite::micro::GetTensorData<int8_t>(output);
 
-    int output_data_format = 0;
-    int out_length = output_height * output_width * output_depth;
-
-    if (filter_height == 1 && filter_width == 1) {
-      for (int batch = 0; batch < batches; ++batch) {
-        int8_t* p_out_temp;
-        p_out_temp = &output_data[batch * out_length];
-
-        TF_LITE_ENSURE_EQ(
-            context,
-
-            xa_nn_conv2d_pointwise_per_chan_sym8sxasym8s(
-                p_out_temp, const_cast<WORD8*>(filter_data),
-                const_cast<WORD8*>(&input_data[batch * input_height *
-                                               input_width * input_depth]),
-                const_cast<WORD32*>(bias_data), input_height, input_width,
-                input_depth, output_depth, input_offset,
-                data.reference_op_data.per_channel_output_multiplier,
-                data.reference_op_data.per_channel_output_shift, output_offset,
-                output_data_format),
-            0);
-
-        TF_LITE_ENSURE_EQ(context,
-                          xa_nn_vec_activation_min_max_8_8(
-                              p_out_temp, p_out_temp, output_activation_min,
-                              output_activation_max, out_length),
-                          0);
-      }
-    } else {
-      void* p_scratch = static_cast<void*>(
-          context->GetScratchBuffer(context, data.scratch_tensor_index));
-
-      for (int batch = 0; batch < batches; ++batch) {
-        int8_t* p_out_temp;
-        p_out_temp = &output_data[batch * out_length];
-
-        {
-          TF_LITE_ENSURE_EQ(
-              context,
-              xa_nn_conv2d_std_per_chan_sym8sxasym8s(
-                  p_out_temp,
-                  &input_data[batch * input_height * input_width * input_depth],
-                  const_cast<int8_t*>(filter_data),  // filter_data,
-                  bias_data, input_height, input_width, input_depth,
-                  filter_height, filter_width, output_depth, stride_width,
-                  stride_height, pad_width, pad_height, output_height,
-                  output_width, input_offset,
-                  data.reference_op_data.per_channel_output_multiplier,
-                  data.reference_op_data.per_channel_output_shift,
-                  output_offset, output_data_format,
-                  static_cast<void*>(p_scratch)),
-              0);
-        }
-
-        TF_LITE_ENSURE_EQ(context,
-                          xa_nn_vec_activation_min_max_8_8(
-                              p_out_temp, p_out_temp, output_activation_min,
-                              output_activation_max, out_length),
-                          0);
-      }
-    }
-    return kTfLiteOk;
+  const int8_t* filter_data;
+  if (filter->type == kTfLiteInt4) {
+    int8_t* unpacked_filter_data =
+        static_cast<int8_t*>(context->GetScratchBuffer(
+            context, data.reference_op_data.filter_buffer_index));
+    tflite::tensor_utils::UnpackDenseInt4IntoInt8(
+        tflite::micro::GetTensorData<int8_t>(filter),
+        tflite::micro::GetTensorShape(filter).FlatSize(), unpacked_filter_data);
+    filter_data = unpacked_filter_data;
+  } else {
+    filter_data = tflite::micro::GetTensorData<int8_t>(filter);
   }
 
-  reference_integer_ops::ConvPerChannel(
-      ConvParamsQuantized(params, data.reference_op_data),
-      data.reference_op_data.per_channel_output_multiplier,
-      data.reference_op_data.per_channel_output_shift,
-      tflite::micro::GetTensorShape(input),
-      tflite::micro::GetTensorData<int8_t>(input),
-      tflite::micro::GetTensorShape(filter),
-      tflite::micro::GetTensorData<int8_t>(filter),
-      tflite::micro::GetTensorShape(bias),
-      tflite::micro::GetTensorData<int32_t>(bias),
-      tflite::micro::GetTensorShape(output),
-      tflite::micro::GetTensorData<int8_t>(output));
+  int output_data_format = 0;
+  int out_length = output_height * output_width * output_depth;
+
+  if (filter_height == 1 && filter_width == 1) {
+    for (int batch = 0; batch < batches; ++batch) {
+      int8_t* p_out_temp;
+      p_out_temp = &output_data[batch * out_length];
+
+      TF_LITE_ENSURE_EQ(
+          context,
+
+          xa_nn_conv2d_pointwise_per_chan_sym8sxasym8s(
+              p_out_temp, const_cast<WORD8*>(filter_data),
+              const_cast<WORD8*>(&input_data[batch * input_height *
+                                             input_width * input_depth]),
+              const_cast<WORD32*>(bias_data), input_height, input_width,
+              input_depth, output_depth, input_offset,
+              data.reference_op_data.per_channel_output_multiplier,
+              data.reference_op_data.per_channel_output_shift, output_offset,
+              output_data_format),
+          0);
+
+      TF_LITE_ENSURE_EQ(context,
+                        xa_nn_vec_activation_min_max_8_8(
+                            p_out_temp, p_out_temp, output_activation_min,
+                            output_activation_max, out_length),
+                        0);
+    }
+  } else {
+    void* p_scratch = static_cast<void*>(
+        context->GetScratchBuffer(context, data.scratch_tensor_index));
+
+    for (int batch = 0; batch < batches; ++batch) {
+      int8_t* p_out_temp;
+      p_out_temp = &output_data[batch * out_length];
+
+      {
+        TF_LITE_ENSURE_EQ(
+            context,
+            xa_nn_conv2d_std_per_chan_sym8sxasym8s(
+                p_out_temp,
+                &input_data[batch * input_height * input_width * input_depth],
+                const_cast<int8_t*>(filter_data),  // filter_data,
+                bias_data, input_height, input_width, input_depth,
+                filter_height, filter_width, output_depth, stride_width,
+                stride_height, pad_width, pad_height, output_height,
+                output_width, input_offset,
+                data.reference_op_data.per_channel_output_multiplier,
+                data.reference_op_data.per_channel_output_shift, output_offset,
+                output_data_format, static_cast<void*>(p_scratch)),
+            0);
+      }
+
+      TF_LITE_ENSURE_EQ(context,
+                        xa_nn_vec_activation_min_max_8_8(
+                            p_out_temp, p_out_temp, output_activation_min,
+                            output_activation_max, out_length),
+                        0);
+    }
+  }
+
   return kTfLiteOk;
 }
 
 }  // namespace tflite
-#endif  // defined(HIFI4) || defined(HIFI5)
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
diff --git a/tensorflow/lite/micro/kernels/xtensa/conv_int16_reference.cc b/tensorflow/lite/micro/kernels/xtensa/conv_int16_reference.cc
index 0d3c4a3..2492d4b 100644
--- a/tensorflow/lite/micro/kernels/xtensa/conv_int16_reference.cc
+++ b/tensorflow/lite/micro/kernels/xtensa/conv_int16_reference.cc
@@ -1,4 +1,4 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -23,16 +23,9 @@
 #include "tensorflow/lite/kernels/padding.h"
 #include "tensorflow/lite/micro/kernels/conv.h"
 #include "tensorflow/lite/micro/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/micro_log.h"
 
 namespace tflite {
-namespace {
-
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
-  return context->AllocatePersistentBuffer(context, sizeof(OpDataConv));
-}
-
-}  // namespace.
 
 TfLiteStatus ConvReferenceEvalInt16(TfLiteContext* context, TfLiteNode* node) {
   TFLITE_DCHECK(node->user_data != nullptr);
@@ -52,25 +45,37 @@
           ? tflite::micro::GetEvalInput(context, node, kConvBiasTensor)
           : nullptr;
 
-  reference_integer_ops::ConvPerChannel(
-      ConvParamsQuantized(params, op_data),
-      op_data.per_channel_output_multiplier, op_data.per_channel_output_shift,
-      tflite::micro::GetTensorShape(input),
-      tflite::micro::GetTensorData<int16_t>(input),
-      tflite::micro::GetTensorShape(filter),
-      tflite::micro::GetTensorData<int8_t>(filter),
-      tflite::micro::GetTensorShape(bias),
-      tflite::micro::GetTensorData<std::int64_t>(bias),
-      tflite::micro::GetTensorShape(output),
-      tflite::micro::GetTensorData<int16_t>(output));
-  return kTfLiteOk;
-}
+  if (bias == nullptr || bias->type == kTfLiteInt32) {
+    reference_integer_ops::ConvPerChannel(
+        ConvParamsQuantized(params, op_data),
+        op_data.per_channel_output_multiplier, op_data.per_channel_output_shift,
+        tflite::micro::GetTensorShape(input),
+        tflite::micro::GetTensorData<int16_t>(input),
+        tflite::micro::GetTensorShape(filter),
+        tflite::micro::GetTensorData<int8_t>(filter),
+        tflite::micro::GetTensorShape(bias),
+        tflite::micro::GetOptionalTensorData<std::int32_t>(bias),
+        tflite::micro::GetTensorShape(output),
+        tflite::micro::GetTensorData<int16_t>(output));
+  } else if (bias->type == kTfLiteInt64) {
+    reference_integer_ops::ConvPerChannel(
+        ConvParamsQuantized(params, op_data),
+        op_data.per_channel_output_multiplier, op_data.per_channel_output_shift,
+        tflite::micro::GetTensorShape(input),
+        tflite::micro::GetTensorData<int16_t>(input),
+        tflite::micro::GetTensorShape(filter),
+        tflite::micro::GetTensorData<int8_t>(filter),
+        tflite::micro::GetTensorShape(bias),
+        tflite::micro::GetOptionalTensorData<std::int64_t>(bias),
+        tflite::micro::GetTensorShape(output),
+        tflite::micro::GetTensorData<int16_t>(output));
+  } else {
+    MicroPrintf("Bias type %s (%d) not supported.",
+                TfLiteTypeGetName(bias->type), bias->type);
+    return kTfLiteError;
+  }
 
-// TODO(b/189981943): This variant can be used for a smaller binary
-// since the optimized conv implementation currently adds a lot to
-// the binary size (~30KB to text section).
-TFLMRegistration Register_CONV_2D_INT16REF() {
-  return tflite::micro::RegisterOp(Init, ConvPrepare, ConvReferenceEvalInt16);
+  return kTfLiteOk;
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/xtensa/conv_int8_int16.cc b/tensorflow/lite/micro/kernels/xtensa/conv_int8_int16.cc
new file mode 100644
index 0000000..ed64f01
--- /dev/null
+++ b/tensorflow/lite/micro/kernels/xtensa/conv_int8_int16.cc
@@ -0,0 +1,89 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/kernels/internal/common.h"
+#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
+#include "tensorflow/lite/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/kernels/xtensa/xtensa.h"
+#include "tensorflow/lite/micro/kernels/xtensa/xtensa_conv.h"
+
+namespace tflite {
+namespace {
+
+TfLiteStatus EvalInt8(TfLiteContext* context, TfLiteNode* node) {
+#if defined(HIFIMINI)
+  return ConvReferenceEvalInt8(context, node);
+#else
+  const auto& op_data = *(reinterpret_cast<XtensaConvOpData*>(node->user_data));
+  const auto& params =
+      *(reinterpret_cast<TfLiteConvParams*>(node->builtin_data));
+
+  const TfLiteEvalTensor* input =
+      tflite::micro::GetEvalInput(context, node, kConvInputTensor);
+  TfLiteEvalTensor* output =
+      tflite::micro::GetEvalOutput(context, node, kConvOutputTensor);
+  const TfLiteEvalTensor* filter =
+      tflite::micro::GetEvalInput(context, node, kConvWeightsTensor);
+  const TfLiteEvalTensor* bias =
+      tflite::micro::GetEvalInput(context, node, kConvBiasTensor);
+
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
+  return ConvEvalHifiInt8(context, node, params, op_data, input, filter, bias,
+                          output);
+#elif defined(VISION_P6)
+  return ConvEvalVision(context, node, params, op_data, input, filter, bias,
+                        output);
+#endif
+
+#endif  // defined(HIFIMINI)
+}
+
+TfLiteStatus EvalInt16(TfLiteContext* context, TfLiteNode* node) {
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
+  const auto& op_data = *(reinterpret_cast<XtensaConvOpData*>(node->user_data));
+  const auto& params =
+      *(reinterpret_cast<TfLiteConvParams*>(node->builtin_data));
+
+  const TfLiteEvalTensor* input =
+      tflite::micro::GetEvalInput(context, node, kConvInputTensor);
+  TfLiteEvalTensor* output =
+      tflite::micro::GetEvalOutput(context, node, kConvOutputTensor);
+  const TfLiteEvalTensor* filter =
+      tflite::micro::GetEvalInput(context, node, kConvWeightsTensor);
+  const TfLiteEvalTensor* bias =
+      tflite::micro::GetEvalInput(context, node, kConvBiasTensor);
+
+  return ConvEvalHifiInt16(context, node, params, op_data, input, filter, bias,
+                           output);
+#else
+  return ConvReferenceEvalInt16(context, node);
+#endif
+}
+
+}  // namespace
+
+TFLMRegistration Register_CONV_2D_INT8() {
+  return tflite::micro::RegisterOp(ConvInitXtensa, ConvPrepareXtensa, EvalInt8);
+}
+
+TFLMRegistration Register_CONV_2D_INT16() {
+  return tflite::micro::RegisterOp(ConvInitXtensa, ConvPrepareXtensa,
+                                   EvalInt16);
+}
+
+}  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/xtensa/conv_int8_reference.cc b/tensorflow/lite/micro/kernels/xtensa/conv_int8_reference.cc
index 80a42d9..6ac07ba 100644
--- a/tensorflow/lite/micro/kernels/xtensa/conv_int8_reference.cc
+++ b/tensorflow/lite/micro/kernels/xtensa/conv_int8_reference.cc
@@ -1,4 +1,4 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -16,6 +16,7 @@
 #include "tensorflow/lite/c/builtin_op_data.h"
 #include "tensorflow/lite/c/common.h"
 #include "tensorflow/lite/kernels/internal/common.h"
+#include "tensorflow/lite/kernels/internal/portable_tensor_utils.h"
 #include "tensorflow/lite/kernels/internal/quantization_util.h"
 #include "tensorflow/lite/kernels/internal/reference/integer_ops/conv.h"
 #include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
@@ -25,14 +26,6 @@
 #include "tensorflow/lite/micro/kernels/kernel_util.h"
 
 namespace tflite {
-namespace {
-
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
-  return context->AllocatePersistentBuffer(context, sizeof(OpDataConv));
-}
-
-}  // namespace.
 
 TfLiteStatus ConvReferenceEvalInt8(TfLiteContext* context, TfLiteNode* node) {
   TFLITE_DCHECK(node->user_data != nullptr);
@@ -52,17 +45,29 @@
           ? tflite::micro::GetEvalInput(context, node, kConvBiasTensor)
           : nullptr;
 
+  const int8_t* filter_data;
+  if (filter->type == kTfLiteInt4) {
+    int8_t* unpacked_filter_data = static_cast<int8_t*>(
+        context->GetScratchBuffer(context, op_data.filter_buffer_index));
+    tflite::tensor_utils::UnpackDenseInt4IntoInt8(
+        tflite::micro::GetTensorData<int8_t>(filter),
+        tflite::micro::GetTensorShape(filter).FlatSize(), unpacked_filter_data);
+    filter_data = unpacked_filter_data;
+  } else {
+    filter_data = tflite::micro::GetTensorData<int8_t>(filter);
+  }
+
   reference_integer_ops::ConvPerChannel(
       ConvParamsQuantized(params, op_data),
       op_data.per_channel_output_multiplier, op_data.per_channel_output_shift,
       tflite::micro::GetTensorShape(input),
       tflite::micro::GetTensorData<int8_t>(input),
-      tflite::micro::GetTensorShape(filter),
-      tflite::micro::GetTensorData<int8_t>(filter),
+      tflite::micro::GetTensorShape(filter), filter_data,
       tflite::micro::GetTensorShape(bias),
-      tflite::micro::GetTensorData<int32_t>(bias),
+      tflite::micro::GetOptionalTensorData<int32_t>(bias),
       tflite::micro::GetTensorShape(output),
       tflite::micro::GetTensorData<int8_t>(output));
+
   return kTfLiteOk;
 }
 
@@ -70,7 +75,8 @@
 // since the optimized conv implementation currently adds a lot to
 // the binary size (~30KB to text section).
 TFLMRegistration Register_CONV_2D_INT8REF() {
-  return tflite::micro::RegisterOp(Init, ConvPrepare, ConvReferenceEvalInt8);
+  return tflite::micro::RegisterOp(ConvInit, ConvPrepare,
+                                   ConvReferenceEvalInt8);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/xtensa/conv_vision.cc b/tensorflow/lite/micro/kernels/xtensa/conv_vision.cc
index e4f0d49..812ab60 100644
--- a/tensorflow/lite/micro/kernels/xtensa/conv_vision.cc
+++ b/tensorflow/lite/micro/kernels/xtensa/conv_vision.cc
@@ -1,4 +1,4 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -21,6 +21,7 @@
 #include "tensorflow/lite/c/common.h"
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/portable_tensor_utils.h"
+#include "tensorflow/lite/kernels/internal/reference/integer_ops/conv.h"
 #include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
 #include "tensorflow/lite/kernels/kernel_util.h"
 #include "tensorflow/lite/micro/kernels/conv.h"
@@ -32,29 +33,22 @@
 namespace tflite {
 
 TfLiteStatus ConvPrepareVision(TfLiteContext* context, TfLiteNode* node) {
-  TFLITE_DCHECK(node->user_data != nullptr);
-  TFLITE_DCHECK(node->builtin_data != nullptr);
+  MicroContext* micro_context = GetMicroContext(context);
+  TfLiteTensor* input =
+      micro_context->AllocateTempInputTensor(node, kConvInputTensor);
+  TfLiteTensor* bias =
+      micro_context->AllocateTempInputTensor(node, kConvBiasTensor);
+  const uint32_t input_height = SizeOfDimension(input, 1);
+  const uint32_t input_width = SizeOfDimension(input, 2);
 
   XtensaConvOpData* data = reinterpret_cast<XtensaConvOpData*>(node->user_data);
   const auto& params =
       *(reinterpret_cast<const TfLiteConvParams*>(node->builtin_data));
 
-  MicroContext* micro_context = GetMicroContext(context);
   TfLiteTensor* output =
       micro_context->AllocateTempOutputTensor(node, kConvOutputTensor);
-  TF_LITE_ENSURE(context, output != nullptr);
-  TfLiteTensor* input =
-      micro_context->AllocateTempInputTensor(node, kConvInputTensor);
-  TF_LITE_ENSURE(context, input != nullptr);
   TfLiteTensor* filter =
       micro_context->AllocateTempInputTensor(node, kConvWeightsTensor);
-  TF_LITE_ENSURE(context, filter != nullptr);
-  TfLiteTensor* bias =
-      micro_context->AllocateTempInputTensor(node, kConvBiasTensor);
-  TF_LITE_ENSURE(context, bias != nullptr);
-
-  const uint32_t input_height = SizeOfDimension(input, 1);
-  const uint32_t input_width = SizeOfDimension(input, 2);
 
   const uint32_t output_height = SizeOfDimension(output, 1);
   const uint32_t output_width = SizeOfDimension(output, 2);
@@ -62,6 +56,15 @@
   const uint32_t filter_height = SizeOfDimension(filter, 1);
   const uint32_t filter_width = SizeOfDimension(filter, 2);
 
+  // At this time it is unclear if per channel quantization is correctly
+  // supported by the optimized vision P6 implementation or not. For now, we are
+  // manually adding a flag to switch to the reference implementation for
+  // per-channel conv.
+  // See http://b/270720625 for more details.
+  data->is_per_channel_quantized =
+      reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params)
+          ->scale->size > 1;
+
   // Dynamically allocate per-channel quantization parameters.
   const int num_channels = SizeOfDimension(filter, kConvQuantizedDimension);
   data->per_channel_output_shift_int8 = static_cast<int8_t*>(
@@ -97,7 +100,6 @@
     tflite::tensor_utils::UnpackDenseInt4IntoInt8(
         GetTensorData<int8_t>(filter), GetTensorShape(filter).FlatSize(),
         GetTensorData<int8_t>(&filter_int8));
-
   } else {
     filter_int8 = *filter;
   }
@@ -142,14 +144,17 @@
   if (status) {
     return kTfLiteError;
   }
+
   if (filter->type == kTfLiteInt4) {
     micro_context->DeallocateTempBuffer(GetTensorData<uint8_t>(&filter_int8));
   }
+
   micro_context->DeallocateTempTfLiteTensor(output);
   micro_context->DeallocateTempTfLiteTensor(input);
   micro_context->DeallocateTempTfLiteTensor(filter);
-  micro_context->DeallocateTempTfLiteTensor(bias);
-
+  if (bias != nullptr) {
+    micro_context->DeallocateTempTfLiteTensor(bias);
+  }
   return kTfLiteOk;
 }
 
@@ -170,7 +175,9 @@
          data.reorder_coefficient_bias, data.reorder_coefficient_bias_size,
          data.reference_op_data.per_channel_output_multiplier,
          data.per_channel_output_shift_int8, num_channels);
+
   return kTfLiteOk;
 }
+
 }  // namespace tflite
 #endif  // defined(VISION_P6)
diff --git a/tensorflow/lite/micro/kernels/xtensa/depthwise_conv.cc b/tensorflow/lite/micro/kernels/xtensa/depthwise_conv.cc
index f3acb8d..8536ff7 100644
--- a/tensorflow/lite/micro/kernels/xtensa/depthwise_conv.cc
+++ b/tensorflow/lite/micro/kernels/xtensa/depthwise_conv.cc
@@ -61,9 +61,9 @@
   }
   micro_context->DeallocateTempTfLiteTensor(input);
 
-#if defined(HIFI4) || defined(HIFI5)
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
   TF_LITE_ENSURE_OK(context, DepthwiseConvPrepareHifi(context, node));
-#endif  // defined(HIFI4) || defined(HIFI5)
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
 
 #if defined(VISION_P6)
   TF_LITE_ENSURE_OK(context, DepthwiseConvPrepareVision(context, node));
@@ -97,7 +97,7 @@
     case kTfLiteInt8: {
       switch (filter_int8.type) {
         case kTfLiteInt8: {
-#if defined(HIFI4) || defined(HIFI5)
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
           DepthwiseConvEvalHifi(context, node, params, op_data, input,
                                 &filter_int8, bias, output);
 #elif defined(VISION_P6)
@@ -116,7 +116,7 @@
               tflite::micro::GetOptionalTensorData<int32_t>(bias),
               tflite::micro::GetTensorShape(output),
               tflite::micro::GetTensorData<int8_t>(output));
-#endif  // defined(HIFI4) || defined(HIFI5)
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
           break;
         }
         default:
diff --git a/tensorflow/lite/micro/kernels/xtensa/depthwise_conv_hifi.cc b/tensorflow/lite/micro/kernels/xtensa/depthwise_conv_hifi.cc
index 05dab48..8c2052b 100644
--- a/tensorflow/lite/micro/kernels/xtensa/depthwise_conv_hifi.cc
+++ b/tensorflow/lite/micro/kernels/xtensa/depthwise_conv_hifi.cc
@@ -28,7 +28,7 @@
 #include "tensorflow/lite/micro/kernels/xtensa/xtensa.h"
 #include "tensorflow/lite/micro/kernels/xtensa/xtensa_depthwise_conv.h"
 
-#if defined(HIFI4) || defined(HIFI5)
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
 namespace tflite {
 TfLiteStatus DepthwiseConvPrepareHifi(TfLiteContext* context,
                                       TfLiteNode* node) {
@@ -187,4 +187,4 @@
   return kTfLiteOk;
 }
 }  // namespace tflite
-#endif  // defined(HIFI4) || defined(HIFI5)
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
diff --git a/tensorflow/lite/micro/kernels/xtensa/dequantize.cc b/tensorflow/lite/micro/kernels/xtensa/dequantize.cc
new file mode 100644
index 0000000..f2f4a7d
--- /dev/null
+++ b/tensorflow/lite/micro/kernels/xtensa/dequantize.cc
@@ -0,0 +1,118 @@
+/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/lite/kernels/internal/reference/dequantize.h"
+
+#include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/kernels/internal/quantization_util.h"
+#include "tensorflow/lite/kernels/internal/reference/quantize.h"
+#include "tensorflow/lite/kernels/internal/reference/requantize.h"
+#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
+#include "tensorflow/lite/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/kernels/dequantize.h"
+#include "tensorflow/lite/micro/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/kernels/xtensa/xtensa.h"
+#include "tensorflow/lite/micro/micro_log.h"
+
+namespace tflite {
+
+void* DequantizeInit(TfLiteContext* context, const char* buffer,
+                     size_t length) {
+  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
+  return context->AllocatePersistentBuffer(context, sizeof(DequantizeOpData));
+}
+
+TfLiteStatus DequantizeEval(TfLiteContext* context, TfLiteNode* node) {
+  TFLITE_DCHECK(node->user_data != nullptr);
+  DequantizeOpData* data = static_cast<DequantizeOpData*>(node->user_data);
+
+  const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0);
+  TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0);
+
+  // Output type ensured to be kTfLiteFloat32 at the Prepare stage
+  TFLITE_DCHECK(output->type == kTfLiteFloat32);
+
+  switch (input->type) {
+    case kTfLiteInt8: {
+#if HIFI_VFPU && (defined(HIFI5) || defined(HIFI4) || defined(HIFI3))
+      int err;
+      const int8_t* input_data_ptr;
+      float* output_data_ptr;
+      const int flat_size =
+          MatchingFlatSize(tflite::micro::GetTensorShape(input),
+                           tflite::micro::GetTensorShape(output));
+      input_data_ptr = tflite::micro::GetTensorData<int8_t>(input);
+      output_data_ptr = tflite::micro::GetTensorData<float>(output);
+
+      err = xa_nn_elm_dequantize_asym8s_f32(
+          output_data_ptr, input_data_ptr, data->quantization_params.zero_point,
+          data->quantization_params.scale, flat_size);
+      TF_LITE_ENSURE(context, (err == 0));
+#else   //  HIFI_VFPU && (defined(HIFI5) || defined(HIFI4) || defined(HIFI3))
+      reference_ops::Dequantize(data->quantization_params,
+                                tflite::micro::GetTensorShape(input),
+                                tflite::micro::GetTensorData<int8_t>(input),
+                                tflite::micro::GetTensorShape(output),
+                                tflite::micro::GetTensorData<float>(output));
+#endif  //  HIFI_VFPU && (defined(HIFI5) || defined(HIFI4) || defined(HIFI3))
+      break;
+    }
+    case kTfLiteInt16: {
+#if HIFI_VFPU && (defined(HIFI5) || defined(HIFI4) || defined(HIFI3))
+      int err;
+      const int16_t* input_data_ptr;
+      float* output_data_ptr;
+      const RuntimeShape& input_shape = tflite::micro::GetTensorShape(input);
+      const RuntimeShape& output_shape = tflite::micro::GetTensorShape(output);
+      const int flat_size = MatchingFlatSize(input_shape, output_shape);
+      input_data_ptr = tflite::micro::GetTensorData<int16_t>(input);
+      output_data_ptr = tflite::micro::GetTensorData<float>(output);
+      err = xa_nn_elm_dequantize_asym16s_f32(
+          output_data_ptr, input_data_ptr, data->quantization_params.zero_point,
+          data->quantization_params.scale, flat_size);
+      TF_LITE_ENSURE(context, (err == 0));
+#else   // HIFI_VFPU && (defined(HIFI5) || defined(HIFI4) || defined(HIFI3))
+      reference_ops::Dequantize(data->quantization_params,
+                                tflite::micro::GetTensorShape(input),
+                                tflite::micro::GetTensorData<int16_t>(input),
+                                tflite::micro::GetTensorShape(output),
+                                tflite::micro::GetTensorData<float>(output));
+#endif  // HIFI_VFPU && (defined(HIFI5) || defined(HIFI4) || defined(HIFI3))
+      break;
+    }
+    case kTfLiteUInt8:
+      reference_ops::Dequantize(data->quantization_params,
+                                tflite::micro::GetTensorShape(input),
+                                tflite::micro::GetTensorData<uint8_t>(input),
+                                tflite::micro::GetTensorShape(output),
+                                tflite::micro::GetTensorData<float>(output));
+      break;
+    default:
+      MicroPrintf("Input %s, output %s not supported.",
+                  TfLiteTypeGetName(input->type),
+                  TfLiteTypeGetName(output->type));
+      return kTfLiteError;
+  }
+
+  return kTfLiteOk;
+}
+
+TFLMRegistration Register_DEQUANTIZE() {
+  return tflite::micro::RegisterOp(DequantizeInit, DequantizePrepare,
+                                   DequantizeEval);
+}
+
+}  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/xtensa/fully_connected.cc b/tensorflow/lite/micro/kernels/xtensa/fully_connected.cc
index 1395fc3..df54580 100644
--- a/tensorflow/lite/micro/kernels/xtensa/fully_connected.cc
+++ b/tensorflow/lite/micro/kernels/xtensa/fully_connected.cc
@@ -125,4 +125,8 @@
                                    XtensaPrepareFullyConnected, Eval);
 }
 
+TFLMInferenceRegistration RegisterInference_FULLY_CONNECTED() {
+  return tflite::micro::RegisterOp(Eval);
+}
+
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/xtensa/fully_connected_int8.cc b/tensorflow/lite/micro/kernels/xtensa/fully_connected_int8.cc
index b53afa4..f850c0c 100644
--- a/tensorflow/lite/micro/kernels/xtensa/fully_connected_int8.cc
+++ b/tensorflow/lite/micro/kernels/xtensa/fully_connected_int8.cc
@@ -57,7 +57,7 @@
                              tflite::micro::GetTensorShape(bias), bias_data,
                              tflite::micro::GetTensorShape(output),
                              tflite::micro::GetTensorData<int8_t>(output));
-#elif defined(HIFI4) || defined(HIFI5)
+#elif defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
   const RuntimeShape& output_shape = tflite::micro::GetTensorShape(output);
   const int num_batches =
       FlatSizeSkipDim(output_shape, output_shape.DimensionsCount() - 1);
@@ -103,7 +103,7 @@
       tflite::micro::GetTensorShape(bias), bias_data,
       tflite::micro::GetTensorShape(output),
       tflite::micro::GetTensorData<int8_t>(output));
-#endif  // defined(HIFI4) || defined(HIFI5)
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
 
   return kTfLiteOk;
 }
diff --git a/tensorflow/lite/micro/kernels/xtensa/leaky_relu.cc b/tensorflow/lite/micro/kernels/xtensa/leaky_relu.cc
index 857a488..c1ed1d6 100644
--- a/tensorflow/lite/micro/kernels/xtensa/leaky_relu.cc
+++ b/tensorflow/lite/micro/kernels/xtensa/leaky_relu.cc
@@ -76,7 +76,7 @@
       return kTfLiteOk;
     } break;
     case kTfLiteInt16: {
-#if defined(HIFI4)
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
       const RuntimeShape& input_shape = tflite::micro::GetTensorShape(input);
       const RuntimeShape& output_shape = tflite::micro::GetTensorShape(output);
       const int flat_size = MatchingFlatSize(input_shape, output_shape);
@@ -89,7 +89,7 @@
       if (err != 0) return kTfLiteError;
 #else
       QuantizeLeakyRelu<int16_t>(data, input, output);
-#endif  // defined(HIFI4)
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
       return kTfLiteOk;
     } break;
     default:
diff --git a/tensorflow/lite/micro/kernels/xtensa/logistic.cc b/tensorflow/lite/micro/kernels/xtensa/logistic.cc
index 41e6f3d..2ddf82e 100644
--- a/tensorflow/lite/micro/kernels/xtensa/logistic.cc
+++ b/tensorflow/lite/micro/kernels/xtensa/logistic.cc
@@ -54,7 +54,7 @@
 
   switch (input->type) {
     case kTfLiteFloat32: {
-#if HIFI_VFPU && (defined(HIFI4) || defined(HIFI5))
+#if HIFI_VFPU && (defined(HIFI3) || defined(HIFI4) || defined(HIFI5))
       const RuntimeShape& input_shape = tflite::micro::GetTensorShape(input);
       const RuntimeShape& output_shape = tflite::micro::GetTensorShape(output);
       const int flat_size = MatchingFlatSize(input_shape, output_shape);
@@ -70,11 +70,11 @@
                               tflite::micro::GetTensorData<float>(input),
                               tflite::micro::GetTensorShape(output),
                               tflite::micro::GetTensorData<float>(output));
-#endif  // HIFI_VFPU && (defined(HIFI4) || defined(HIFI5))
+#endif  // HIFI_VFPU && (defined(HIFI3) || defined(HIFI4) || defined(HIFI5))
       break;
     }
     case kTfLiteInt8: {
-#if defined(HIFI4) || defined(HIFI5)
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
       const RuntimeShape& input_shape = tflite::micro::GetTensorShape(input);
       const RuntimeShape& output_shape = tflite::micro::GetTensorShape(output);
       const int flat_size = MatchingFlatSize(input_shape, output_shape);
@@ -96,7 +96,7 @@
           data->input_multiplier, data->input_left_shift,
           NumElements(input->dims), tflite::micro::GetTensorData<int8_t>(input),
           tflite::micro::GetTensorData<int8_t>(output));
-#endif  // defined(HIFI4) || defined(HIFI5)
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
       break;
     }
     case kTfLiteInt16: {
diff --git a/tensorflow/lite/micro/kernels/xtensa/lstm_eval.cc b/tensorflow/lite/micro/kernels/xtensa/lstm_eval.cc
index 9065388..94e76a1 100644
--- a/tensorflow/lite/micro/kernels/xtensa/lstm_eval.cc
+++ b/tensorflow/lite/micro/kernels/xtensa/lstm_eval.cc
@@ -1,4 +1,4 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -14,1204 +14,469 @@
 ==============================================================================*/
 #include "tensorflow/lite/micro/kernels/xtensa/lstm_eval.h"
 
-#include <math.h>
-#include <string.h>
+#include <limits>
 
-#include <algorithm>
-#include <cstdint>
-#include <memory>
-#include <vector>
-
-#include "tensorflow/lite/c/builtin_op_data.h"
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/kernels/internal/compatibility.h"
-#include "tensorflow/lite/kernels/internal/portable_tensor_utils.h"
-#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
-#include "tensorflow/lite/kernels/op_macros.h"
+#include "tensorflow/lite/kernels/internal/reference/fully_connected.h"
+#include "tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h"
+#include "tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h"
+#include "tensorflow/lite/kernels/internal/reference/integer_ops/mul.h"
+#include "tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h"
+#include "tensorflow/lite/kernels/internal/reference/logistic.h"
+#include "tensorflow/lite/kernels/internal/reference/mul.h"
+#include "tensorflow/lite/kernels/internal/reference/tanh.h"
+#include "tensorflow/lite/kernels/internal/types.h"
 #include "tensorflow/lite/micro/kernels/xtensa/xtensa.h"
 
 namespace tflite {
-namespace ops {
-namespace micro {
-namespace lstm_eval {
-namespace {
 
-// Calculates a single LSTM gate, int8x8_16 version.
-// Implements the same functionality as CalculateLstmGateFloat.
-void CalculateLstmGateInteger8x8_16(
-    // Input and weights
-    const int8_t* input, const int8_t* input_to_gate_weights,
-    const int32_t* input_to_gate_bias, const int32_t input_to_gate_scale_a,
-    const int32_t input_to_gate_scale_b,
-    // Output state and weights
-    const int8_t* output_state, const int8_t* recurrent_to_gate_weights,
-    const int32_t* recurrent_to_gate_bias,
-    const int32_t recurrent_to_gate_scale_a,
-    const int32_t recurrent_to_gate_scale_b,
-    // Cell state and weights
-    const int16_t* cell_state, const int16_t* cell_to_gate_weights,
-    const int32_t cell_to_gate_scale_a, const int32_t cell_to_gate_scale_b,
-    // Layer normalization parameters (layer norm LSTM)
-    const int16_t* layer_norm_coefficients, const int32_t* layer_norm_bias,
-    const int32_t layer_norm_input_scale_a,
-    const int32_t layer_norm_input_scale_b,
-    const int32_t layer_norm_variance_guard,
-    // Array sizes
-    const int n_batch, const int n_input, const int n_output, const int n_cell,
-    const TfLiteFusedActivation activation,
-    // Output
-    int16_t* gate,
-    // Parameters for performance optimizations
-    // CpuBackendContext* context,
-    // Scratch arrays
-    int32_t* scratch5) {
-  const bool use_peephole = (cell_to_gate_weights != nullptr);
-  const bool use_layer_norm = (layer_norm_coefficients != nullptr);
-
-  // Initialize scratch buffers with zeros. Note that unlike float and hybrid
-  // versions, bias is only used in layer normalization.
-  std::fill_n(gate, n_batch * n_cell, 0);
-#if !defined(HIFI5)
-  // For each batch and cell: compute input_weight * input.
-  tensor_utils::PortableMatrixBatchVectorMultiplyAccumulate(
-      input, input_to_gate_bias, input_to_gate_weights, input_to_gate_scale_a,
-      input_to_gate_scale_b, n_batch, n_input, n_cell, 0, scratch5, gate, NULL);
-#else
-  {
-    xa_nn_matXvec_acc_batch_sym8sx8_asym16s(
-        gate, input_to_gate_weights, input, input_to_gate_bias, n_cell, n_input,
-        n_input, input_to_gate_scale_a, input_to_gate_scale_b, 0, n_batch);
+LstmTensors::LstmTensors(TfLiteContext* context, TfLiteNode* node) {
+  micro_context_ = GetMicroContext(context);
+  // 24 internal tensors. see lstm_shared.h for tensor names
+  for (size_t i = 0; i < 24; i++) {
+    internal_tensors_[i] = micro_context_->AllocateTempInputTensor(node, i);
   }
-#endif  // !defined(HIFI5)
-// Note: no aux_input.
-
-// For each batch and cell: compute recurrent_weight * output_state.
-#if !defined(HIFI5)
-  tensor_utils::PortableMatrixBatchVectorMultiplyAccumulate(
-      output_state, recurrent_to_gate_bias, recurrent_to_gate_weights,
-      recurrent_to_gate_scale_a, recurrent_to_gate_scale_b, n_batch, n_output,
-      n_cell, 0, scratch5, gate, NULL);
-#else
-  {
-    xa_nn_matXvec_acc_batch_sym8sx8_asym16s(
-        gate, recurrent_to_gate_weights, output_state, recurrent_to_gate_bias,
-        n_cell, n_output, n_output, recurrent_to_gate_scale_a,
-        recurrent_to_gate_scale_b, 0, n_batch);
-  }
-#endif  // !defined(HIFI5)
-  // For each batch and cell: compute cell_weight * cell_state (peephole LSTM)
-  if (use_peephole) {
-    tensor_utils::PortableVectorBatchVectorCwiseProductAccumulate(
-        cell_to_gate_weights, n_output, cell_state, n_batch,
-        cell_to_gate_scale_a, cell_to_gate_scale_b, gate);
-  }
-  // Do layer normalization (if layer norm LSTM)
-  if (use_layer_norm) {
-    tensor_utils::PortableApplyLayerNorm(
-        gate, layer_norm_coefficients, layer_norm_bias,
-        layer_norm_input_scale_a, layer_norm_input_scale_b,
-        layer_norm_variance_guard, n_batch, n_cell, gate);
-  }
-  // Apply activation
-  switch (activation) {
-    case kTfLiteActSigmoid:
-#if !defined(HIFI5)
-      tensor_utils::PortableApplySigmoid(gate, n_batch, n_cell, gate);
-#else
-      xa_nn_vec_sigmoid_16_16(gate, gate, n_batch * n_cell);
-#endif  // !defined(HIFI5)
-      break;
-    case kTfLiteActTanh:
-#if !defined(HIFI5)
-      tensor_utils::PortableApplyTanh(3, gate, n_batch, n_cell, gate);
-#else
-      xa_nn_vec_tanh_16_16(gate, gate, 3, n_batch * n_cell);
-#endif  // !defined(HIFI5)
-      break;
-    default:
-      // Only Sigmoid or Tanh is used.
-      TFLITE_ASSERT_FALSE;
-  }
+  output_tensor_ =
+      micro_context_->AllocateTempOutputTensor(node, kLstmOutputTensor);
 }
 
-// Updates the LSTM cell state, used by both integer LSTM versions.
-// Also see UpdateLstmCellFloat.
-//
-// Parameters:
-//  - n_batch, n_cell: sizes of vectors
-//  - cell_state: input/output vector, size n_batch*n_cell
-//  - cell_state_scale: scaling factor of cell state.
-//  - input_gate: input vector, size n_batch*n_cell.
-//  - forget_gate: input/scratch vector, size n_batch*n_cell, always modified.
-//  - cell_gate: input vector, size n_batch*n_cell.
-//  - use_cifg: use 1-forget_gate instead of input_gate.
-//  - clip: if > 0, clip the resulting cell state to [-clip, +clip].
-void UpdateLstmCellInteger(int n_batch, int n_cell, int16_t* cell_state,
-                           int32_t cell_state_scale, const int16_t* input_gate,
-                           int16_t* forget_gate, const int16_t* cell_gate,
-                           bool use_cifg, int16_t clip) {
-#if !defined(HIFI5)
-  // Use the forget_gate array as scratch, as input_gate array is not allocated
-  // in CIFG case. (Be careful not to write to the scratch before reading the
-  // forget gate data.)
-  int16_t* scratch = forget_gate;
-
-  tensor_utils::PortableCwiseMul(forget_gate, cell_state, n_batch, n_cell, 15,
-                                 cell_state);
-  if (use_cifg) {
-    tensor_utils::PortableSub1Vector(forget_gate, n_batch * n_cell, scratch);
-    tensor_utils::PortableCwiseMul(scratch, cell_gate, n_batch, n_cell,
-                                   30 + cell_state_scale, scratch);
-  } else {
-    tensor_utils::PortableCwiseMul(input_gate, cell_gate, n_batch, n_cell,
-                                   30 + cell_state_scale, scratch);
-  }
-  tensor_utils::PortableCwiseAdd(cell_state, scratch, n_batch, n_cell,
-                                 cell_state);
-
-  if (clip > 0) {
-    tensor_utils::PortableCwiseClipping(cell_state, n_batch * n_cell, clip);
-  }
-#else
-  if (use_cifg) {
-    calc_cell_state_with_cifg(cell_state, forget_gate, cell_gate, 15,
-                              30 + cell_state_scale, clip, n_batch * n_cell);
-  } else {
-    calc_cell_state_without_cifg(cell_state, forget_gate, cell_gate, input_gate,
-                                 15, 30 + cell_state_scale, clip,
-                                 n_batch * n_cell);
-  }
-
-#endif  // !defined(HIFI5)
-}
-
-// Calculates the output state tensor of an LSTM step. See Float and hybrid
-// versions as well.
-//
-// Parameters:
-//  - n_batch: batches: the number of distinct vectors in each array.
-//  - n_cell, n_output: sizes of vectors.
-//  - cell_state, output_gate: input vectors, size n_batch*n_cell.
-//  - cell_state_scale: scaling of cell_state.
-//  - hidden_scale_[a|b]: effective scale of cell_state.*output_gate
-//  - hidden_zp: zero_point for cell_state.*output_gate
-//  - projection_weights, proj_scale_[a|b], projection_bias:
-//      constant inputs, describing projection matrix and bias.
-//  - output_state_zp: zero point of output_state. (Input, calibrated value.)
-//  - quantized_proj_clip: if > 0, clip the output of the projection.
-//  - output_state: output vector, size n_batch*n_output. Must be contiguous.
-//  - context: data for optimized MatrixBatchVectorMultiplyAccumulate.
-//  - scratch0: scratch area of size n_batch*n_cell
-//  - scratch1: scratch area of size n_batch*n_cell
-//  - scratch2: scratch area used by MatrixBatchVectorMultiplyAccumulate
-void CalculateLstmOutputInteger8x8_16(
-    int n_batch, int n_cell, int n_output, const int16_t* cell_state,
-    int32_t cell_state_scale, const int16_t* output_gate,
-    int32_t hidden_scale_a, int32_t hidden_scale_b, int32_t hidden_zp,
-    const int8_t* projection_weights, int32_t proj_scale_a,
-    int32_t proj_scale_b, const int32_t* projection_bias,
-    int32_t output_state_zp, int8_t quantized_proj_clip, int8_t* output_state,
-    int16_t* scratch0, int8_t* scratch1, int32_t* scratch2) {
-// Note: unlike float/hybrid, the activation is always Tanh.
-#if !defined(HIFI5)
-  tensor_utils::PortableApplyTanh(15 + cell_state_scale, cell_state, n_batch,
-                                  n_cell, scratch0);
-#else
-  xa_nn_vec_tanh_16_16(scratch0, cell_state, (15 + cell_state_scale),
-                       n_batch * n_cell);
-#endif  // !defined(HIFI5)
-
-#if !defined(HIFI5)
-  tensor_utils::PortableCwiseMul(output_gate, scratch0, hidden_scale_a,
-                                 hidden_scale_b, n_batch, n_cell, hidden_zp,
-                                 scratch1);
-#else
-  xa_nn_elm_mul_16x16_asym8s(scratch1, output_gate, scratch0, hidden_scale_a,
-                             hidden_scale_b, hidden_zp, n_batch * n_cell);
-#endif  // !defined(HIFI5)
-
-  const bool use_projection = (projection_weights != nullptr);
-
-  if (use_projection) {
-    // Note: no bias like in float/hybrid
-    std::fill_n(output_state, n_batch * n_output, 0);
-    tensor_utils::PortableMatrixBatchVectorMultiplyAccumulate(
-        scratch1, projection_bias, projection_weights, proj_scale_a,
-        proj_scale_b, n_batch, n_cell, n_output, output_state_zp, scratch2,
-        output_state, NULL);
-    if (quantized_proj_clip > 0) {
-      tensor_utils::PortableCwiseClipping(output_state, n_batch * n_output,
-                                          quantized_proj_clip);
-    }
-  } else {
-    std::copy_n(scratch1, n_batch * n_output, output_state);
-  }
-}
-
-// Calculates a single LSTM gate, int8x8_8 version.
-// Implements the same functionality as CalculateLstmGateFloat.
-void CalculateLstmGateInteger8x8_8(
-    // Inputs and weights
-    const int8_t* input, int32_t input_zp, const int8_t* input_to_gate_weight,
-    const int32_t input_to_gate_scale_a, const int32_t input_to_gate_scale_b,
-    const int32_t input_times_weights_scale_a,
-    const int32_t input_times_weights_scale_b,
-    const int32_t input_times_weights_zp,
-    // Output state and weights
-    const int8_t* output_state, const int32_t output_state_zp,
-    const int8_t* recurrent_to_gate_weight,
-    const int32_t recurrent_to_gate_scale_a,
-    const int32_t recurrent_to_gate_scale_b,
-    const int32_t output_state_times_weights_scale_a,
-    const int32_t output_state_times_weights_scale_b,
-    const int32_t output_state_times_weights_zp,
-    // Layer normalization parameters (layer norm LSTM)
-    const int16_t* layer_norm_gate_weight,
-    const int32_t layer_norm_gate_scale_a,
-    const int32_t layer_norm_gate_scale_b, const int32_t* gate_bias,
-    // Array sizes
-    const int n_batch, const int n_input, const int n_output, const int n_cell,
-    const TfLiteFusedActivation activation,
-    // Output
-    int16_t* gate,
-    // Scratch arrays, both sized n_batch*n_cell
-    int8_t* scratch0, int8_t* scratch1) {
-  // Multiply input * input_weights => scratch0
-  tensor_utils::PortableMatrixBatchVectorMultiply(
-      input, input_zp, input_to_gate_weight, input_to_gate_scale_a,
-      input_to_gate_scale_b, n_batch, n_input, n_cell, scratch0,
-      input_times_weights_zp);
-  // Multiply output_state * recurrent_weights => scratch1
-  tensor_utils::PortableMatrixBatchVectorMultiply(
-      output_state, output_state_zp, recurrent_to_gate_weight,
-      recurrent_to_gate_scale_a, recurrent_to_gate_scale_b, n_batch, n_output,
-      n_cell, scratch1, output_state_times_weights_zp);
-  // Add scratch0 + scratch1 => gate
-  tensor_utils::PortableTwoGateSaturatingAdd(
-      scratch0, input_times_weights_zp, scratch1, output_state_times_weights_zp,
-      input_times_weights_scale_a, input_times_weights_scale_b,
-      output_state_times_weights_scale_a, output_state_times_weights_scale_b,
-      n_batch, n_cell, gate);
-  // Apply layer normalization.
-  tensor_utils::PortableApplyLayerNormFloat(
-      gate, layer_norm_gate_weight, layer_norm_gate_scale_a,
-      layer_norm_gate_scale_b, gate_bias, n_batch, n_cell, gate);
-  // Apply activation.
-  switch (activation) {
-    case kTfLiteActSigmoid:
-      tensor_utils::PortableApplySigmoidFloat(gate, n_batch, n_cell, gate);
-      break;
-    case kTfLiteActTanh:
-      tensor_utils::PortableApplyTanhFloat(gate, n_batch, n_cell, -12, gate);
-      break;
-    default:
-      // Only Sigmoid or Tanh is used.
-      TFLITE_ASSERT_FALSE;
-  }
-}
-
-// Calculates the output state tensor of an LSTM step. See Float and hybrid
-// versions as well.
-//
-// Parameters:
-//  - n_batch: batches: the number of distinct vectors in each array.
-//  - n_cell, n_output: sizes of vectors.
-//  - cell_state, output_gate: input vectors, size n_batch*n_cell.
-//  - projection_weights, proj_scale_[a|b], projection_bias:
-//      constant inputs, describing projection matrix and bias.
-//  - output_state_zp: zero point of the output state.
-//  - quantized_proj_clip: if > 0, clip the output of the projection.
-//  - output_state: output vector, size n_batch*n_output. Must be contiguous.
-//  - scratch: scratch area of size n_batch*n_cell
-void CalculateLstmOutputInteger8x8_8(
-    int n_batch, int n_cell, int n_output, const int16_t* cell_state,
-    const int16_t* output_gate, const int8_t* projection_weights,
-    int32_t proj_scale_a, int32_t proj_scale_b, const int32_t* projection_bias,
-    int32_t output_state_zp, int32_t quantized_proj_clip, int8_t* output_state,
-    int16_t* scratch) {
-  // Note: unlike float/hybrid, the activation is always Tanh.
-  tensor_utils::PortableApplyTanhFloat(cell_state, n_batch, n_cell, -15,
-                                       scratch);
-  tensor_utils::PortableCwiseMul(output_gate, scratch, n_batch, n_cell,
-                                 15 + 15 - 15, scratch);
-  // Note: no bias like in float/hybrid
-  tensor_utils::PortableMatrixBatchVectorMultiply(
-      scratch, projection_weights, proj_scale_a, proj_scale_b, projection_bias,
-      n_batch, n_cell, n_output, output_state_zp, output_state);
-  if (quantized_proj_clip > 0) {
-    tensor_utils::PortableCwiseClipping(output_state, n_batch * n_output,
-                                        (int8_t)quantized_proj_clip);
-  }
-}
-
-// Fully quantized lstm kernel for 16 bit gate matmul output.
-//
-// Input tensor of size n_batch * n_input:
-//   input_ptr
-//
-// LSTM weights:
-// Quantized input weights of size 'n_cell * n_input':
-//   input_to_input_weight_ptr            - optional
-//   input_to_forget_weight_ptr           - optional
-//   input_to_cell_weight_ptr             - optional
-//   input_to_output_weight_ptr           - optional
-//
-// Quantized recurrent weights of size 'n_cell * n_output':
-//   recurrent_to_input_weight_ptr        - optional
-//   recurrent_to_forget_weights_ptr
-//   recurrent_to_cell_weights_ptr
-//   recurrent_to_input_weights_ptr
-//
-// Quantized peephole weights of size 'n_cell', representing diagonal matrices.
-//   cell_to_input_weights               - optional
-//   cell_to_cell_weights                - optional
-//   cell_to_output_weights              - optional
-//
-// Quantized projection weights of size 'n_output * n_cell'
-//   projection_weight_ptr                     - optional
-//
-// Weight scales (scalars) for each of the weights above.
-//   effective_input_to_input_scale_a    - optional
-//   effective_input_to_input_scale_b    - optional
-//   effective_input_to_forget_scale_a
-//   effective_input_to_forget_scale_b
-//   effective_input_to_cell_scale_a
-//   effective_input_to_cell_scale_b
-//   effective_input_to_output_scale_a
-//   effective_input_to_output_scale_b
-//   effective_recurrent_to_input_scale_a    - optional
-//   effective_recurrent_to_input_scale_b    - optional
-//   effective_recurrent_to_forget_scale_a
-//   effective_recurrent_to_forget_scale_b
-//   effective_recurrent_to_cell_scale_a
-//   effective_recurrent_to_cell_scale_b
-//   effective_recurrent_to_output_scale_a
-//   effective_recurrent_to_output_scale_b
-//   effective_proj_scale_a                  - optional
-//   effective_proj_scale_b                  - optional
-//
-// Gate biases of size 'n_cell':
-//   input_gate_bias_ptr                 - optional
-//   forget_gate_bias_ptr
-//   cell_gate_bias_ptr
-//   output_gate_bias_ptr
-//
-// Layer norm coefficients of size 'n_cell', representing diagonal matrices.
-//   layer_norm_input_weight_ptr    - optional
-//   layer_norm_forget_weight_ptr   - optional
-//   layer_norm_cell_weight_ptr     - optional
-//   layer_norm_output_weight_ptr   - optional
-//
-// Layer norm scales of size 'n_cell'.
-//   layer_norm_input_scale_a     - optional
-//   layer_norm_input_scale_b     - optional
-//   layer_norm_forget_scale_a    - optional
-//   layer_norm_forget_scale_b    - optional
-//   layer_norm_cell_scale_a      - optional
-//   layer_norm_cell_scale_b      - optional
-//   layer_norm_output_scale_a    - optional
-//   layer_norm_output_scale_b    - optional
-//
-// Scalar values:
-//   quantized_cell_clip: quantized clip value for cell.
-//   quantized_proj_clip: quantized clip value for projection.
-//   cell_state_scale: the power of two scale for cell state.
-//
-// Zero points:
-//   output_state_zp: zero point of output state
-//   hidden_zp: zero point for hidden state.
-//
-// Temporary pre-allocated storage for the calculation. Each is of size n_cell *
-// n_batch.
-//   scratch0
-//   scratch1
-//   scratch2
-//   scratch3
-//   scratch4
-//   scratch5: this scratch buffer is created purely for optimizing the
-//              MatrixBatchVectorMultiplyAccumulate.
-//
-// Outputs:
-//   output_state_ptr - size 'n_batch * n_output'
-//   cell_state_ptr   - size 'n_batch * n_cell'
-//   output_ptr       - size 'n_batch * n_output'
-// TODO(b/159947023): scratch0 is not used if (!cifg). Don't allocate then.
-inline void LstmStepInteger8x8_16(
-    const int8_t* input_ptr, const int8_t* input_to_input_weight_ptr,
-    int32_t effective_input_to_input_scale_a,
-    int32_t effective_input_to_input_scale_b,
-    const int8_t* input_to_forget_weight_ptr,
-    int32_t effective_input_to_forget_scale_a,
-    int32_t effective_input_to_forget_scale_b,
-    const int8_t* input_to_cell_weight_ptr,
-    int32_t effective_input_to_cell_scale_a,
-    int32_t effective_input_to_cell_scale_b,
-    const int8_t* input_to_output_weight_ptr,
-    int32_t effective_input_to_output_scale_a,
-    int32_t effective_input_to_output_scale_b,
-    const int8_t* recurrent_to_input_weight_ptr,
-    int32_t effective_recurrent_to_input_scale_a,
-    int32_t effective_recurrent_to_input_scale_b,
-    const int8_t* recurrent_to_forget_weight_ptr,
-    int32_t effective_recurrent_to_forget_scale_a,
-    int32_t effective_recurrent_to_forget_scale_b,
-    const int8_t* recurrent_to_cell_weight_ptr,
-    int32_t effective_recurrent_to_cell_scale_a,
-    int32_t effective_recurrent_to_cell_scale_b,
-    const int8_t* recurrent_to_output_weight_ptr,
-    int32_t effective_recurrent_to_output_scale_a,
-    int32_t effective_recurrent_to_output_scale_b,
-    const int16_t* cell_to_input_weight_ptr,
-    int32_t effective_cell_to_input_scale_a,
-    int32_t effective_cell_to_input_scale_b,
-    const int16_t* cell_to_forget_weight_ptr,
-    int32_t effective_cell_to_forget_scale_a,
-    int32_t effective_cell_to_forget_scale_b,
-    const int16_t* cell_to_output_weight_ptr,
-    int32_t effective_cell_to_output_scale_a,
-    int32_t effective_cell_to_output_scale_b,
-    const int8_t* projection_weight_ptr, int32_t effective_proj_scale_a,
-    int32_t effective_proj_scale_b, int32_t hidden_zp,
-    int32_t effective_hidden_scale_a, int32_t effective_hidden_scale_b,
-    const int16_t* layer_norm_input_weight_ptr,
-    int32_t layer_norm_input_scale_a, int32_t layer_norm_input_scale_b,
-    const int16_t* layer_norm_forget_weight_ptr,
-    int32_t layer_norm_forget_scale_a, int32_t layer_norm_forget_scale_b,
-    const int16_t* layer_norm_cell_weight_ptr, int32_t layer_norm_cell_scale_a,
-    int32_t layer_norm_cell_scale_b,
-    const int16_t* layer_norm_output_weight_ptr,
-    int32_t layer_norm_output_scale_a, int32_t layer_norm_output_scale_b,
-    const int32_t* input_gate_bias_ptr, const int32_t* forget_gate_bias_ptr,
-    const int32_t* cell_gate_bias_ptr, const int32_t* output_gate_bias_ptr,
-    int16_t quantized_cell_clip, int8_t quantized_proj_clip,
-    int32_t cell_state_scale, int32_t input_variance_guard,
-    int32_t forget_variance_guard, int32_t cell_variance_guard,
-    int32_t output_variance_guard,
-    const int32_t* input_to_forget_effective_bias,
-    const int32_t* recurrent_to_forget_effective_bias,
-    const int32_t* input_to_cell_effective_bias,
-    const int32_t* recurrent_to_cell_effective_bias,
-    const int32_t* input_to_output_effective_bias,
-    const int32_t* recurrent_to_output_effective_bias,
-    const int32_t* input_to_input_effective_bias,
-    const int32_t* recurrent_to_input_effective_bias,
-    const int32_t* projection_effective_bias, int n_batch, int n_cell,
-    int n_input, int n_output, int8_t* output_state_ptr,
-    int32_t output_state_zp, int16_t* cell_state_ptr, int8_t* output_ptr,
-    int16_t* scratch0, int16_t* scratch1, int16_t* scratch2, int16_t* scratch3,
-    int8_t* scratch4, int32_t* scratch5) {
-  // ruy::profiler::ScopeLabel label("LstmStepInteger8x8_16");
-  // Make named scratch buffers for the different gates.
-  int16_t* input_gate_scratch = scratch0;
-  int16_t* forget_gate_scratch = scratch1;
-  int16_t* cell_gate_scratch = scratch2;
-  int16_t* output_gate_scratch = scratch3;
-
-  // Since we have already checked that weights are all there or none, we
-  // can check the existence of only one to the get the condition.
-  const bool use_cifg = (input_to_input_weight_ptr == nullptr);
-
-  // Check for nullptrs.
-  TFLITE_DCHECK(input_to_forget_effective_bias);
-  TFLITE_DCHECK(recurrent_to_forget_effective_bias);
-  TFLITE_DCHECK(input_to_cell_effective_bias);
-  TFLITE_DCHECK(recurrent_to_cell_effective_bias);
-  TFLITE_DCHECK(input_to_output_effective_bias);
-  TFLITE_DCHECK(recurrent_to_output_effective_bias);
-  if (!use_cifg) {
-    TFLITE_DCHECK(input_to_input_effective_bias);
-    TFLITE_DCHECK(recurrent_to_input_effective_bias);
-  }
-  const bool use_projection = (projection_weight_ptr != nullptr);
-  if (use_projection) {
-    TFLITE_DCHECK(projection_effective_bias);
-  }
-  if (!use_cifg) {
-    // Calculate the input gate. (If not CIFG.)
-    CalculateLstmGateInteger8x8_16(
-        input_ptr, input_to_input_weight_ptr, input_to_input_effective_bias,
-        effective_input_to_input_scale_a, effective_input_to_input_scale_b,
-        output_state_ptr, recurrent_to_input_weight_ptr,
-        recurrent_to_input_effective_bias, effective_recurrent_to_input_scale_a,
-        effective_recurrent_to_input_scale_b, cell_state_ptr,
-        cell_to_input_weight_ptr, effective_cell_to_input_scale_a,
-        effective_cell_to_input_scale_b, layer_norm_input_weight_ptr,
-        input_gate_bias_ptr, layer_norm_input_scale_a, layer_norm_input_scale_b,
-        input_variance_guard, n_batch, n_input, n_output, n_cell,
-        kTfLiteActSigmoid, input_gate_scratch, scratch5);
-  }
-  // Calculate the forget gate.
-  CalculateLstmGateInteger8x8_16(
-      input_ptr, input_to_forget_weight_ptr, input_to_forget_effective_bias,
-      effective_input_to_forget_scale_a, effective_input_to_forget_scale_b,
-      output_state_ptr, recurrent_to_forget_weight_ptr,
-      recurrent_to_forget_effective_bias, effective_recurrent_to_forget_scale_a,
-      effective_recurrent_to_forget_scale_b, cell_state_ptr,
-      cell_to_forget_weight_ptr, effective_cell_to_forget_scale_a,
-      effective_cell_to_forget_scale_b, layer_norm_forget_weight_ptr,
-      forget_gate_bias_ptr, layer_norm_forget_scale_a,
-      layer_norm_forget_scale_b, forget_variance_guard, n_batch, n_input,
-      n_output, n_cell, kTfLiteActSigmoid, forget_gate_scratch, scratch5);
-  // Calculate the cell update gate.
-  CalculateLstmGateInteger8x8_16(
-      input_ptr, input_to_cell_weight_ptr, input_to_cell_effective_bias,
-      effective_input_to_cell_scale_a, effective_input_to_cell_scale_b,
-      output_state_ptr, recurrent_to_cell_weight_ptr,
-      recurrent_to_cell_effective_bias, effective_recurrent_to_cell_scale_a,
-      effective_recurrent_to_cell_scale_b, cell_state_ptr,
-      /*cell_to_gate_weights=*/nullptr, /*cell_to_gate_scale_a=*/0,
-      /*cell_to_gate_scale_b=*/0, layer_norm_cell_weight_ptr,
-      cell_gate_bias_ptr, layer_norm_cell_scale_a, layer_norm_cell_scale_b,
-      cell_variance_guard, n_batch, n_input, n_output, n_cell, kTfLiteActTanh,
-      cell_gate_scratch, scratch5);
-  // Update the cell state.
-  UpdateLstmCellInteger(n_batch, n_cell, cell_state_ptr, cell_state_scale,
-                        input_gate_scratch, forget_gate_scratch,
-                        cell_gate_scratch, use_cifg, quantized_cell_clip);
-  // Calculate the output gate.
-  CalculateLstmGateInteger8x8_16(
-      input_ptr, input_to_output_weight_ptr, input_to_output_effective_bias,
-      effective_input_to_output_scale_a, effective_input_to_output_scale_b,
-      output_state_ptr, recurrent_to_output_weight_ptr,
-      recurrent_to_output_effective_bias, effective_recurrent_to_output_scale_a,
-      effective_recurrent_to_output_scale_b, cell_state_ptr,
-      cell_to_output_weight_ptr, effective_cell_to_output_scale_a,
-      effective_cell_to_output_scale_b, layer_norm_output_weight_ptr,
-      output_gate_bias_ptr, layer_norm_output_scale_a,
-      layer_norm_output_scale_b, output_variance_guard, n_batch, n_input,
-      n_output, n_cell, kTfLiteActSigmoid, output_gate_scratch, scratch5);
-  // Update the output state.
-  CalculateLstmOutputInteger8x8_16(
-      n_batch, n_cell, n_output, cell_state_ptr, cell_state_scale,
-      output_gate_scratch, effective_hidden_scale_a, effective_hidden_scale_b,
-      hidden_zp, projection_weight_ptr, effective_proj_scale_a,
-      effective_proj_scale_b, projection_effective_bias, output_state_zp,
-      quantized_proj_clip, output_state_ptr, scratch0, scratch4, scratch5);
-  // Copy output state to the output. Note that unlike float or hybrid, output
-  // is always contiguous.
-  std::copy_n(output_state_ptr, n_batch * n_output, output_ptr);
-}
-
-// Fully quantized lstm kernel for 8 bit gate matmul output.
-//
-// Input tensor of size n_batch * n_input:
-//   input_ptr
-//
-// LSTM weights:
-// Quantized input weights of size 'n_cell * n_input':
-//   input_to_input_weight_ptr            - optional
-//   input_to_forget_weight_ptr           - optional
-//   input_to_cell_weight_ptr             - optional
-//   input_to_output_weight_ptr           - optional
-//
-// Quantized recurrent weights of size 'n_cell * n_output':
-//   recurrent_to_input_weight_ptr        - optional
-//   recurrent_to_forget_weights_ptr
-//   recurrent_to_cell_weights_ptr
-//   recurrent_to_input_weights_ptr
-//
-// Quantized peephole weights of size 'n_cell', representing diagonal matrices.
-//   cell_to_input_weights               - optional
-//   cell_to_cell_weights                - optional
-//   cell_to_output_weights              - optional
-//
-// Quantized projection weights of size 'n_output * n_cell'
-//   projection_weight_ptr                     - optional
-//
-// Weight scales (scalars) for each of the weights above.
-//   effective_input_to_input_scale_a    - optional
-//   effective_input_to_input_scale_b    - optional
-//   effective_input_to_forget_scale_a
-//   effective_input_to_forget_scale_b
-//   effective_input_to_cell_scale_a
-//   effective_input_to_cell_scale_b
-//   effective_input_to_output_scale_a
-//   effective_input_to_output_scale_b
-//   effective_recurrent_to_input_scale_a    - optional
-//   effective_recurrent_to_input_scale_b    - optional
-//   effective_recurrent_to_forget_scale_a
-//   effective_recurrent_to_forget_scale_b
-//   effective_recurrent_to_cell_scale_a
-//   effective_recurrent_to_cell_scale_b
-//   effective_recurrent_to_output_scale_a
-//   effective_recurrent_to_output_scale_b
-//   effective_proj_scale_a                  - optional
-//   effective_proj_scale_b                  - optional
-//
-// Gate biases of size 'n_cell':
-//   input_gate_bias_ptr                 - optional
-//   forget_gate_bias_ptr
-//   cell_gate_bias_ptr
-//   output_gate_bias_ptr
-//
-// Layer norm coefficients of size 'n_cell', representing diagonal matrices.
-//   layer_norm_input_weight_ptr    - optional
-//   layer_norm_forget_weight_ptr   - optional
-//   layer_norm_cell_weight_ptr     - optional
-//   layer_norm_output_weight_ptr   - optional
-//
-// Layer norm scales of size 'n_cell'.
-//   layer_norm_input_scale_a     - optional
-//   layer_norm_input_scale_b     - optional
-//   layer_norm_forget_scale_a    - optional
-//   layer_norm_forget_scale_b    - optional
-//   layer_norm_cell_scale_a      - optional
-//   layer_norm_cell_scale_b      - optional
-//   layer_norm_output_scale_a    - optional
-//   layer_norm_output_scale_b    - optional
-//
-// Scalar values:
-//   quantized_cell_clip: quantized clip value for cell.
-//   quantized_proj_clip: quantized clip value for projection.
-//   cell_state_scale: the power of two scale for cell state.
-//
-// Zero points:
-//   output_state_zp: zero point of output state.
-//   hidden_zp: zero point for hidden state.
-//
-// Temporary pre-allocated storage for the calculation. Each is of size n_cell *
-// n_batch.
-//   scratch0
-//   scratch1
-//   scratch2
-//   scratch3
-//   scratch4
-//   scratch5
-//   scratch6
-//   scratch7
-//
-// Outputs:
-//   output_state_ptr - size 'n_batch * n_output'
-//   cell_state_ptr   - size 'n_batch * n_cell'
-//   output_ptr       - size 'n_batch * n_output'
-// TODO(b/148688698): Move zero point calculation into Prepare().
-// TODO(b/159947023): scratch5 is unused, remove.
-inline void LstmStepInteger8x8_8(
-    const int8_t* input_ptr, int32_t input_zp,
-    const int8_t* input_to_input_weight_ptr,
-    int32_t effective_input_to_input_scale_a,
-    int32_t effective_input_to_input_scale_b,
-    const int8_t* input_to_forget_weight_ptr,
-    int32_t effective_input_to_forget_scale_a,
-    int32_t effective_input_to_forget_scale_b,
-    const int8_t* input_to_cell_weight_ptr,
-    int32_t effective_input_to_cell_scale_a,
-    int32_t effective_input_to_cell_scale_b,
-    const int8_t* input_to_output_weight_ptr,
-    int32_t effective_input_to_output_scale_a,
-    int32_t effective_input_to_output_scale_b,
-    const int8_t* recurrent_to_input_weight_ptr,
-    int32_t effective_recurrent_to_input_scale_a,
-    int32_t effective_recurrent_to_input_scale_b,
-    const int8_t* recurrent_to_forget_weight_ptr,
-    int32_t effective_recurrent_to_forget_scale_a,
-    int32_t effective_recurrent_to_forget_scale_b,
-    const int8_t* recurrent_to_cell_weight_ptr,
-    int32_t effective_recurrent_to_cell_scale_a,
-    int32_t effective_recurrent_to_cell_scale_b,
-    const int8_t* recurrent_to_output_weight_ptr,
-    int32_t effective_recurrent_to_output_scale_a,
-    int32_t effective_recurrent_to_output_scale_b,
-    const int8_t* cell_to_input_weight_ptr,
-    int32_t effective_cell_to_input_scale_a,
-    int32_t effective_cell_to_input_scale_b,
-    const int8_t* cell_to_forget_weight_ptr,
-    int32_t effective_cell_to_forget_scale_a,
-    int32_t effective_cell_to_forget_scale_b,
-    const int8_t* cell_to_output_weight_ptr,
-    int32_t effective_cell_to_output_scale_a,
-    int32_t effective_cell_to_output_scale_b,
-    const int8_t* projection_weight_ptr, int32_t effective_proj_scale_a,
-    int32_t effective_proj_scale_b, const int16_t* layer_norm_input_weight_ptr,
-    int32_t layer_norm_input_scale_a, int32_t layer_norm_input_scale_b,
-    const int16_t* layer_norm_forget_weight_ptr,
-    int32_t layer_norm_forget_scale_a, int32_t layer_norm_forget_scale_b,
-    const int16_t* layer_norm_cell_weight_ptr, int32_t layer_norm_cell_scale_a,
-    int32_t layer_norm_cell_scale_b,
-    const int16_t* layer_norm_output_weight_ptr,
-    int32_t layer_norm_output_scale_a, int32_t layer_norm_output_scale_b,
-    const int32_t* input_gate_bias_ptr, const int32_t* forget_gate_bias_ptr,
-    const int32_t* cell_gate_bias_ptr, const int32_t* output_gate_bias_ptr,
-    const int32_t* projection_bias_ptr, const TfLiteLSTMParams* params,
-    const int32_t* intermediate_scale_a, const int32_t* intermediate_scale_b,
-    const int32_t* intermediate_zp, int16_t quantized_cell_clip,
-    int8_t quantized_proj_clip, int n_batch, int n_cell, int n_input,
-    int n_output, int output_batch_leading_dim, int8_t* output_state_ptr,
-    int32_t output_state_zp, int16_t* cell_state_ptr, int8_t* output_ptr,
-    int8_t* scratch0, int8_t* scratch1, int16_t* scratch2, int16_t* scratch3,
-    int16_t* scratch4, int16_t* scratch5, int16_t* scratch6,
-    int16_t* scratch7) {
-  // TODO(b/159066113): scratch5 is unused, remove.
-
-  // ruy::profiler::ScopeLabel label("LstmStepInteger8x8_8");
-  // Make named scratch buffers for the different gates.
-  int16_t* forget_gate_scratch = scratch2;
-  int16_t* cell_gate_scratch = scratch3;
-  int16_t* output_gate_scratch = scratch4;
-  // no-CIFG is not supported here
-
-  // Calculate the forget gate.
-  CalculateLstmGateInteger8x8_8(
-      input_ptr, input_zp, input_to_forget_weight_ptr,
-      effective_input_to_forget_scale_a, effective_input_to_forget_scale_b,
-      intermediate_scale_a[2], intermediate_scale_b[2], intermediate_zp[4],
-      output_state_ptr, output_state_zp, recurrent_to_forget_weight_ptr,
-      effective_recurrent_to_forget_scale_a,
-      effective_recurrent_to_forget_scale_b, intermediate_scale_a[3],
-      intermediate_scale_b[3], intermediate_zp[5], layer_norm_forget_weight_ptr,
-      layer_norm_forget_scale_a, layer_norm_forget_scale_b,
-      forget_gate_bias_ptr, n_batch, n_input, n_output, n_cell,
-      kTfLiteActSigmoid, forget_gate_scratch, scratch0, scratch1);
-  // Calculate the cell update gate.
-  CalculateLstmGateInteger8x8_8(
-      input_ptr, input_zp, input_to_cell_weight_ptr,
-      effective_input_to_cell_scale_a, effective_input_to_cell_scale_b,
-      intermediate_scale_a[4], intermediate_scale_b[4], intermediate_zp[7],
-      output_state_ptr, output_state_zp, recurrent_to_cell_weight_ptr,
-      effective_recurrent_to_cell_scale_a, effective_recurrent_to_cell_scale_b,
-      intermediate_scale_a[5], intermediate_scale_b[5], intermediate_zp[8],
-      layer_norm_cell_weight_ptr, layer_norm_cell_scale_a,
-      layer_norm_cell_scale_b, cell_gate_bias_ptr, n_batch, n_input, n_output,
-      n_cell, kTfLiteActTanh, cell_gate_scratch, scratch0, scratch1);
-  // Update the cell state.
-  UpdateLstmCellInteger(n_batch, n_cell, cell_state_ptr,
-                        /*cell_state_scale=*/-15, /*input_gate=*/nullptr,
-                        forget_gate_scratch, cell_gate_scratch,
-                        /*use_cifg=*/true, quantized_cell_clip);
-  // Calculate the output gate.
-  CalculateLstmGateInteger8x8_8(
-      input_ptr, input_zp, input_to_output_weight_ptr,
-      effective_input_to_output_scale_a, effective_input_to_output_scale_b,
-      intermediate_scale_a[6], intermediate_scale_b[6], intermediate_zp[10],
-      output_state_ptr, output_state_zp, recurrent_to_output_weight_ptr,
-      effective_recurrent_to_output_scale_a,
-      effective_recurrent_to_output_scale_b, intermediate_scale_a[11],
-      intermediate_scale_b[7], intermediate_zp[7], layer_norm_output_weight_ptr,
-      layer_norm_output_scale_a, layer_norm_output_scale_b,
-      output_gate_bias_ptr, n_batch, n_input, n_output, n_cell,
-      kTfLiteActSigmoid, output_gate_scratch, scratch0, scratch1);
-  // Update the output state.
-  CalculateLstmOutputInteger8x8_8(
-      n_batch, n_cell, n_output, cell_state_ptr, output_gate_scratch,
-      projection_weight_ptr, effective_proj_scale_a, effective_proj_scale_b,
-      projection_bias_ptr, output_state_zp, quantized_proj_clip,
-      output_state_ptr, scratch2);
-  // Copy output state to the output. Note that unlike float or hybrid, output
-  // is always contiguous.
-  std::copy_n(output_state_ptr, n_batch * n_output, output_ptr);
-}
-
-}  // namespace
-
-// LINT.ThenChange(//tensorflow/lite/tools/optimize/calibration/builtin_logging_ops/lstm.cc)
-TfLiteStatus EvalInteger8x8_16(
-    TfLiteContext* context, TfLiteNode* node, const TfLiteEvalTensor* input,
-    const TfLiteEvalTensor* input_to_input_weights,
-    const TfLiteEvalTensor* input_to_forget_weights,
-    const TfLiteEvalTensor* input_to_cell_weights,
-    const TfLiteEvalTensor* input_to_output_weights,
-    const TfLiteEvalTensor* recurrent_to_input_weights,
-    const TfLiteEvalTensor* recurrent_to_forget_weights,
-    const TfLiteEvalTensor* recurrent_to_cell_weights,
-    const TfLiteEvalTensor* recurrent_to_output_weights,
-    const TfLiteEvalTensor* cell_to_input_weights,
-    const TfLiteEvalTensor* cell_to_forget_weights,
-    const TfLiteEvalTensor* cell_to_output_weights,
-    const TfLiteEvalTensor* input_layer_norm_coefficients,
-    const TfLiteEvalTensor* forget_layer_norm_coefficients,
-    const TfLiteEvalTensor* cell_layer_norm_coefficients,
-    const TfLiteEvalTensor* output_layer_norm_coefficients,
-    const TfLiteEvalTensor* input_gate_bias,
-    const TfLiteEvalTensor* forget_gate_bias,
-    const TfLiteEvalTensor* cell_gate_bias,
-    const TfLiteEvalTensor* output_gate_bias,
-    const TfLiteEvalTensor* projection_weights,
-    const TfLiteEvalTensor* projection_bias, const TfLiteLSTMParams* params,
-    bool forward_sequence, bool time_major,
-    const lstm_eval::IntegerLstmParameter* integer_lstm_param,
-    TfLiteEvalTensor* output_state, TfLiteEvalTensor* cell_state,
-    TfLiteEvalTensor* output, TfLiteEvalTensor* scratch0,
-    TfLiteEvalTensor* scratch1, TfLiteEvalTensor* scratch2,
-    TfLiteEvalTensor* scratch3, TfLiteEvalTensor* scratch4,
-    TfLiteEvalTensor* scratch5) {
-  TFLITE_DCHECK(input->dims->size >= 2 && input->dims->size <= 3);
-  const int n_input = input->dims->data[input->dims->size - 1];
-  int max_time, n_batch;
-  if (input->dims->size == 2) {
-    max_time = 1;
-    n_batch = input->dims->data[0];
-  } else {
-    max_time = (time_major) ? input->dims->data[0] : input->dims->data[1];
-    n_batch = (time_major) ? input->dims->data[1] : input->dims->data[0];
-  }
-
-  // n_cell and n_output will be the same size when there is no projection.
-  const int n_cell = input_to_output_weights->dims->data[0];
-  const int n_output = recurrent_to_output_weights->dims->data[1];
-
-  // Activation zero point
-  //  TODO@is data.output_zero_point equal to output_state->params.zero_point
-  // int output_state_zp = output_state->params.zero_point;
-  int output_state_zp = 0;
-
-  // Get params for time/batch/sequence.
-  const int output_batch_leading_dim =
-      output->dims->data[output->dims->size - 1];
-
-  if (time_major) {
-    const int input_step = n_batch * n_input;
-    const int output_step = n_batch * output_batch_leading_dim;
-    for (int t = 0; t < max_time; t++) {
-      const int t_rel = t;
-      int8_t* output_ptr =
-          tflite::micro::GetTensorData<int8_t>(output) + t_rel * output_step;
-      const int8_t* input_ptr =
-          tflite::micro::GetTensorData<int8_t>(input) + t_rel * input_step;
-      LstmStepInteger8x8_16(
-          input_ptr,
-          tflite::micro::GetTensorData<int8_t>(input_to_input_weights),
-          integer_lstm_param->effective_input_to_input_scale_a,
-          integer_lstm_param->effective_input_to_input_scale_b,
-          tflite::micro::GetTensorData<int8_t>(input_to_forget_weights),
-          integer_lstm_param->effective_input_to_forget_scale_a,
-          integer_lstm_param->effective_input_to_forget_scale_b,
-          tflite::micro::GetTensorData<int8_t>(input_to_cell_weights),
-          integer_lstm_param->effective_input_to_cell_scale_a,
-          integer_lstm_param->effective_input_to_cell_scale_b,
-          tflite::micro::GetTensorData<int8_t>(input_to_output_weights),
-          integer_lstm_param->effective_input_to_output_scale_a,
-          integer_lstm_param->effective_input_to_output_scale_b,
-          tflite::micro::GetTensorData<int8_t>(recurrent_to_input_weights),
-          integer_lstm_param->effective_recurrent_to_input_scale_a,
-          integer_lstm_param->effective_recurrent_to_input_scale_b,
-          tflite::micro::GetTensorData<int8_t>(recurrent_to_forget_weights),
-          integer_lstm_param->effective_recurrent_to_forget_scale_a,
-          integer_lstm_param->effective_recurrent_to_forget_scale_b,
-          tflite::micro::GetTensorData<int8_t>(recurrent_to_cell_weights),
-          integer_lstm_param->effective_recurrent_to_cell_scale_a,
-          integer_lstm_param->effective_recurrent_to_cell_scale_b,
-          tflite::micro::GetTensorData<int8_t>(recurrent_to_output_weights),
-          integer_lstm_param->effective_recurrent_to_output_scale_a,
-          integer_lstm_param->effective_recurrent_to_output_scale_b,
-          tflite::micro::GetTensorData<int16_t>(cell_to_input_weights),
-          integer_lstm_param->effective_cell_to_input_scale_a,
-          integer_lstm_param->effective_cell_to_input_scale_b,
-          tflite::micro::GetTensorData<int16_t>(cell_to_forget_weights),
-          integer_lstm_param->effective_cell_to_forget_scale_a,
-          integer_lstm_param->effective_cell_to_forget_scale_b,
-          tflite::micro::GetTensorData<int16_t>(cell_to_output_weights),
-          integer_lstm_param->effective_cell_to_output_scale_a,
-          integer_lstm_param->effective_cell_to_output_scale_b,
-          tflite::micro::GetTensorData<int8_t>(projection_weights),
-          integer_lstm_param->effective_proj_scale_a,
-          integer_lstm_param->effective_proj_scale_b,
-          integer_lstm_param->hidden_zp,
-          integer_lstm_param->effective_hidden_scale_a,
-          integer_lstm_param->effective_hidden_scale_b,
-          tflite::micro::GetTensorData<int16_t>(input_layer_norm_coefficients),
-          integer_lstm_param->layer_norm_input_scale_a,
-          integer_lstm_param->layer_norm_input_scale_b,
-          tflite::micro::GetTensorData<int16_t>(forget_layer_norm_coefficients),
-          integer_lstm_param->layer_norm_forget_scale_a,
-          integer_lstm_param->layer_norm_forget_scale_b,
-          tflite::micro::GetTensorData<int16_t>(cell_layer_norm_coefficients),
-          integer_lstm_param->layer_norm_cell_scale_a,
-          integer_lstm_param->layer_norm_cell_scale_b,
-          tflite::micro::GetTensorData<int16_t>(output_layer_norm_coefficients),
-          integer_lstm_param->layer_norm_output_scale_a,
-          integer_lstm_param->layer_norm_output_scale_b,
-          tflite::micro::GetTensorData<int32_t>(input_gate_bias),
-          tflite::micro::GetTensorData<int32_t>(forget_gate_bias),
-          tflite::micro::GetTensorData<int32_t>(cell_gate_bias),
-          tflite::micro::GetTensorData<int32_t>(output_gate_bias),
-          integer_lstm_param->quantized_cell_clip,
-          integer_lstm_param->quantized_proj_clip,
-          integer_lstm_param->cell_scale,
-          integer_lstm_param->input_variance_guard,
-          integer_lstm_param->forget_variance_guard,
-          integer_lstm_param->cell_variance_guard,
-          integer_lstm_param->output_variance_guard,
-          integer_lstm_param->input_to_forget_effective_bias.get(),
-          integer_lstm_param->recurrent_to_forget_effective_bias.get(),
-          integer_lstm_param->input_to_cell_effective_bias.get(),
-          integer_lstm_param->recurrent_to_cell_effective_bias.get(),
-          integer_lstm_param->input_to_output_effective_bias.get(),
-          integer_lstm_param->recurrent_to_output_effective_bias.get(),
-          integer_lstm_param->input_to_input_effective_bias.get(),
-          integer_lstm_param->recurrent_to_input_effective_bias.get(),
-          integer_lstm_param->projection_effective_bias.get(), n_batch, n_cell,
-          n_input, n_output, tflite::micro::GetTensorData<int8_t>(output_state),
-          output_state_zp, tflite::micro::GetTensorData<int16_t>(cell_state),
-          output_ptr, (int16_t*)(scratch0), (int16_t*)(scratch1),
-          (int16_t*)(scratch2), (int16_t*)(scratch3), (int8_t*)(scratch4),
-          (int32_t*)(scratch5));
-    }
-  } else {
-    for (int b = 0; b < n_batch; b++) {
-      const int input_step = n_input;
-      const int output_step = output_batch_leading_dim;
-      for (int t = 0; t < max_time; t++) {
-        // If this is the forward_sequence, step forward, otherwise step
-        // backwards.
-        const int t_rel = forward_sequence ? t : max_time - t - 1;
-        const int time_offset = b * max_time + t_rel;
-        const int8_t* input_ptr = tflite::micro::GetTensorData<int8_t>(input) +
-                                  time_offset * input_step;
-        int8_t* output_ptr = tflite::micro::GetTensorData<int8_t>(output) +
-                             time_offset * output_step;
-
-        // Offset the {output,cell}_state pointers to the right batch.
-        int8_t* output_state_ptr =
-            tflite::micro::GetTensorData<int8_t>(output_state) +
-            b * output_batch_leading_dim;
-        int16_t* cell_state_ptr =
-            tflite::micro::GetTensorData<int16_t>(cell_state) + b * n_cell;
-
-        LstmStepInteger8x8_16(
-            input_ptr,
-            tflite::micro::GetTensorData<int8_t>(input_to_input_weights),
-            integer_lstm_param->effective_input_to_input_scale_a,
-            integer_lstm_param->effective_input_to_input_scale_b,
-            tflite::micro::GetTensorData<int8_t>(input_to_forget_weights),
-            integer_lstm_param->effective_input_to_forget_scale_a,
-            integer_lstm_param->effective_input_to_forget_scale_b,
-            tflite::micro::GetTensorData<int8_t>(input_to_cell_weights),
-            integer_lstm_param->effective_input_to_cell_scale_a,
-            integer_lstm_param->effective_input_to_cell_scale_b,
-            tflite::micro::GetTensorData<int8_t>(input_to_output_weights),
-            integer_lstm_param->effective_input_to_output_scale_a,
-            integer_lstm_param->effective_input_to_output_scale_b,
-            tflite::micro::GetTensorData<int8_t>(recurrent_to_input_weights),
-            integer_lstm_param->effective_recurrent_to_input_scale_a,
-            integer_lstm_param->effective_recurrent_to_input_scale_b,
-            tflite::micro::GetTensorData<int8_t>(recurrent_to_forget_weights),
-            integer_lstm_param->effective_recurrent_to_forget_scale_a,
-            integer_lstm_param->effective_recurrent_to_forget_scale_b,
-            tflite::micro::GetTensorData<int8_t>(recurrent_to_cell_weights),
-            integer_lstm_param->effective_recurrent_to_cell_scale_a,
-            integer_lstm_param->effective_recurrent_to_cell_scale_b,
-            tflite::micro::GetTensorData<int8_t>(recurrent_to_output_weights),
-            integer_lstm_param->effective_recurrent_to_output_scale_a,
-            integer_lstm_param->effective_recurrent_to_output_scale_b,
-            tflite::micro::GetTensorData<int16_t>(cell_to_input_weights),
-            integer_lstm_param->effective_cell_to_input_scale_a,
-            integer_lstm_param->effective_cell_to_input_scale_b,
-            tflite::micro::GetTensorData<int16_t>(cell_to_forget_weights),
-            integer_lstm_param->effective_cell_to_forget_scale_a,
-            integer_lstm_param->effective_cell_to_forget_scale_b,
-            tflite::micro::GetTensorData<int16_t>(cell_to_output_weights),
-            integer_lstm_param->effective_cell_to_output_scale_a,
-            integer_lstm_param->effective_cell_to_output_scale_b,
-            tflite::micro::GetTensorData<int8_t>(projection_weights),
-            integer_lstm_param->effective_proj_scale_a,
-            integer_lstm_param->effective_proj_scale_b,
-            integer_lstm_param->hidden_zp,
-            integer_lstm_param->effective_hidden_scale_a,
-            integer_lstm_param->effective_hidden_scale_b,
-            tflite::micro::GetTensorData<int16_t>(
-                input_layer_norm_coefficients),
-            integer_lstm_param->layer_norm_input_scale_a,
-            integer_lstm_param->layer_norm_input_scale_b,
-            tflite::micro::GetTensorData<int16_t>(
-                forget_layer_norm_coefficients),
-            integer_lstm_param->layer_norm_forget_scale_a,
-            integer_lstm_param->layer_norm_forget_scale_b,
-            tflite::micro::GetTensorData<int16_t>(cell_layer_norm_coefficients),
-            integer_lstm_param->layer_norm_cell_scale_a,
-            integer_lstm_param->layer_norm_cell_scale_b,
-            tflite::micro::GetTensorData<int16_t>(
-                output_layer_norm_coefficients),
-            integer_lstm_param->layer_norm_output_scale_a,
-            integer_lstm_param->layer_norm_output_scale_b,
-            tflite::micro::GetTensorData<int32_t>(input_gate_bias),
-            tflite::micro::GetTensorData<int32_t>(forget_gate_bias),
-            tflite::micro::GetTensorData<int32_t>(cell_gate_bias),
-            tflite::micro::GetTensorData<int32_t>(output_gate_bias),
-            integer_lstm_param->quantized_cell_clip,
-            integer_lstm_param->quantized_proj_clip,
-            integer_lstm_param->cell_scale,
-            integer_lstm_param->input_variance_guard,
-            integer_lstm_param->forget_variance_guard,
-            integer_lstm_param->cell_variance_guard,
-            integer_lstm_param->output_variance_guard,
-            integer_lstm_param->input_to_forget_effective_bias.get(),
-            integer_lstm_param->recurrent_to_forget_effective_bias.get(),
-            integer_lstm_param->input_to_cell_effective_bias.get(),
-            integer_lstm_param->recurrent_to_cell_effective_bias.get(),
-            integer_lstm_param->input_to_output_effective_bias.get(),
-            integer_lstm_param->recurrent_to_output_effective_bias.get(),
-            integer_lstm_param->input_to_input_effective_bias.get(),
-            integer_lstm_param->recurrent_to_input_effective_bias.get(),
-            integer_lstm_param->projection_effective_bias.get(), /*n_batch=*/1,
-            n_cell, n_input, n_output, output_state_ptr, output_state_zp,
-            cell_state_ptr, output_ptr, (int16_t*)(scratch0),
-            (int16_t*)(scratch1), (int16_t*)(scratch2), (int16_t*)(scratch3),
-            (int8_t*)(scratch4), (int32_t*)(scratch5));
-      }
+LstmTensors::~LstmTensors() {
+  for (size_t i = 0; i < 24; i++) {
+    if (internal_tensors_[i] != nullptr) {
+      micro_context_->DeallocateTempTfLiteTensor(internal_tensors_[i]);
     }
   }
+  micro_context_->DeallocateTempTfLiteTensor(output_tensor_);
+}
 
+// Verify the LSTM internal tensor properties (e.g., type checks)
+// Input/output/states/fc weights tensors are required for kernel evaluation.
+// The state tensors should be variables. Variants of the standard LSTM
+// are not supported here, therefore their corresponding tensors should be
+// invalid
+TfLiteStatus LstmTensors::ValidateTensorStatus(TfLiteContext* context) const {
+  // Verify certain tensor properties
+  // input tensor
+  TF_LITE_ENSURE(context, internal_tensors_[kLstmInputTensor] != nullptr);
+  // hidden state
+  TF_LITE_ENSURE(context, internal_tensors_[kLstmOutputStateTensor] != nullptr);
+  TF_LITE_ENSURE(context,
+                 internal_tensors_[kLstmOutputStateTensor]->is_variable);
+  // hidden state becomes input so they must have the same type
+  TF_LITE_ENSURE_EQ(context, internal_tensors_[kLstmOutputStateTensor]->type,
+                    internal_tensors_[kLstmInputTensor]->type);
+  // cell state
+  TF_LITE_ENSURE(context, internal_tensors_[kLstmCellStateTensor] != nullptr);
+  TF_LITE_ENSURE(context, internal_tensors_[kLstmCellStateTensor]->is_variable);
+  // output
+  TF_LITE_ENSURE(context, output_tensor_ != nullptr);
+  // output type is the same as the input type (activations)
+  TF_LITE_ENSURE_EQ(context, output_tensor_->type,
+                    internal_tensors_[kLstmInputTensor]->type);
+
+  // weight tensors (1-9, see lstm_shared for index definition)
+  const auto weight_type =
+      internal_tensors_[kLstmInputToForgetWeightsTensor]->type;
+  for (size_t i = 1; i < 9; i++) {
+    TF_LITE_ENSURE(context, internal_tensors_[i] != nullptr);
+    TF_LITE_ENSURE_EQ(context, internal_tensors_[i]->type, weight_type);
+  }
+
+  // bias tensors (12-15, see lstm_shared for index definition)
+  const auto bias_type = internal_tensors_[kLstmForgetGateBiasTensor]->type;
+  for (size_t i = 12; i < 16; i++) {
+    TF_LITE_ENSURE(context, internal_tensors_[i] != nullptr);
+    TF_LITE_ENSURE_EQ(context, internal_tensors_[i]->type, bias_type);
+  }
+  // Tensors from LSTM variants are invalid
+  // No peephole
+  for (size_t i = 9; i < 12; i++) {
+    TF_LITE_ENSURE(context, internal_tensors_[i] == nullptr);
+  }
+  // No projection
+  for (size_t i = 16; i < 18; i++) {
+    TF_LITE_ENSURE(context, internal_tensors_[i] == nullptr);
+  }
+  // No internal layer norm
+  for (size_t i = 20; i < 24; i++) {
+    TF_LITE_ENSURE(context, internal_tensors_[i] == nullptr);
+  }
   return kTfLiteOk;
 }
 
-TfLiteStatus EvalInteger8x8_8(
-    const TfLiteEvalTensor* input,
-    const TfLiteEvalTensor* input_to_input_weights,
-    const TfLiteEvalTensor* input_to_forget_weights,
-    const TfLiteEvalTensor* input_to_cell_weights,
-    const TfLiteEvalTensor* input_to_output_weights,
-    const TfLiteEvalTensor* recurrent_to_input_weights,
-    const TfLiteEvalTensor* recurrent_to_forget_weights,
-    const TfLiteEvalTensor* recurrent_to_cell_weights,
-    const TfLiteEvalTensor* recurrent_to_output_weights,
-    const TfLiteEvalTensor* cell_to_input_weights,
-    const TfLiteEvalTensor* cell_to_forget_weights,
-    const TfLiteEvalTensor* cell_to_output_weights,
-    const TfLiteEvalTensor* input_layer_norm_coefficients,
-    const TfLiteEvalTensor* forget_layer_norm_coefficients,
-    const TfLiteEvalTensor* cell_layer_norm_coefficients,
-    const TfLiteEvalTensor* output_layer_norm_coefficients,
-    const TfLiteEvalTensor* input_gate_bias,
-    const TfLiteEvalTensor* forget_gate_bias,
-    const TfLiteEvalTensor* cell_gate_bias,
-    const TfLiteEvalTensor* output_gate_bias,
-    const TfLiteEvalTensor* projection_weights,
-    const TfLiteEvalTensor* projection_bias, const TfLiteLSTMParams* params,
-    TfLiteEvalTensor* output_state, TfLiteEvalTensor* cell_state,
-    TfLiteEvalTensor* output,
-    const lstm_eval::IntegerLstmParameter* integer_lstm_param,
-    TfLiteEvalTensor* scratch0, TfLiteEvalTensor* scratch1,
-    TfLiteEvalTensor* scratch2, TfLiteEvalTensor* scratch3,
-    TfLiteEvalTensor* scratch4, TfLiteEvalTensor* scratch5,
-    TfLiteEvalTensor* scratch6, TfLiteEvalTensor* scratch7) {
-  TFLITE_DCHECK(input->dims->size >= 2 && input->dims->size <= 3);
-  const int n_input = input->dims->data[input->dims->size - 1];
-  int max_time, n_batch;
-  if (input->dims->size == 2) {
-    max_time = 1;
-    n_batch = input->dims->data[0];
-  } else {
-    max_time = input->dims->data[0];
-    n_batch = input->dims->data[1];
+namespace lstm_internal {
+
+#if !(defined(HIFI3) || defined(HIFI4) || defined(HIFI5))
+const int32_t kInt16Max = std::numeric_limits<int16_t>::max();
+const int32_t kInt16Min = std::numeric_limits<int16_t>::min();
+#endif
+
+void AddElementWise(const int16_t* input_1, const int16_t* input_2, int n_batch,
+                    int n_input, int16_t* output) {
+#if !(defined(HIFI3) || defined(HIFI4) || defined(HIFI5))
+  for (int batch = 0; batch < n_batch; ++batch) {
+    for (int i = 0; i < n_input; ++i) {
+      const int index = batch * n_input + i;
+      int32_t sum = input_1[index] + input_2[index];
+      const int32_t sum_clamped = std::min(kInt16Max, std::max(kInt16Min, sum));
+      output[index] = static_cast<int16_t>(sum_clamped);
+    }
   }
-
-  // n_cell and n_output will be the same size when there is no projection.
-  const int n_cell = input_to_output_weights->dims->data[0];
-  const int n_output = recurrent_to_output_weights->dims->data[1];
-  //@TODO input zero point and output zeropoint
-  // const int32_t input_zp = input->params.zero_point;
-  /// const int32_t output_state_zp = output_state->params.zero_point;
-  const int32_t input_zp = 0;
-  const int32_t output_state_zp = 0;
-
-  // Get params for time/batch/sequence.
-  const int output_batch_leading_dim =
-      output->dims->data[output->dims->size - 1];
-  const int input_step = n_batch * n_input;
-  const int output_step = n_batch * output_batch_leading_dim;
-
-  for (int t = 0; t < max_time; t++) {
-    const int t_rel = t;
-    int8_t* output_ptr =
-        tflite::micro::GetTensorData<int8_t>(output) + t_rel * output_step;
-    // Input can be int8 asymmetric or int16 symmetric.
-    const int8_t* input_ptr =
-        tflite::micro::GetTensorData<int8_t>(input) + t_rel * input_step;
-    lstm_eval::LstmStepInteger8x8_8(
-        input_ptr, input_zp,
-
-        tflite::micro::GetTensorData<int8_t>(input_to_input_weights),
-        integer_lstm_param->effective_input_to_input_scale_a,
-        integer_lstm_param->effective_input_to_input_scale_b,
-
-        tflite::micro::GetTensorData<int8_t>(input_to_forget_weights),
-        integer_lstm_param->effective_input_to_forget_scale_a,
-        integer_lstm_param->effective_input_to_forget_scale_b,
-
-        tflite::micro::GetTensorData<int8_t>(input_to_cell_weights),
-        integer_lstm_param->effective_input_to_cell_scale_a,
-        integer_lstm_param->effective_input_to_cell_scale_b,
-
-        tflite::micro::GetTensorData<int8_t>(input_to_output_weights),
-        integer_lstm_param->effective_input_to_output_scale_a,
-        integer_lstm_param->effective_input_to_output_scale_b,
-
-        tflite::micro::GetTensorData<int8_t>(recurrent_to_input_weights),
-        integer_lstm_param->effective_recurrent_to_input_scale_a,
-        integer_lstm_param->effective_recurrent_to_input_scale_b,
-
-        tflite::micro::GetTensorData<int8_t>(recurrent_to_forget_weights),
-        integer_lstm_param->effective_recurrent_to_forget_scale_a,
-        integer_lstm_param->effective_recurrent_to_forget_scale_b,
-
-        tflite::micro::GetTensorData<int8_t>(recurrent_to_cell_weights),
-        integer_lstm_param->effective_recurrent_to_cell_scale_a,
-        integer_lstm_param->effective_recurrent_to_cell_scale_b,
-
-        tflite::micro::GetTensorData<int8_t>(recurrent_to_output_weights),
-        integer_lstm_param->effective_recurrent_to_output_scale_a,
-        integer_lstm_param->effective_recurrent_to_output_scale_b,
-
-        tflite::micro::GetTensorData<int8_t>(cell_to_input_weights),
-        integer_lstm_param->effective_cell_to_input_scale_a,
-        integer_lstm_param->effective_cell_to_input_scale_b,
-
-        tflite::micro::GetTensorData<int8_t>(cell_to_forget_weights),
-        integer_lstm_param->effective_cell_to_forget_scale_a,
-        integer_lstm_param->effective_cell_to_forget_scale_b,
-
-        tflite::micro::GetTensorData<int8_t>(cell_to_output_weights),
-        integer_lstm_param->effective_cell_to_output_scale_a,
-        integer_lstm_param->effective_cell_to_output_scale_b,
-
-        tflite::micro::GetTensorData<int8_t>(projection_weights),
-        integer_lstm_param->effective_proj_scale_a,
-        integer_lstm_param->effective_proj_scale_b,
-
-        tflite::micro::GetTensorData<int16_t>(input_layer_norm_coefficients),
-        integer_lstm_param->layer_norm_input_scale_a,
-        integer_lstm_param->layer_norm_input_scale_b,
-
-        tflite::micro::GetTensorData<int16_t>(forget_layer_norm_coefficients),
-        integer_lstm_param->layer_norm_forget_scale_a,
-        integer_lstm_param->layer_norm_forget_scale_b,
-
-        tflite::micro::GetTensorData<int16_t>(cell_layer_norm_coefficients),
-        integer_lstm_param->layer_norm_cell_scale_a,
-        integer_lstm_param->layer_norm_cell_scale_b,
-
-        tflite::micro::GetTensorData<int16_t>(output_layer_norm_coefficients),
-        integer_lstm_param->layer_norm_output_scale_a,
-        integer_lstm_param->layer_norm_output_scale_b,
-
-        tflite::micro::GetTensorData<int32_t>(input_gate_bias),
-        tflite::micro::GetTensorData<int32_t>(forget_gate_bias),
-        tflite::micro::GetTensorData<int32_t>(cell_gate_bias),
-        tflite::micro::GetTensorData<int32_t>(output_gate_bias),
-        tflite::micro::GetTensorData<int32_t>(projection_bias),
-
-        params, integer_lstm_param->intermediate_scale_a,
-        integer_lstm_param->intermediate_scale_b,
-        integer_lstm_param->intermediate_zp,
-        integer_lstm_param->quantized_cell_clip,
-        integer_lstm_param->quantized_proj_clip, n_batch, n_cell, n_input,
-        n_output, output_batch_leading_dim,
-        tflite::micro::GetTensorData<int8_t>(output_state), output_state_zp,
-        tflite::micro::GetTensorData<int16_t>(cell_state), output_ptr,
-        tflite::micro::GetTensorData<int8_t>(scratch0),
-        tflite::micro::GetTensorData<int8_t>(scratch1),
-        tflite::micro::GetTensorData<int16_t>(scratch2),
-        tflite::micro::GetTensorData<int16_t>(scratch3),
-        tflite::micro::GetTensorData<int16_t>(scratch4),
-        tflite::micro::GetTensorData<int16_t>(scratch5),
-        tflite::micro::GetTensorData<int16_t>(scratch6),
-        tflite::micro::GetTensorData<int16_t>(scratch7));
-  }
-
-  return kTfLiteOk;
+#else
+  xa_nn_elm_add_16x16_16(output, input_1, input_2, n_batch * n_input);
+#endif
 }
 
-}  // namespace lstm_eval
-}  // namespace micro
-}  // namespace ops
+void AddElementWise(const float* input_1, const float* input_2, int n_batch,
+                    int n_input, float* output) {
+  for (int batch = 0; batch < n_batch; ++batch) {
+    for (int i = 0; i < n_input; ++i) {
+      const int index = batch * n_input + i;
+      output[index] = input_1[index] + input_2[index];
+    }
+  }
+}
+
+#if !(defined(HIFI3) || defined(HIFI4) || defined(HIFI5))
+void Sigmoid(const RuntimeShape& data_shape, int16_t* data) {
+  reference_integer_ops::Logistic(
+      0 /*data->input_multiplier*/, 0 /*data->input_left_shift */,
+      data_shape.FlatSize() /*NumElements(input->dims)*/,
+      data /* tflite::micro::GetTensorData<int16_t>(input) */,
+      data /*tflite::micro::GetTensorData<int16_t>(output) */);
+}
+
+void Sigmoid(const RuntimeShape& data_shape, float* data) {
+  reference_ops::Logistic(data_shape, data, data_shape, data);
+}
+
+void Tanh(int32_t cell_state_scale_power, const RuntimeShape& input_data_shape,
+          int16_t* input_data, const RuntimeShape& output_data_shape,
+          int16_t* output_data) {
+  int32_t tanh_input_left_shift = (15 + cell_state_scale_power) - 3;
+  int32_t input_multiplier = 0;
+  if (tanh_input_left_shift < 0) /* handling negative shift value */
+  {
+    tanh_input_left_shift = -tanh_input_left_shift;
+    input_multiplier = 3;
+  }
+  reference_integer_ops::Tanh(input_multiplier, tanh_input_left_shift,
+                              input_data_shape, input_data, output_data_shape,
+                              output_data);
+}
+
+void Tanh(int32_t cell_state_scale_power, const RuntimeShape& input_data_shape,
+          float* input_data, const RuntimeShape& output_data_shape,
+          float* output_data) {
+  reference_ops::Tanh(input_data_shape, input_data, output_data_shape,
+                      output_data);
+}
+
+// Input and output have the same shape in LSTM
+void Mul(const RuntimeShape& shape, const ArithmeticParams& params,
+         const int16_t* input1_data, const int16_t* input2_data,
+         int8_t* output_data) {
+  return reference_integer_ops::MulElementwise(
+      shape.FlatSize(), params, input1_data, input2_data, output_data);
+}
+
+// Input and output have the same shape in LSTM
+void Mul(const RuntimeShape& shape, const ArithmeticParams& params,
+         const int16_t* input1_data, const int16_t* input2_data,
+         int16_t* output_data) {
+  return reference_integer_ops::MulElementwise(
+      shape.FlatSize(), params, input1_data, input2_data, output_data);
+}
+
+// Input and output have the same shape in LSTM
+void Mul(const RuntimeShape& shape, const ArithmeticParams& params,
+         const float* input1_data, const float* input2_data,
+         float* output_data) {
+  return reference_ops::Mul(params, shape, input1_data, shape, input2_data,
+                            shape, output_data);
+}
+
+void FullyConnected(const FullyConnectedParams& params,
+                    const RuntimeShape& input_shape, const int8_t* input_data,
+                    const RuntimeShape& filter_shape, const int8_t* filter_data,
+                    const RuntimeShape& bias_shape, const int32_t* bias_data,
+                    const RuntimeShape& output_shape, int16_t* output_data) {
+  return tflite::reference_integer_ops::FullyConnected(
+      params, input_shape, input_data, filter_shape, filter_data, bias_shape,
+      bias_data, output_shape, output_data);
+}
+
+void FullyConnected(const FullyConnectedParams& params,
+                    const RuntimeShape& input_shape, const int16_t* input_data,
+                    const RuntimeShape& filter_shape, const int8_t* filter_data,
+                    const RuntimeShape& bias_shape, const int64_t* bias_data,
+                    const RuntimeShape& output_shape, int16_t* output_data) {
+  return tflite::reference_integer_ops::FullyConnected(
+      params, input_shape, input_data, filter_shape, filter_data, bias_shape,
+      bias_data, output_shape, output_data);
+}
+
+void FullyConnected(const FullyConnectedParams& params,
+                    const RuntimeShape& input_shape, const float* input_data,
+                    const RuntimeShape& filter_shape, const float* filter_data,
+                    const RuntimeShape& bias_shape, const float* bias_data,
+                    const RuntimeShape& output_shape, float* output_data) {
+  return tflite::reference_ops::FullyConnected(
+      params, input_shape, input_data, filter_shape, filter_data, bias_shape,
+      bias_data, output_shape, output_data);
+}
+#else  // #if !(defined(HIFI3) || defined(HIFI4) || defined(HIFI5))
+void Sigmoid(int16_t* data, int32_t data_size) {
+  xa_nn_vec_sigmoid_sym16s_sym16s(data, data, 0, 0, data_size);
+}
+
+void Sigmoid(float* data, int32_t data_size) {
+  int data_dims[2] = {1, data_size};
+  RuntimeShape data_shape(2, reinterpret_cast<const int32_t*>(data_dims));
+  reference_ops::Logistic(data_shape, data, data_shape, data);
+}
+
+void Tanh(int32_t cell_state_scale_power, int16_t* input_data,
+          int16_t* output_data, int32_t data_size) {
+  int32_t tanh_input_left_shift = (15 + cell_state_scale_power) - 3;
+  int32_t input_multiplier = 0;
+  if (tanh_input_left_shift < 0) /* handling negative shift value */
+  {
+    tanh_input_left_shift = -tanh_input_left_shift;
+#if (defined(USE_HIFI_ACT_TIE) && \
+     (defined(AE_TANH16X4X2) || defined(AE_TANH16X4)))
+    input_multiplier = 1;
+#else
+    input_multiplier = 3;
+#endif
+  }
+  xa_nn_vec_tanh_sym16s_sym16s(output_data, input_data, input_multiplier,
+                               tanh_input_left_shift, data_size);
+}
+
+void Tanh(int32_t cell_state_scale_power, float* input_data, float* output_data,
+          int32_t data_size) {
+  int data_dims[2] = {1, data_size};
+  RuntimeShape data_shape(2, reinterpret_cast<const int32_t*>(data_dims));
+  reference_ops::Tanh(data_shape, input_data, data_shape, output_data);
+}
+
+// Input and output have the same shape in LSTM
+void Mul(const ArithmeticParams& params, const int16_t* input1_data,
+         const int16_t* input2_data, int8_t* output_data, int32_t data_size) {
+  xa_nn_elm_mul_sym16sxsym16s_asym8s(
+      output_data, params.output_offset, params.output_shift,
+      params.output_multiplier, params.quantized_activation_min,
+      params.quantized_activation_max, input1_data, input2_data, data_size);
+}
+
+// Input and output have the same shape in LSTM
+void Mul(const ArithmeticParams& params, const int16_t* input1_data,
+         const int16_t* input2_data, int16_t* output_data, int32_t data_size) {
+  int dims_4D[4] = {1, 1, 1, data_size};
+  xa_nn_elm_mul_broadcast_4D_sym16sxsym16s_sym16s(
+      output_data, dims_4D, params.output_shift, params.output_multiplier,
+      params.quantized_activation_min, params.quantized_activation_max,
+      input1_data, dims_4D, input2_data, dims_4D);
+  return;
+}
+
+// Input and output have the same shape in LSTM
+void Mul(const ArithmeticParams& params, const float* input1_data,
+         const float* input2_data, float* output_data, int32_t data_size) {
+  int dims_2D[2] = {1, data_size};
+  RuntimeShape data_shape(2, reinterpret_cast<const int32_t*>(dims_2D));
+  return reference_ops::Mul(params, data_shape, input1_data, data_shape,
+                            input2_data, data_shape, output_data);
+}
+
+void FullyConnected(const FullyConnectedParams& params,
+                    const int8_t* input_data, const int8_t* filter_data,
+                    const int32_t* bias_data, int16_t* output_data,
+                    const int num_batches, const int output_depth,
+                    const int accum_depth) {
+#pragma loop_count min = 1
+  for (int b = 0; b < num_batches; b++) {
+    xa_nn_matXvec_out_stride_sym8sxasym8s_16(
+        output_data + b * output_depth, filter_data,
+        input_data + b * accum_depth, bias_data, output_depth, accum_depth,
+        accum_depth, 1, params.input_offset, params.output_multiplier,
+        params.output_shift);
+  }
+  return;
+}
+
+void FullyConnected(const FullyConnectedParams& params,
+                    const int16_t* input_data, const int8_t* filter_data,
+                    const int64_t* bias_data, int16_t* output_data,
+                    const int num_batches, const int output_depth,
+                    const int accum_depth) {
+  xa_nn_matmul_sym8sxsym16s_sym16s(
+      output_data, filter_data, input_data, bias_data, output_depth,
+      accum_depth, accum_depth, num_batches, accum_depth, output_depth, 1,
+      params.input_offset, params.output_multiplier, params.output_shift,
+      params.output_offset);
+  return;
+}
+
+void FullyConnected(const FullyConnectedParams& params, const float* input_data,
+                    const float* filter_data, const float* bias_data,
+                    float* output_data, const int num_batches,
+                    const int output_depth, const int accum_depth) {
+  int input_dims[2] = {num_batches, output_depth};
+  RuntimeShape input_shape(2, reinterpret_cast<const int32_t*>(input_dims));
+  RuntimeShape bias_shape(1, bias_data == NULL ? 0 : output_depth);
+  int filter_dims[2] = {output_depth, accum_depth};
+  RuntimeShape filter_shape(2, reinterpret_cast<const int32_t*>(filter_dims));
+  int output_dims[2] = {num_batches, output_depth};
+  RuntimeShape output_shape(2, reinterpret_cast<const int32_t*>(output_dims));
+  return tflite::reference_ops::FullyConnected(
+      params, input_shape, input_data, filter_shape, filter_data, bias_shape,
+      bias_data, output_shape, output_data);
+}
+#endif  // #if !(defined(HIFI3) || defined(HIFI4) || defined(HIFI5))
+
+void Clipping(const int v_size, const CellStateInfo& cell_state_info,
+              int16_t* vector) {
+  for (int i = 0; i < v_size; i++) {
+    vector[i] =
+        std::max(std::min(cell_state_info.quantized_cell_clip, vector[i]),
+                 static_cast<int16_t>(-cell_state_info.quantized_cell_clip));
+  }
+}
+
+void Clipping(const int v_size, const CellStateInfo& cell_state_info,
+              float* vector) {
+  for (int i = 0; i < v_size; i++) {
+    vector[i] = std::max(std::min(cell_state_info.cell_clip, vector[i]),
+                         -cell_state_info.cell_clip);
+  }
+}
+
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
+void UpdateLstmCell(const LstmStepManager& step_info,
+                    TfLiteEvalTensor* cell_state,
+                    // Gate outputs
+                    int16_t* forget_gate_output,
+                    const int16_t* input_gate_output,
+                    const int16_t* cell_gate_output,
+                    // Mul parameters
+                    const ArithmeticParams& forget_cell_mul_params,
+                    const ArithmeticParams& input_mul_params,
+                    const CellStateInfo& cell_state_info, int16_t* buffer) {
+  auto cell_state_shape = step_info.StateShape();
+  // Check offset validity to avoid memory overflow
+  TFLITE_DCHECK_LE(step_info.CellStateOffset() + cell_state_shape.FlatSize(),
+                   tflite::micro::GetTensorShape(cell_state).FlatSize());
+
+  // Multiplier is equivalent to 0.5 here so adding 1 to shifts
+  xa_nn_lstm_cell_state_update_16(
+      tflite::micro::GetTensorData<int16_t>(cell_state) +
+          step_info.CellStateOffset(),
+      forget_gate_output, cell_gate_output, input_gate_output,
+      forget_cell_mul_params.output_shift - 1,
+      input_mul_params.output_shift - 1, cell_state_info.quantized_cell_clip,
+      cell_state_shape.FlatSize());
+}
+
+void UpdateLstmCell(const LstmStepManager& step_info,
+                    TfLiteEvalTensor* cell_state,
+                    // Gate outputs
+                    float* forget_gate_output, const float* input_gate_output,
+                    const float* cell_gate_output,
+                    // Mul parameters
+                    const ArithmeticParams& forget_cell_mul_params,
+                    const ArithmeticParams& input_mul_params,
+                    const CellStateInfo& cell_state_info, float* buffer) {
+  // Check offset validity to avoid memory overflow
+  TFLITE_DCHECK_LE(
+      step_info.CellStateOffset() + step_info.StateShape().FlatSize(),
+      tflite::micro::GetTensorShape(cell_state).FlatSize());
+
+  auto cell_state_shape = step_info.StateShape();
+  // Forget Gate x Cell State
+  Mul(forget_cell_mul_params, forget_gate_output,
+      tflite::micro::GetTensorData<float>(cell_state) +
+          step_info.CellStateOffset(),
+      tflite::micro::GetTensorData<float>(cell_state) +
+          step_info.CellStateOffset(),
+      cell_state_shape.FlatSize());
+  // Input Gate x Cell Gate
+  Mul(input_mul_params, input_gate_output, cell_gate_output, buffer,
+      cell_state_shape.FlatSize());
+
+  // Update the cell state
+  AddElementWise(tflite::micro::GetTensorData<float>(cell_state) +
+                     step_info.CellStateOffset(),
+                 buffer,
+                 /*n_batch=*/cell_state_shape.DimsData()[0],
+                 /*n_state=*/cell_state_shape.DimsData()[1],
+                 tflite::micro::GetTensorData<float>(cell_state) +
+                     step_info.CellStateOffset());
+
+  if (cell_state_info.cell_clip > 0) {
+    Clipping(cell_state_shape.FlatSize(), cell_state_info,
+             tflite::micro::GetTensorData<float>(cell_state) +
+                 step_info.CellStateOffset());
+  }
+}
+#endif  // #if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
+
+// Increment the data offset so the single time step invocation call can access
+// the corresponding input/output tensor data at the time step
+void LstmStepManager::UpdateTime() {
+  current_time_ += 1;
+  TFLITE_DCHECK_LE(current_time_, size_info_.time_steps);
+  // default as one batch per inference
+  int input_step = size_info_.input_dimension;
+  int output_step = size_info_.state_dimension;
+  // time major: batch inference
+  if (size_info_.time_major) {
+    input_step = input_step * size_info_.batch_size;
+    output_step = output_step * size_info_.batch_size;
+  }
+
+  input_offset_ += input_step;
+  output_offset_ += output_step;
+}
+
+// Increment the data offset so the single time step invocation call can access
+// the corresponding hidden/cell state tensor data at the time step (for single
+// batch inference only)
+void LstmStepManager::UpdateBatch() {
+  current_batch_ += 1;
+  TFLITE_DCHECK_LE(current_batch_, size_info_.batch_size);
+  // batch inference for time major: no action needed
+  if (size_info_.time_major) {
+    return;
+  }
+  // otherwise: single batch inference, go to the next batch
+  hidden_state_offset_ += size_info_.state_dimension;
+  cell_state_offset_ += size_info_.state_dimension;
+}
+
+// Input shape for each single time LSTM invocation.
+// Multi-batch for time_major input
+RuntimeShape LstmStepManager::InputShape() const {
+  int batch_size = 1;
+  if (size_info_.time_major) {
+    batch_size = size_info_.batch_size;
+  }
+  const int dims[2] = {batch_size, size_info_.input_dimension};
+  const int32_t* dims_data = reinterpret_cast<const int32_t*>(dims);
+  return RuntimeShape(2, dims_data);
+}
+
+// State shape (both hidden and cell) for each single time LSTM invocation.
+// Multi-batch for time_major input
+RuntimeShape LstmStepManager::StateShape() const {
+  int batch_size = 1;
+  if (size_info_.time_major) {
+    batch_size = size_info_.batch_size;
+  }
+  const int dims[2] = {batch_size, size_info_.state_dimension};
+  const int32_t* dims_data = reinterpret_cast<const int32_t*>(dims);
+  return RuntimeShape(2, dims_data);
+}
+
+}  // namespace lstm_internal
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/xtensa/lstm_eval.h b/tensorflow/lite/micro/kernels/xtensa/lstm_eval.h
index 5dd746a..0ba5e22 100644
--- a/tensorflow/lite/micro/kernels/xtensa/lstm_eval.h
+++ b/tensorflow/lite/micro/kernels/xtensa/lstm_eval.h
@@ -1,4 +1,4 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,205 +12,813 @@
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#ifndef TENSORFLOW_LITE_KERNELS_LSTM_EVAL_H_
-#define TENSORFLOW_LITE_KERNELS_LSTM_EVAL_H_
 
+// Functions to perform integer evaluation for standard LSTM (e.g., defined in
+// the keras lstm layer, no peephole etc.). Currently used by the 16 bits
+// activation case only
+
+#ifndef TENSORFLOW_LITE_MICRO_KERNELS_LSTM_EVAL_GENERAL_H_
+#define TENSORFLOW_LITE_MICRO_KERNELS_LSTM_EVAL_GENERAL_H_
+#include <algorithm>
 #include <cstdint>
-#include <memory>
 
 #include "tensorflow/lite/c/builtin_op_data.h"
 #include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/kernels/internal/portable_tensor_utils.h"
-#include "tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h"
 #include "tensorflow/lite/micro/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/kernels/lstm_shared.h"
+#include "tensorflow/lite/micro/kernels/xtensa/xtensa.h"
+#include "tensorflow/lite/micro/micro_log.h"
 
 namespace tflite {
-namespace ops {
-namespace micro {
-namespace lstm_eval {
 
-#if defined(HIFI5)
-void calc_cell_state_without_cifg(int16_t* cell_state,
-                                  const int16_t* forget_gate,
-                                  const int16_t* cell_gate,
-                                  const int16_t* input_gate, int shift1,
-                                  int shift2, int clip, int num_elms);
+// Interface to access all the TempTfLiteTensors of the LSTM kernel during the
+// preparation phase. Can only be constructed through the constructor to avoid
+// memory leakage. All TempTfLiteTensors will be deallocated through the
+// destructor.
+class LstmTensors {
+ public:
+  LstmTensors(const LstmTensors& other) = delete;
+  LstmTensors& operator=(const LstmTensors& other) = delete;
 
-void calc_cell_state_with_cifg(int16_t* cell_state, const int16_t* forget_gate,
-                               const int16_t* cell_gate, int shift1, int shift2,
-                               int clip, int num_elms);
+  LstmTensors(TfLiteContext* context, TfLiteNode* node);
+  ~LstmTensors();
 
-void xa_nn_elm_mul_16x16_asym8s(int8_t* output, const int16_t* input_1,
-                                const int16_t* input_2, int32_t multiplier,
-                                int32_t shift, int32_t zero_point,
-                                int num_elms);
-#endif  // defined(HIFI5)
+  // Verify the LSTM internal tensor properties (e.g., type checks)
+  // Input/output/states/fc weights tensors are required for kernel evaluation.
+  // The state tensors should be variables. Variants of the standard LSTM
+  // are not supported here, therefore their corresponding tensors should be
+  // invalid
+  TfLiteStatus ValidateTensorStatus(TfLiteContext* context) const;
 
-// Pamameters for integer LSTM.
-// Consider split this into two Integer Parameters if more fields are added.
-struct IntegerLstmParameter {
-  int32_t effective_input_to_input_scale_a;
-  int effective_input_to_input_scale_b;
-  int32_t effective_recurrent_to_input_scale_a;
-  int effective_recurrent_to_input_scale_b;
-  int32_t effective_cell_to_input_scale_a;
-  int effective_cell_to_input_scale_b;
-  int32_t effective_input_to_forget_scale_a;
-  int effective_input_to_forget_scale_b;
-  int32_t effective_recurrent_to_forget_scale_a;
-  int effective_recurrent_to_forget_scale_b;
-  int32_t effective_cell_to_forget_scale_a;
-  int effective_cell_to_forget_scale_b;
-  int32_t effective_input_to_cell_scale_a;
-  int effective_input_to_cell_scale_b;
-  int32_t effective_recurrent_to_cell_scale_a;
-  int effective_recurrent_to_cell_scale_b;
-  int32_t effective_input_to_output_scale_a;
-  int effective_input_to_output_scale_b;
-  int32_t effective_recurrent_to_output_scale_a;
-  int effective_recurrent_to_output_scale_b;
-  int32_t effective_cell_to_output_scale_a;
-  int effective_cell_to_output_scale_b;
-  int32_t effective_proj_scale_a;
-  int effective_proj_scale_b;
-  int32_t effective_hidden_scale_a;
-  int effective_hidden_scale_b;
-  int32_t layer_norm_input_scale_a;
-  int layer_norm_input_scale_b;
-  int32_t layer_norm_forget_scale_a;
-  int layer_norm_forget_scale_b;
-  int32_t layer_norm_cell_scale_a;
-  int layer_norm_cell_scale_b;
-  int32_t layer_norm_output_scale_a;
-  int layer_norm_output_scale_b;
-  // Quantized clip value for cell and projection. Zero value means no clipping.
-  int16_t quantized_cell_clip;
-  int8_t quantized_proj_clip;
-  int32_t hidden_zp;
-  int32_t cell_scale;
+  // Internal tensors. see lstm_shared.h for tensor names
+  const TfLiteTensor* GetInternalTensor(const int tensor_index) const {
+    return internal_tensors_[tensor_index];
+  }
 
-  int32_t input_variance_guard;
-  int32_t forget_variance_guard;
-  int32_t cell_variance_guard;
-  int32_t output_variance_guard;
+  const TfLiteTensor* HiddenStateTensor() const {
+    return internal_tensors_[kLstmOutputStateTensor];
+  }
+  const TfLiteTensor* CellStateTensor() const {
+    return internal_tensors_[kLstmCellStateTensor];
+  }
+  const TfLiteTensor* OutputTensor() const { return output_tensor_; }
 
-  // Pre-calculate bias + zero_point * weight.
-  // Unabled to use temporary tensors since those are used in Prepare() and
-  // scratch buffer is only allocated after Preapre().
-  std::unique_ptr<int32_t[]> input_to_forget_effective_bias;
-  std::unique_ptr<int32_t[]> recurrent_to_forget_effective_bias;
-  std::unique_ptr<int32_t[]> input_to_cell_effective_bias;
-  std::unique_ptr<int32_t[]> recurrent_to_cell_effective_bias;
-  std::unique_ptr<int32_t[]> input_to_output_effective_bias;
-  std::unique_ptr<int32_t[]> recurrent_to_output_effective_bias;
-  std::unique_ptr<int32_t[]> input_to_input_effective_bias;
-  std::unique_ptr<int32_t[]> recurrent_to_input_effective_bias;
-  std::unique_ptr<int32_t[]> projection_effective_bias;
-
-  // Scale and zero point for intermediate tensors.
-  // Used only in the 8x8_8 case.
-  int32_t intermediate_scale_a[8];
-  int32_t intermediate_scale_b[8];
-  int32_t intermediate_zp[12];
+ private:
+  // see lstm_shared.h for tensor names
+  MicroContext* micro_context_;
+  TfLiteTensor* internal_tensors_[24];
+  TfLiteTensor* output_tensor_;
 };
 
-TfLiteStatus EvalFloat(const TfLiteEvalTensor* input,
-                       const TfLiteEvalTensor* input_to_input_weights,
-                       const TfLiteEvalTensor* input_to_forget_weights,
-                       const TfLiteEvalTensor* input_to_cell_weights,
-                       const TfLiteEvalTensor* input_to_output_weights,
-                       const TfLiteEvalTensor* recurrent_to_input_weights,
-                       const TfLiteEvalTensor* recurrent_to_forget_weights,
-                       const TfLiteEvalTensor* recurrent_to_cell_weights,
-                       const TfLiteEvalTensor* recurrent_to_output_weights,
-                       const TfLiteEvalTensor* cell_to_input_weights,
-                       const TfLiteEvalTensor* cell_to_forget_weights,
-                       const TfLiteEvalTensor* cell_to_output_weights,
-                       const TfLiteEvalTensor* input_layer_norm_coefficients,
-                       const TfLiteEvalTensor* forget_layer_norm_coefficients,
-                       const TfLiteEvalTensor* cell_layer_norm_coefficients,
-                       const TfLiteEvalTensor* output_layer_norm_coefficients,
-                       const TfLiteEvalTensor* aux_input,
-                       const TfLiteEvalTensor* aux_input_to_input_weights,
-                       const TfLiteEvalTensor* aux_input_to_forget_weights,
-                       const TfLiteEvalTensor* aux_input_to_cell_weights,
-                       const TfLiteEvalTensor* aux_input_to_output_weights,
-                       const TfLiteEvalTensor* input_gate_bias,
-                       const TfLiteEvalTensor* forget_gate_bias,
-                       const TfLiteEvalTensor* cell_gate_bias,
-                       const TfLiteEvalTensor* output_gate_bias,
-                       const TfLiteEvalTensor* projection_weights,
-                       const TfLiteEvalTensor* projection_bias,
-                       const TfLiteLSTMParams* params, bool forward_sequence,
-                       bool time_major, int output_offset,
-                       TfLiteEvalTensor* scratch_buffer,
-                       TfLiteEvalTensor* output_state,
-                       TfLiteEvalTensor* cell_state, TfLiteEvalTensor* output);
+// Deduce the size information (Batch (B), Time Steps (T), Input dimension (I),
+// State dimension (S)) that defines the LSTM using the input and hidden state
+// tensor
+LstmSizeInfo CreateLstmSizeInfo(
+    const bool time_major, const TfLiteIntArray* input_tensor_shape,
+    const TfLiteIntArray* hidden_state_tensor_shape);
 
-TfLiteStatus EvalInteger8x8_16(
-    TfLiteContext* context, TfLiteNode* node, const TfLiteEvalTensor* input,
-    const TfLiteEvalTensor* input_to_input_weights,
-    const TfLiteEvalTensor* input_to_forget_weights,
-    const TfLiteEvalTensor* input_to_cell_weights,
-    const TfLiteEvalTensor* input_to_output_weights,
-    const TfLiteEvalTensor* recurrent_to_input_weights,
-    const TfLiteEvalTensor* recurrent_to_forget_weights,
-    const TfLiteEvalTensor* recurrent_to_cell_weights,
-    const TfLiteEvalTensor* recurrent_to_output_weights,
-    const TfLiteEvalTensor* cell_to_input_weights,
-    const TfLiteEvalTensor* cell_to_forget_weights,
-    const TfLiteEvalTensor* cell_to_output_weights,
-    const TfLiteEvalTensor* input_layer_norm_coefficients,
-    const TfLiteEvalTensor* forget_layer_norm_coefficients,
-    const TfLiteEvalTensor* cell_layer_norm_coefficients,
-    const TfLiteEvalTensor* output_layer_norm_coefficients,
-    const TfLiteEvalTensor* input_gate_bias,
-    const TfLiteEvalTensor* forget_gate_bias,
-    const TfLiteEvalTensor* cell_gate_bias,
-    const TfLiteEvalTensor* output_gate_bias,
-    const TfLiteEvalTensor* projection_weights,
-    const TfLiteEvalTensor* projection_bias, const TfLiteLSTMParams* params,
-    bool forward_sequence, bool time_major,
-    const lstm_eval::IntegerLstmParameter* integer_lstm_param,
-    TfLiteEvalTensor* output_state, TfLiteEvalTensor* cell_state,
-    TfLiteEvalTensor* output, TfLiteEvalTensor* scratch0,
-    TfLiteEvalTensor* scratch1, TfLiteEvalTensor* scratch2,
-    TfLiteEvalTensor* scratch3, TfLiteEvalTensor* scratch4,
-    TfLiteEvalTensor* scratch5);
+TfLiteStatus ValidateWeightTensorSize(TfLiteContext* context,
+                                      const TfLiteTensor* tensor, int dim1_size,
+                                      int dim2_size);
 
-TfLiteStatus EvalInteger8x8_8(
-    const TfLiteEvalTensor* input,
-    const TfLiteEvalTensor* input_to_input_weights,
-    const TfLiteEvalTensor* input_to_forget_weights,
-    const TfLiteEvalTensor* input_to_cell_weights,
-    const TfLiteEvalTensor* input_to_output_weights,
-    const TfLiteEvalTensor* recurrent_to_input_weights,
-    const TfLiteEvalTensor* recurrent_to_forget_weights,
-    const TfLiteEvalTensor* recurrent_to_cell_weights,
-    const TfLiteEvalTensor* recurrent_to_output_weights,
-    const TfLiteEvalTensor* cell_to_input_weights,
-    const TfLiteEvalTensor* cell_to_forget_weights,
-    const TfLiteEvalTensor* cell_to_output_weights,
-    const TfLiteEvalTensor* input_layer_norm_coefficients,
-    const TfLiteEvalTensor* forget_layer_norm_coefficients,
-    const TfLiteEvalTensor* cell_layer_norm_coefficients,
-    const TfLiteEvalTensor* output_layer_norm_coefficients,
-    const TfLiteEvalTensor* input_gate_bias,
-    const TfLiteEvalTensor* forget_gate_bias,
-    const TfLiteEvalTensor* cell_gate_bias,
-    const TfLiteEvalTensor* output_gate_bias,
-    const TfLiteEvalTensor* projection_weights,
-    const TfLiteEvalTensor* projection_bias, const TfLiteLSTMParams* params,
-    TfLiteEvalTensor* output_state, TfLiteEvalTensor* cell_state,
-    TfLiteEvalTensor* output,
-    const lstm_eval::IntegerLstmParameter* integer_lstm_param,
-    TfLiteEvalTensor* scratch0, TfLiteEvalTensor* scratch1,
-    TfLiteEvalTensor* scratch2, TfLiteEvalTensor* scratch3,
-    TfLiteEvalTensor* scratch4, TfLiteEvalTensor* scratch5,
-    TfLiteEvalTensor* scratch6, TfLiteEvalTensor* scratch7);
+TfLiteStatus ValidateBiasTensorSize(TfLiteContext* context,
+                                    const TfLiteTensor* tensor, int size);
 
-}  // namespace lstm_eval
-}  // namespace micro
-}  // namespace ops
+// Go through every tensor and make sure their shapes match the kernel
+// configuration
+TfLiteStatus ValidateTensorSize(TfLiteContext* context,
+                                const LstmTensors& tensors,
+                                const LstmSizeInfo& size_info);
+
+// Wrapper function to create gate parameters for the four internal LSTM gates
+TfLiteStatus CreateGateParams(
+    TfLiteContext* context,
+    /*Input tensors*/
+    const TfLiteTensor* input, const TfLiteTensor* input_weight,
+    const TfLiteTensor* input_bias,
+    /*Hidden state tensors*/
+    const TfLiteTensor* hidden_state, const TfLiteTensor* hidden_state_weight,
+    const TfLiteTensor* hidden_state_bias,
+    /*Scale of the fc output (input to non-linear activation)*/
+    const float nonlinear_activation_input_scale, const TfLiteType cell_type,
+    const tflite::GateParameters& gate_params);
+
+// Create parameters for element wise multiplication that happens in a) cell
+// state update; b) hidden state update
+// Note that all the gate outputs are symmetrically quantized so only scales
+// are required for input. However, during the hidden state update phase, the
+// output is the updated hidden state, which is asymmetrically quantized. Thus
+// output may require zero point
+tflite::ArithmeticParams CreateInterGateMulParams(const float input1_scale,
+                                                  const float input2_scale,
+                                                  const float output_scale,
+                                                  const TfLiteType output_type,
+                                                  const int output_zp = 0);
+
+// Create the additional information about the cell state, which include:
+// cell_state_scale_power: used in integer nonlinear function (e.g., tanh)
+// quantized_cell_clip: quantized cell clip range
+CellStateInfo CreateLstmCellStateInfo(const float cell_state_scale,
+                                      const float cell_clip);
+
+CellStateInfo CreateLstmCellStateInfoFloat(const float cell_clip);
+tflite::FullyConnectedParams CreateFCParamsFloat();
+
+tflite::GateParameters CreateGateParamsFloat();
+
+tflite::ArithmeticParams CreateInterGateMulParamsFloat();
+
+TfLiteStatus PrepareGateParametersFloat(TfLiteContext* context,
+                                        const LstmTensors& lstm_tensors,
+                                        OpDataLSTM* op_data_lstm);
+
+TfLiteStatus PrepareGateParametersInteger(TfLiteContext* context,
+                                          const LstmTensors& lstm_tensors,
+                                          OpDataLSTM* op_data_lstm);
+
+LSTMKernelContents CreateLSTMKernelContent(TfLiteContext* context,
+                                           TfLiteNode* node);
+
+template <typename CellType>
+LSTMBuffers<CellType> CreateLSTMBuffers(TfLiteContext* context,
+                                        const int* buffer_indices) {
+  LSTMBuffers<CellType> buffers;
+  buffers.buffer0 = reinterpret_cast<CellType*>(
+      context->GetScratchBuffer(context, buffer_indices[0]));
+  buffers.buffer1 = reinterpret_cast<CellType*>(
+      context->GetScratchBuffer(context, buffer_indices[1]));
+  buffers.buffer2 = reinterpret_cast<CellType*>(
+      context->GetScratchBuffer(context, buffer_indices[2]));
+  buffers.buffer3 = reinterpret_cast<CellType*>(
+      context->GetScratchBuffer(context, buffer_indices[3]));
+  return buffers;
+}
+
+// Since LSTM includes multiple intermediate stages, introducing the internal
+// namespace to expose them for testing
+namespace lstm_internal {
+
+#if !(defined(HIFI3) || defined(HIFI4) || defined(HIFI5))
+void Sigmoid(const RuntimeShape& data_shape, int16_t* data);
+
+void Sigmoid(const RuntimeShape& data_shape, float* data);
+
+void Tanh(int32_t cell_state_scale_power, const RuntimeShape& input_data_shape,
+          int16_t* input_data, const RuntimeShape& output_data_shape,
+          int16_t* output_data);
+
+void Tanh(int32_t cell_state_scale_power, const RuntimeShape& input_data_shape,
+          float* input_data, const RuntimeShape& output_data_shape,
+          float* output_data);
+
+void Mul(const RuntimeShape& shape, const ArithmeticParams& params,
+         const int16_t* input1_data, const int16_t* input2_data,
+         int8_t* output_data);
+
+void Mul(const RuntimeShape& shape, const ArithmeticParams& params,
+         const int16_t* input1_data, const int16_t* input2_data,
+         int16_t* output_data);
+
+void Mul(const RuntimeShape& shape, const ArithmeticParams& params,
+         const float* input1_data, const float* input2_data,
+         float* output_data);
+
+void FullyConnected(const FullyConnectedParams& params,
+                    const RuntimeShape& input_shape, const int8_t* input_data,
+                    const RuntimeShape& filter_shape, const int8_t* filter_data,
+                    const RuntimeShape& bias_shape, const int32_t* bias_data,
+                    const RuntimeShape& output_shape, int16_t* output_data);
+
+void FullyConnected(const FullyConnectedParams& params,
+                    const RuntimeShape& input_shape, const int16_t* input_data,
+                    const RuntimeShape& filter_shape, const int8_t* filter_data,
+                    const RuntimeShape& bias_shape, const int64_t* bias_data,
+                    const RuntimeShape& output_shape, int16_t* output_data);
+
+void FullyConnected(const FullyConnectedParams& params,
+                    const RuntimeShape& input_shape, const float* input_data,
+                    const RuntimeShape& filter_shape, const float* filter_data,
+                    const RuntimeShape& bias_shape, const float* bias_data,
+                    const RuntimeShape& output_shape, float* output_data);
+#else   // #if !(defined(HIFI3) || defined(HIFI4) || defined(HIFI5))
+void Sigmoid(int16_t* data, int32_t data_size);
+
+void Sigmoid(float* data, int32_t data_size);
+
+void Tanh(int32_t cell_state_scale_power, int16_t* input_data,
+          int16_t* output_data, int32_t data_size);
+
+void Tanh(int32_t cell_state_scale_power, float* input_data, float* output_data,
+          int32_t data_size);
+
+void Mul(const ArithmeticParams& params, const int16_t* input1_data,
+         const int16_t* input2_data, int8_t* output_data, int32_t data_size);
+
+void Mul(const ArithmeticParams& params, const int16_t* input1_data,
+         const int16_t* input2_data, int16_t* output_data, int32_t data_size);
+
+void Mul(const ArithmeticParams& params, const float* input1_data,
+         const float* input2_data, float* output_data, int32_t data_size);
+
+void FullyConnected(const FullyConnectedParams& params,
+                    const int8_t* input_data, const int8_t* filter_data,
+                    const int32_t* bias_data, int16_t* output_data,
+                    const int num_batches, const int output_depth,
+                    const int accum_depth);
+
+void FullyConnected(const FullyConnectedParams& params,
+                    const int16_t* input_data, const int8_t* filter_data,
+                    const int64_t* bias_data, int16_t* output_data,
+                    const int num_batches, const int output_depth,
+                    const int accum_depth);
+
+void FullyConnected(const FullyConnectedParams& params, const float* input_data,
+                    const float* filter_data, const float* bias_data,
+                    float* output_data, const int num_batches,
+                    const int output_depth, const int accum_depth);
+#endif  // #if !(defined(HIFI3) || defined(HIFI4) || defined(HIFI5))
+
+void AddElementWise(const int16_t* input_1, const int16_t* input_2, int n_batch,
+                    int n_input, int16_t* output);
+
+void AddElementWise(const float* input_1, const float* input_2, int n_batch,
+                    int n_input, float* output);
+
+void Clipping(const int v_size, const CellStateInfo& cell_state_info,
+              int16_t* vector);
+
+void Clipping(const int v_size, const CellStateInfo& cell_state_info,
+              float* vector);
+
+// Manages the slice position (offset), slice length (sliced tensor shape),
+// and update rules for input/output/hidden state/cell state tensors at each
+// time step.
+class LstmStepManager {
+ public:
+  LstmStepManager() = delete;
+  // Does not take any ownership, and all pointers must refer to valid objects
+  // that outlive the one constructed.
+  explicit LstmStepManager(const LstmSizeInfo* size_info)
+      : size_info_(*size_info) {}
+
+  void UpdateTime();
+  void UpdateBatch();
+
+  void ResetTime() { current_time_ = 0; }
+  RuntimeShape InputShape() const;
+  RuntimeShape StateShape() const;
+
+  int InputOffset() const { return input_offset_; }
+  int OutputOffset() const { return output_offset_; }
+  int HiddenStateOffset() const { return hidden_state_offset_; }
+  int CellStateOffset() const { return cell_state_offset_; }
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
+  int time_major() const { return size_info_.time_major; }
+
+  int batch_size() const { return size_info_.batch_size; }
+
+  int input_dimension() const { return size_info_.input_dimension; }
+
+  int state_dimension() const { return size_info_.state_dimension; }
+#endif
+
+ private:
+  int current_time_ = 0;
+  int current_batch_ = 0;
+  int input_offset_ = 0;
+  int output_offset_ = 0;
+  int hidden_state_offset_ = 0;
+  int cell_state_offset_ = 0;
+  // Sizeinfo is from LstmOpData, which resides in the memory arena
+  // (guaranteed to outlast LstmStepManager, which resides on the stack)
+  const LstmSizeInfo& size_info_;
+};
+
+// Calculates a single LSTM gate.
+// Implements the following formula:
+//   gate = activate(FC(input) + FC(recurrent))
+// Activation is sigmoid except for the "cell" gate (configurable, usually tanh)
+#if !(defined(HIFI3) || defined(HIFI4) || defined(HIFI5))
+template <typename ActivationType, typename WeightType, typename CellType,
+          typename BiasType>
+void CalculateLstmGate(
+    const LstmStepManager& step_info, const GateParameters& gate_params,
+    // Input FC
+    const TfLiteEvalTensor* input, const TfLiteEvalTensor* input_weight,
+    const TfLiteEvalTensor* input_bias,
+    // Recurrent FC
+    const TfLiteEvalTensor* recurrent, const TfLiteEvalTensor* recurrent_weight,
+    const TfLiteEvalTensor* recurrent_bias,
+    // Output
+    CellType* gate_output,
+    // Scratch arrays
+    CellType* fc_output_buffer, const TfLiteFusedActivation activation) {
+  const auto gate_output_shape = step_info.StateShape();
+  // Check offset validity to avoid memory overflow
+  TFLITE_DCHECK_LE(step_info.InputOffset() + step_info.InputShape().FlatSize(),
+                   tflite::micro::GetTensorShape(input).FlatSize());
+  TFLITE_DCHECK_LE(
+      step_info.HiddenStateOffset() + step_info.StateShape().FlatSize(),
+      tflite::micro::GetTensorShape(recurrent).FlatSize());
+
+  // Input FC
+  FullyConnected(gate_params.input_fc_params, step_info.InputShape(),
+                 tflite::micro::GetTensorData<ActivationType>(input) +
+                     step_info.InputOffset(),
+                 micro::GetTensorShape(input_weight),
+                 tflite::micro::GetTensorData<WeightType>(input_weight),
+                 tflite::micro::GetTensorShape(input_bias),
+                 tflite::micro::GetOptionalTensorData<BiasType>(input_bias),
+                 gate_output_shape, gate_output);
+
+  // Recurrent FC
+  FullyConnected(gate_params.recurrent_fc_params, step_info.StateShape(),
+                 tflite::micro::GetTensorData<ActivationType>(recurrent) +
+                     step_info.HiddenStateOffset(),
+                 tflite::micro::GetTensorShape(recurrent_weight),
+                 tflite::micro::GetTensorData<WeightType>(recurrent_weight),
+                 tflite::micro::GetTensorShape(recurrent_bias),
+                 tflite::micro::GetOptionalTensorData<BiasType>(recurrent_bias),
+                 gate_output_shape, fc_output_buffer);
+
+  AddElementWise(gate_output, fc_output_buffer,
+                 /*n_batch=*/gate_output_shape.DimsData()[0],
+                 /*n_state=*/gate_output_shape.DimsData()[1], gate_output);
+  // Apply activation
+  switch (activation) {
+    case kTfLiteActSigmoid:
+      Sigmoid(gate_output_shape, gate_output);
+      break;
+    case kTfLiteActTanh: {
+      // Set the scale power to -12 to avoid shift
+      Tanh(/*cell_state_scale_power=*/-12, gate_output_shape, gate_output,
+           gate_output_shape, gate_output);
+    } break;
+    default:
+      // Only Sigmoid or Tanh is used.
+      TFLITE_ASSERT_FALSE;
+  }
+}
+
+// Update the cell state using the output from the forget gate, input gate, and
+// cell gate Formula: updated_cell_state = forget_gate_output*cell_state +
+// input_gate_output * cell_gate_output, where * denotes element wise
+// multiplication
+template <typename CellType>
+void UpdateLstmCell(const LstmStepManager& step_info,
+                    TfLiteEvalTensor* cell_state,
+                    // Gate outputs
+                    CellType* forget_gate_output,
+                    const CellType* input_gate_output,
+                    const CellType* cell_gate_output,
+                    // Mul parameters
+                    const ArithmeticParams& forget_cell_mul_params,
+                    const ArithmeticParams& input_mul_params,
+                    const CellStateInfo& cell_state_info, CellType* buffer) {
+  // Check offset validity to avoid memory overflow
+  TFLITE_DCHECK_LE(
+      step_info.CellStateOffset() + step_info.StateShape().FlatSize(),
+      tflite::micro::GetTensorShape(cell_state).FlatSize());
+
+  auto cell_state_shape = step_info.StateShape();
+  // Forget Gate x Cell State
+  Mul(cell_state_shape, forget_cell_mul_params, forget_gate_output,
+      tflite::micro::GetTensorData<CellType>(cell_state) +
+          step_info.CellStateOffset(),
+      tflite::micro::GetTensorData<CellType>(cell_state) +
+          step_info.CellStateOffset());
+  // Input Gate x Cell Gate
+  Mul(cell_state_shape, input_mul_params, input_gate_output, cell_gate_output,
+      buffer);
+
+  // Update the cell state
+  AddElementWise(tflite::micro::GetTensorData<CellType>(cell_state) +
+                     step_info.CellStateOffset(),
+                 buffer,
+                 /*n_batch=*/cell_state_shape.DimsData()[0],
+                 /*n_state=*/cell_state_shape.DimsData()[1],
+                 tflite::micro::GetTensorData<CellType>(cell_state) +
+                     step_info.CellStateOffset());
+
+  if (cell_state_info.cell_clip > 0) {
+    Clipping(cell_state_shape.FlatSize(), cell_state_info,
+             tflite::micro::GetTensorData<CellType>(cell_state) +
+                 step_info.CellStateOffset());
+  }
+}
+#else   // #if !(defined(HIFI3) || defined(HIFI4) || defined(HIFI5))
+template <typename ActivationType, typename WeightType, typename CellType,
+          typename BiasType>
+void CalculateLstmGate(
+    const LstmStepManager& step_info, const GateParameters& gate_params,
+    // Input FC
+    const TfLiteEvalTensor* input, const TfLiteEvalTensor* input_weight,
+    const TfLiteEvalTensor* input_bias,
+    // Recurrent FC
+    const TfLiteEvalTensor* recurrent, const TfLiteEvalTensor* recurrent_weight,
+    const TfLiteEvalTensor* recurrent_bias,
+    // Output
+    CellType* gate_output,
+    // Scratch arrays
+    CellType* fc_output_buffer, const TfLiteFusedActivation activation,
+    const int num_batches, const int input_dimension,
+    const int state_dimension) {
+  // RuntimeShape step_input_shape = step_info.InputShape();
+  // RuntimeShape input_shape = tflite::micro::GetTensorShape(input);
+  // RuntimeShape step_state_shape = step_info.StateShape();
+  // RuntimeShape recurrent_shape = tflite::micro::GetTensorShape(recurrent);
+
+  // Moved these to LstmStep function
+  // Check offset validity to avoid memory overflow
+  // TFLITE_DCHECK_LE(step_info.InputOffset() + step_input_shape.FlatSize(),
+  // input_shape.FlatSize());
+  // TFLITE_DCHECK_LE(
+  // step_info.HiddenStateOffset() + step_state_shape.FlatSize(),
+  // recurrent_shape.FlatSize());
+
+  // Input FC
+  FullyConnected(gate_params.input_fc_params,
+                 tflite::micro::GetTensorData<ActivationType>(input) +
+                     step_info.InputOffset(),
+                 tflite::micro::GetTensorData<WeightType>(input_weight),
+                 tflite::micro::GetOptionalTensorData<BiasType>(input_bias),
+                 gate_output, num_batches, state_dimension, input_dimension);
+
+  // Recurrent FC
+  FullyConnected(gate_params.recurrent_fc_params,
+                 tflite::micro::GetTensorData<ActivationType>(recurrent) +
+                     step_info.HiddenStateOffset(),
+                 tflite::micro::GetTensorData<WeightType>(recurrent_weight),
+                 tflite::micro::GetOptionalTensorData<BiasType>(recurrent_bias),
+                 fc_output_buffer, num_batches, state_dimension,
+                 state_dimension);
+
+  AddElementWise(gate_output, fc_output_buffer,
+                 /*n_batch=*/num_batches,
+                 /*n_state=*/state_dimension, gate_output);
+  // Apply activation
+  switch (activation) {
+    case kTfLiteActSigmoid:
+      Sigmoid(gate_output, num_batches * state_dimension);
+      break;
+    case kTfLiteActTanh: {
+      // Set the scale power to -12 to avoid shift
+      Tanh(/*cell_state_scale_power=*/-12, gate_output, gate_output,
+           num_batches * state_dimension);
+    } break;
+    default:
+      // Only Sigmoid or Tanh is used.
+      TFLITE_ASSERT_FALSE;
+  }
+}
+
+// Update the cell state using the output from the forget gate, input gate, and
+// cell gate Formula: updated_cell_state = forget_gate_output*cell_state +
+// input_gate_output * cell_gate_output, where * denotes element wise
+// multiplication
+void UpdateLstmCell(const LstmStepManager& step_info,
+                    TfLiteEvalTensor* cell_state,
+                    // Gate outputs
+                    int16_t* forget_gate_output,
+                    const int16_t* input_gate_output,
+                    const int16_t* cell_gate_output,
+                    // Mul parameters
+                    const ArithmeticParams& forget_cell_mul_params,
+                    const ArithmeticParams& input_mul_params,
+                    const CellStateInfo& cell_state_info, int16_t* buffer);
+
+void UpdateLstmCell(const LstmStepManager& step_info,
+                    TfLiteEvalTensor* cell_state,
+                    // Gate outputs
+                    float* forget_gate_output, const float* input_gate_output,
+                    const float* cell_gate_output,
+                    // Mul parameters
+                    const ArithmeticParams& forget_cell_mul_params,
+                    const ArithmeticParams& input_mul_params,
+                    const CellStateInfo& cell_state_info, float* buffer);
+#endif  // #if !(defined(HIFI3) || defined(HIFI4) || defined(HIFI5))
+
+// Update the hidden state of the LSTM kernel using the following formula:
+// updated_hidden_state = Tanh(updated_cell_state) * output_gate_output, * means
+// element wise multiplication
+template <typename CellType, typename ActivationType>
+void UpdateLstmHidden(const LstmStepManager& step_info,
+                      TfLiteEvalTensor* cell_state,
+                      TfLiteEvalTensor* hidden_state,
+                      const CellType* output_gate_output,
+                      const ArithmeticParams& mul_params,
+                      int32_t cell_state_scale_power, CellType* buffer) {
+  // Check offset validity to avoid memory overflow
+  TFLITE_DCHECK_LE(
+      step_info.CellStateOffset() + step_info.StateShape().FlatSize(),
+      tflite::micro::GetTensorShape(cell_state).FlatSize());
+  TFLITE_DCHECK_LE(
+      step_info.HiddenStateOffset() + step_info.StateShape().FlatSize(),
+      tflite::micro::GetTensorShape(hidden_state).FlatSize());
+
+  auto cell_state_shape = step_info.StateShape();
+  CellType* cell_state_data =
+      tflite::micro::GetTensorData<CellType>(cell_state) +
+      step_info.CellStateOffset();
+  // Tanh(cell_state)
+#if !(defined(HIFI3) || defined(HIFI4) || defined(HIFI5))
+  Tanh(cell_state_scale_power, cell_state_shape, cell_state_data,
+       cell_state_shape, buffer);
+  // Update the hidden state
+  Mul(cell_state_shape, mul_params, buffer, output_gate_output,
+      tflite::micro::GetTensorData<ActivationType>(hidden_state) +
+          step_info.HiddenStateOffset());
+#else
+  int32_t cell_state_size = cell_state_shape.FlatSize();
+  Tanh(cell_state_scale_power, cell_state_data, buffer, cell_state_size);
+  // Update the hidden state
+  Mul(mul_params, buffer, output_gate_output,
+      tflite::micro::GetTensorData<ActivationType>(hidden_state) +
+          step_info.HiddenStateOffset(),
+      cell_state_size);
+#endif
+}
+
+#if !(defined(HIFI3) || defined(HIFI4) || defined(HIFI5))
+template <typename ActivationType, typename WeightType, typename CellType,
+          typename BiasType>
+void LstmStep(const LstmStepManager& step_info, const OpDataLSTM& op_data,
+              LSTMKernelContents& kernel_content,
+              const LSTMBuffers<CellType>& buffers) {
+  /*Step1: Calculate gate outputs to prepare cell state update*/
+  CellType* gate_internal_buffer = buffers.buffer3;
+  CellType* forget_gate_output = buffers.buffer0;
+  CalculateLstmGate<ActivationType, WeightType, CellType, BiasType>(
+      step_info, op_data.forget_gate_parameters,
+      // Input FC
+      kernel_content.GetInternalTensor(tflite::kLstmInputTensor),
+      kernel_content.GetInternalTensor(tflite::kLstmInputToForgetWeightsTensor),
+      kernel_content.GetInternalTensor(tflite::kLstmForgetGateBiasTensor),
+      // Recurrent FC
+      kernel_content.HiddenStateTensor(),
+      kernel_content.GetInternalTensor(
+          tflite::kLstmRecurrentToForgetWeightsTensor),
+      /*recurrent_bias*/ nullptr,
+      // Output
+      forget_gate_output,
+      // Scratch arrays
+      gate_internal_buffer, kTfLiteActSigmoid);
+
+  // Input Gate calculation;
+  CellType* input_gate_output = buffers.buffer1;
+  CalculateLstmGate<ActivationType, WeightType, CellType, BiasType>(
+      step_info, op_data.input_gate_parameters,
+      // Input FC
+      kernel_content.GetInternalTensor(tflite::kLstmInputTensor),
+      kernel_content.GetInternalTensor(tflite::kLstmInputToInputWeightsTensor),
+      kernel_content.GetInternalTensor(tflite::kLstmInputGateBiasTensor),
+      // Recurrent FC
+      kernel_content.HiddenStateTensor(),
+      kernel_content.GetInternalTensor(
+          tflite::kLstmRecurrentToInputWeightsTensor),
+      /*recurrent_bias*/ nullptr,
+      // Output
+      input_gate_output,
+      // Scratch arrays
+      gate_internal_buffer, kTfLiteActSigmoid);
+
+  // Cell Gate calculation
+  CellType* cell_gate_output = buffers.buffer2;
+  CalculateLstmGate<ActivationType, WeightType, CellType, BiasType>(
+      step_info, op_data.cell_gate_parameters,
+      // Input FC
+      kernel_content.GetInternalTensor(tflite::kLstmInputTensor),
+      kernel_content.GetInternalTensor(tflite::kLstmInputToCellWeightsTensor),
+      kernel_content.GetInternalTensor(tflite::kLstmCellGateBiasTensor),
+      // Recurrent FC
+      kernel_content.HiddenStateTensor(),
+      kernel_content.GetInternalTensor(
+          tflite::kLstmRecurrentToCellWeightsTensor),
+      /*recurrent_bias*/ nullptr,
+      // Output
+      cell_gate_output,
+      // Scratch arrays
+      gate_internal_buffer, op_data.cell_gate_nonlinear_type);
+
+  /*Step2: update the cell state */
+  const InterGateParameters& inter_gate_params = op_data.inter_gate_parameters;
+  CellType* updated_input_buffer = buffers.buffer1;  // reuse buffer
+
+  UpdateLstmCell<CellType>(step_info, kernel_content.CellStateTensor(),
+                           forget_gate_output, input_gate_output,
+                           cell_gate_output,
+                           inter_gate_params.forget_cell_mul_params,
+                           inter_gate_params.input_mul_params,
+                           op_data.cell_state_info, updated_input_buffer);
+
+  /*Step3: update the hidden state */
+  CellType* output_gate_output = buffers.buffer1;  // reuse buffer
+  CalculateLstmGate<ActivationType, WeightType, CellType, BiasType>(
+      step_info, op_data.output_gate_parameters,
+      // Input FC
+      kernel_content.GetInternalTensor(tflite::kLstmInputTensor),
+      kernel_content.GetInternalTensor(tflite::kLstmInputToOutputWeightsTensor),
+      kernel_content.GetInternalTensor(tflite::kLstmOutputGateBiasTensor),
+      // Recurrent FC
+      kernel_content.HiddenStateTensor(),
+      kernel_content.GetInternalTensor(
+          tflite::kLstmRecurrentToOutputWeightsTensor),
+      /*recurrent_bias*/ nullptr,
+      // Output
+      output_gate_output,
+      // Scratch arrays
+      gate_internal_buffer, kTfLiteActSigmoid);
+
+  CellType* tanh_activated_cell_buffer = buffers.buffer0;  // reuse buffer
+  tflite::lstm_internal::UpdateLstmHidden<CellType, ActivationType>(
+      step_info, kernel_content.CellStateTensor(),
+      kernel_content.HiddenStateTensor(), output_gate_output,
+      inter_gate_params.output_mul_params,
+      op_data.cell_state_info.cell_state_scale_power,
+      tanh_activated_cell_buffer);
+
+  /*Step4: copy the updated hidden state to the output*/
+  // Check offset validity to avoid memory overflow
+  TFLITE_DCHECK_LE(
+      step_info.OutputOffset() + step_info.StateShape().FlatSize(),
+      tflite::micro::GetTensorShape(kernel_content.output_tensor).FlatSize());
+  // record the output (from the updated hidden state)
+  ActivationType* output_ptr = tflite::micro::GetTensorData<ActivationType>(
+      kernel_content.output_tensor);
+  const auto* hidden_state = kernel_content.HiddenStateTensor();
+  std::memcpy(output_ptr + step_info.OutputOffset(),
+              tflite::micro::GetTensorData<ActivationType>(hidden_state) +
+                  step_info.HiddenStateOffset(),
+              step_info.StateShape().FlatSize() * sizeof(ActivationType));
+}
+#else   // #if !(defined(HIFI3) || defined(HIFI4) || defined(HIFI5))
+template <typename ActivationType, typename WeightType, typename CellType,
+          typename BiasType>
+void LstmStep(const LstmStepManager& step_info, const OpDataLSTM& op_data,
+              LSTMKernelContents& kernel_content,
+              const LSTMBuffers<CellType>& buffers) {
+  const TfLiteEvalTensor* input =
+      kernel_content.GetInternalTensor(tflite::kLstmInputTensor);
+  TfLiteEvalTensor* recurrent = kernel_content.HiddenStateTensor();
+
+  int time_major = step_info.time_major();
+  int num_batches = time_major == 0 ? 1 : step_info.batch_size();
+  int input_dimension = step_info.input_dimension();
+  int state_dimension = step_info.state_dimension();
+
+  // Check offset validity to avoid memory overflow
+  TFLITE_DCHECK_LE(step_info.InputOffset() + num_batches * input_dimension,
+                   tflite::micro::GetTensorShape(input).FlatSize());
+  TFLITE_DCHECK_LE(
+      step_info.HiddenStateOffset() + num_batches * state_dimension,
+      tflite::micro::GetTensorShape(recurrent).FlatSize());
+
+  /*Step1: Calculate gate outputs to prepare cell state update*/
+  CellType* gate_internal_buffer = buffers.buffer3;
+  CellType* forget_gate_output = buffers.buffer0;
+  CalculateLstmGate<ActivationType, WeightType, CellType, BiasType>(
+      step_info, op_data.forget_gate_parameters,
+      // Input FC
+      input,  // kernel_content.GetInternalTensor(tflite::kLstmInputTensor),
+      kernel_content.GetInternalTensor(tflite::kLstmInputToForgetWeightsTensor),
+      kernel_content.GetInternalTensor(tflite::kLstmForgetGateBiasTensor),
+      // Recurrent FC
+      recurrent,  // kernel_content.HiddenStateTensor(),
+      kernel_content.GetInternalTensor(
+          tflite::kLstmRecurrentToForgetWeightsTensor),
+      /*recurrent_bias*/ nullptr,
+      // Output
+      forget_gate_output,
+      // Scratch arrays
+      gate_internal_buffer, kTfLiteActSigmoid, num_batches, input_dimension,
+      state_dimension);
+
+  // Input Gate calculation;
+  CellType* input_gate_output = buffers.buffer1;
+  CalculateLstmGate<ActivationType, WeightType, CellType, BiasType>(
+      step_info, op_data.input_gate_parameters,
+      // Input FC
+      input,  // kernel_content.GetInternalTensor(tflite::kLstmInputTensor),
+      kernel_content.GetInternalTensor(tflite::kLstmInputToInputWeightsTensor),
+      kernel_content.GetInternalTensor(tflite::kLstmInputGateBiasTensor),
+      // Recurrent FC
+      recurrent,  // kernel_content.HiddenStateTensor(),
+      kernel_content.GetInternalTensor(
+          tflite::kLstmRecurrentToInputWeightsTensor),
+      /*recurrent_bias*/ nullptr,
+      // Output
+      input_gate_output,
+      // Scratch arrays
+      gate_internal_buffer, kTfLiteActSigmoid, num_batches, input_dimension,
+      state_dimension);
+
+  // Cell Gate calculation
+  CellType* cell_gate_output = buffers.buffer2;
+  CalculateLstmGate<ActivationType, WeightType, CellType, BiasType>(
+      step_info, op_data.cell_gate_parameters,
+      // Input FC
+      input,  // kernel_content.GetInternalTensor(tflite::kLstmInputTensor),
+      kernel_content.GetInternalTensor(tflite::kLstmInputToCellWeightsTensor),
+      kernel_content.GetInternalTensor(tflite::kLstmCellGateBiasTensor),
+      // Recurrent FC
+      recurrent,  // kernel_content.HiddenStateTensor(),
+      kernel_content.GetInternalTensor(
+          tflite::kLstmRecurrentToCellWeightsTensor),
+      /*recurrent_bias*/ nullptr,
+      // Output
+      cell_gate_output,
+      // Scratch arrays
+      gate_internal_buffer, op_data.cell_gate_nonlinear_type, num_batches,
+      input_dimension, state_dimension);
+
+  /*Step2: update the cell state */
+  const InterGateParameters& inter_gate_params = op_data.inter_gate_parameters;
+  CellType* updated_input_buffer = buffers.buffer1;  // reuse buffer
+
+  UpdateLstmCell(step_info, kernel_content.CellStateTensor(),
+                 forget_gate_output, input_gate_output, cell_gate_output,
+                 inter_gate_params.forget_cell_mul_params,
+                 inter_gate_params.input_mul_params, op_data.cell_state_info,
+                 updated_input_buffer);
+
+  /*Step3: update the hidden state */
+  CellType* output_gate_output = buffers.buffer1;  // reuse buffer
+  CalculateLstmGate<ActivationType, WeightType, CellType, BiasType>(
+      step_info, op_data.output_gate_parameters,
+      // Input FC
+      input,  // kernel_content.GetInternalTensor(tflite::kLstmInputTensor),
+      kernel_content.GetInternalTensor(tflite::kLstmInputToOutputWeightsTensor),
+      kernel_content.GetInternalTensor(tflite::kLstmOutputGateBiasTensor),
+      // Recurrent FC
+      recurrent,  // kernel_content.HiddenStateTensor(),
+      kernel_content.GetInternalTensor(
+          tflite::kLstmRecurrentToOutputWeightsTensor),
+      /*recurrent_bias*/ nullptr,
+      // Output
+      output_gate_output,
+      // Scratch arrays
+      gate_internal_buffer, kTfLiteActSigmoid, num_batches, input_dimension,
+      state_dimension);
+
+  CellType* tanh_activated_cell_buffer = buffers.buffer0;  // reuse buffer
+  tflite::lstm_internal::UpdateLstmHidden<CellType, ActivationType>(
+      step_info, kernel_content.CellStateTensor(), recurrent,
+      /* kernel_content.HiddenStateTensor(), */ output_gate_output,
+      inter_gate_params.output_mul_params,
+      op_data.cell_state_info.cell_state_scale_power,
+      tanh_activated_cell_buffer);
+
+  /*Step4: copy the updated hidden state to the output*/
+  // Check offset validity to avoid memory overflow
+  TFLITE_DCHECK_LE(
+      step_info.OutputOffset() + step_info.StateShape().FlatSize(),
+      tflite::micro::GetTensorShape(kernel_content.output_tensor).FlatSize());
+  // record the output (from the updated hidden state)
+  ActivationType* output_ptr = tflite::micro::GetTensorData<ActivationType>(
+      kernel_content.output_tensor);
+  // const auto* hidden_state = kernel_content.HiddenStateTensor();
+  std::memcpy(output_ptr + step_info.OutputOffset(),
+              tflite::micro::GetTensorData<ActivationType>(recurrent) +
+                  step_info.HiddenStateOffset(),
+              step_info.StateShape().FlatSize() * sizeof(ActivationType));
+}
+#endif  // #if !(defined(HIFI3) || defined(HIFI4) || defined(HIFI5))
+
+}  // namespace lstm_internal
+
+// Evaluate the LSTM kernel with (potentially) multi-step and multi-batch input
+// Since
+template <typename ActivationType, typename WeightType, typename CellType,
+          typename BiasType>
+TfLiteStatus EvalLstm(const OpDataLSTM& op_data,
+                      LSTMKernelContents& kernel_content,
+                      const LSTMBuffers<CellType>& buffers) {
+  lstm_internal::LstmStepManager step_info(&op_data.size_info);
+  const auto& size_info = op_data.size_info;
+  // time is the first dimension, enabling batch computation
+  if (size_info.time_major) {
+    for (int t = 0; t < size_info.time_steps; t++) {
+      lstm_internal::LstmStep<ActivationType, WeightType, CellType, BiasType>(
+          step_info, op_data, kernel_content, buffers);
+      // prepare for the next time step
+      step_info.UpdateTime();
+    }
+  } else {
+    // batch first, unable to size the input data. single batch inference
+    for (int b = 0; b < size_info.batch_size; b++) {
+      for (int t = 0; t < size_info.time_steps; t++) {
+        lstm_internal::LstmStep<ActivationType, WeightType, CellType, BiasType>(
+            step_info, op_data, kernel_content, buffers);
+        // prepare for the next time step
+        step_info.UpdateTime();
+      }
+      // prepare for the next batch
+      step_info.UpdateBatch();
+      step_info.ResetTime();
+    }
+  }
+  return kTfLiteOk;
+}
 }  // namespace tflite
-#endif  // TENSORFLOW_LITE_KERNELS_LSTM_EVAL_H_
+
+#endif  // TENSORFLOW_LITE_MICRO_KERNELS_LSTM_EVAL_16ACT_H_
diff --git a/tensorflow/lite/micro/kernels/xtensa/lstm_eval_hifi.cc b/tensorflow/lite/micro/kernels/xtensa/lstm_eval_hifi.cc
index 2b49f26..f8b6fd8 100644
--- a/tensorflow/lite/micro/kernels/xtensa/lstm_eval_hifi.cc
+++ b/tensorflow/lite/micro/kernels/xtensa/lstm_eval_hifi.cc
@@ -12,17 +12,65 @@
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
+
+#include <xtensa/tie/xt_hifi2.h>
+
 #include "tensorflow/lite/c/builtin_op_data.h"
 #include "tensorflow/lite/c/common.h"
 #include "tensorflow/lite/micro/kernels/xtensa/lstm_eval.h"
 #include "tensorflow/lite/micro/kernels/xtensa/xtensa.h"
 
 namespace tflite {
-namespace ops {
-namespace micro {
-namespace lstm_eval {
 
 #if defined(HIFI5)
+#if TFLITE_SINGLE_ROUNDING
+#define MPY_BY_QUANT_MULT_X2_OUT32(out, inp, multiplier, left_shift,  \
+                                   right_shift)                       \
+  {                                                                   \
+    ae_int64 out64_0, out64_1;                                        \
+    ae_int64 INT64_ONE = AE_MOVINT64_FROMINT32X2(AE_MOVDA32X2(0, 1)); \
+    ae_int64 round_val = AE_SLAA64S(INT64_ONE, 30 - left_shift);      \
+    AE_MUL32X2S_HH_LL(out64_0, out64_1, inp, AE_MOVDA32(multiplier)); \
+    out64_0 = AE_ADD64S(out64_0, round_val);                          \
+    out64_1 = AE_ADD64S(out64_1, round_val);                          \
+    out = AE_TRUNCA32X2F64S(out64_0, out64_1, 1 + left_shift);        \
+  }
+
+#define MPY_BY_QUANT_MULT_X2X2_OUT32(out1, out2, inp1, inp2, multiplier, \
+                                     left_shift, right_shift)            \
+  {                                                                      \
+    ae_int64 out64_0, out64_1, out64_2, out64_3;                         \
+    ae_int64 INT64_ONE = AE_MOVINT64_FROMINT32X2(AE_MOVDA32X2(0, 1));    \
+    ae_int64 round_val = AE_SLAA64S(INT64_ONE, 30 - left_shift);         \
+    AE_MUL32X2S_HH_LL(out64_0, out64_1, inp1, AE_MOVDA32(multiplier));   \
+    AE_MUL32X2S_HH_LL(out64_2, out64_3, inp2, AE_MOVDA32(multiplier));   \
+    out64_0 = AE_ADD64S(out64_0, round_val);                             \
+    out64_1 = AE_ADD64S(out64_1, round_val);                             \
+    out64_2 = AE_ADD64S(out64_2, round_val);                             \
+    out64_3 = AE_ADD64S(out64_3, round_val);                             \
+    out1 = AE_TRUNCA32X2F64S(out64_0, out64_1, 1 + left_shift);          \
+    out2 = AE_TRUNCA32X2F64S(out64_2, out64_3, 1 + left_shift);          \
+  }
+#else /* #if TFLITE_SINGLE_ROUNDING */
+#define MPY_BY_QUANT_MULT_X2_OUT32(out, inp, multiplier, left_shift, \
+                                   right_shift)                      \
+  out = AE_SLAA32(inp, left_shift);                                  \
+  out = AE_MULFP32X2RAS(out, AE_MOVDA32(multiplier));                \
+  out = AE_SRAA32SYMS(out, right_shift);
+
+#define MPY_BY_QUANT_MULT_X2X2_OUT32(out1, out2, inp1, inp2, multiplier, \
+                                     left_shift, right_shift)            \
+  {                                                                      \
+    ae_int32x2 d_ls = AE_MOVDA32(1 << left_shift);                       \
+    AE_MUL2P32X4(out1, out2, inp1, inp2, d_ls, d_ls);                    \
+    AE_MULF2P32X4RAS(out1, out2, out1, out2, AE_MOVDA32(multiplier),     \
+                     AE_MOVDA32(multiplier));                            \
+    out1 = AE_SRAA32SYMS(out1, right_shift);                             \
+    out2 = AE_SRAA32SYMS(out2, right_shift);                             \
+  }
+#endif /* #if TFLITE_SINGLE_ROUNDING */
+
 void calc_cell_state_without_cifg(int16_t* cell_state,
                                   const int16_t* forget_gate,
                                   const int16_t* cell_gate,
@@ -124,7 +172,7 @@
 
       AE_MUL16X4(d_mul_0, d_mul_1, d_cg_0, d_ig_0);
       d_mul_0 = AE_SRAA32SYMS(d_mul_0, shift2);
-      d_cg_0 = AE_SAT16X4(d_mul_0, d_mul_1);
+      d_cg_0 = AE_SAT16X4(d_mul_0, d_mul_0);
 
       d_cs_w_0 = AE_ADD16S(d_cs_w_0, d_cg_0);
       AE_MINMAX16(d_cs_w_0, d_min, d_max);
@@ -187,11 +235,11 @@
 
       AE_MUL16X4(d_mul_0, d_mul_1, d_cs_r_0, d_fg_0);
       d_mul_0 = AE_SRAA32SYMS(d_mul_0, shift1);
-      d_cs_w_0 = AE_SAT16X4(d_mul_0, d_mul_1);
+      d_cs_w_0 = AE_SAT16X4(d_mul_0, d_mul_0);
 
       AE_MUL16X4(d_mul_0, d_mul_1, d_cg_0, d_ig_0);
       d_mul_0 = AE_SRAA32SYMS(d_mul_0, shift2);
-      d_cg_0 = AE_SAT16X4(d_mul_0, d_mul_1);
+      d_cg_0 = AE_SAT16X4(d_mul_0, d_mul_0);
 
       d_cs_w_0 = AE_ADD16S(d_cs_w_0, d_cg_0);
       AE_MINMAX16(d_cs_w_0, d_min, d_max);
@@ -298,7 +346,7 @@
       d_1mfg_0 = AE_SUB16S(d_one, d_fg_0);
       AE_MUL16X4(d_mul_0, d_mul_1, d_cg_0, d_1mfg_0);
       d_mul_0 = AE_SRAA32SYMS(d_mul_0, shift2);
-      d_cg_0 = AE_SAT16X4(d_mul_0, d_mul_1);
+      d_cg_0 = AE_SAT16X4(d_mul_0, d_mul_0);
 
       d_cs_w_0 = AE_ADD16S(d_cs_w_0, d_cg_0);
       AE_MINMAX16(d_cs_w_0, d_min, d_max);
@@ -360,12 +408,12 @@
 
       AE_MUL16X4(d_mul_0, d_mul_1, d_cs_r_0, d_fg_0);
       d_mul_0 = AE_SRAA32SYMS(d_mul_0, shift1);
-      d_cs_w_0 = AE_SAT16X4(d_mul_0, d_mul_1);
+      d_cs_w_0 = AE_SAT16X4(d_mul_0, d_mul_0);
 
       d_1mfg_0 = AE_SUB16S(d_one, d_fg_0);
       AE_MUL16X4(d_mul_0, d_mul_1, d_cg_0, d_1mfg_0);
       d_mul_0 = AE_SRAA32SYMS(d_mul_0, shift2);
-      d_cg_0 = AE_SAT16X4(d_mul_0, d_mul_1);
+      d_cg_0 = AE_SAT16X4(d_mul_0, d_mul_0);
 
       d_cs_w_0 = AE_ADD16S(d_cs_w_0, d_cg_0);
       AE_MINMAX16(d_cs_w_0, d_min, d_max);
@@ -387,7 +435,6 @@
   ae_int16x4 data_a_0, data_a_1;
   ae_int16x4 data_b_0, data_b_1;
   ae_int32x2 data_ab_0, data_ab_1, data_ab_2, data_ab_3;
-  ae_int32x2 d_multiplier, d_left_shift;
   ae_int16x4 d_zp;
   ae_int16x4 data_c_0, data_c_1;
   ae_int8x8 data_c;
@@ -401,13 +448,15 @@
   align_src_input_2 = AE_LA128_PP((ae_int16x8*)tmp_input_2);
   align_dst_output = AE_ZALIGN64();  // zero alignment reg
 
-  d_multiplier = AE_MOVDA32(multiplier);
   d_zp = AE_MOVDA16(zero_point);
 
+#if TFLITE_SINGLE_ROUNDING
+  left_shift = shift;
+  (void)right_shift;
+#else  /* #if TFLITE_SINGLE_ROUNDING */
   left_shift = shift < 0 ? 0 : shift;
   right_shift = shift > 0 ? 0 : -shift;
-
-  d_left_shift = AE_MOVDA32(1 << left_shift);
+#endif /* #if TFLITE_SINGLE_ROUNDING */
 #pragma concurrent
   for (i = 0; i < (num_elms >> 3); i++) {
     AE_LA16X4X2_IP(data_a_0, data_a_1, align_src_input_1, tmp_input_1);
@@ -415,18 +464,10 @@
 
     AE_MUL16X4(data_ab_0, data_ab_1, data_a_0, data_b_0);
     AE_MUL16X4(data_ab_2, data_ab_3, data_a_1, data_b_1);
-    AE_MUL2P32X4(data_ab_0, data_ab_1, data_ab_0, data_ab_1, d_left_shift,
-                 d_left_shift);
-    AE_MUL2P32X4(data_ab_2, data_ab_3, data_ab_2, data_ab_3, d_left_shift,
-                 d_left_shift);
-    AE_MULF2P32X4RAS(data_ab_0, data_ab_1, data_ab_0, data_ab_1, d_multiplier,
-                     d_multiplier);
-    AE_MULF2P32X4RAS(data_ab_2, data_ab_3, data_ab_2, data_ab_3, d_multiplier,
-                     d_multiplier);
-    data_ab_0 = AE_SRAA32SYMS(data_ab_0, right_shift);
-    data_ab_1 = AE_SRAA32SYMS(data_ab_1, right_shift);
-    data_ab_2 = AE_SRAA32SYMS(data_ab_2, right_shift);
-    data_ab_3 = AE_SRAA32SYMS(data_ab_3, right_shift);
+    MPY_BY_QUANT_MULT_X2X2_OUT32(data_ab_0, data_ab_1, data_ab_0, data_ab_1,
+                                 multiplier, left_shift, right_shift);
+    MPY_BY_QUANT_MULT_X2X2_OUT32(data_ab_2, data_ab_3, data_ab_2, data_ab_3,
+                                 multiplier, left_shift, right_shift);
     data_c_0 = AE_SAT16X4(data_ab_0, data_ab_1);
     data_c_1 = AE_SAT16X4(data_ab_2, data_ab_3);
     data_c_0 = AE_SUB16S(data_c_0, d_zp);
@@ -445,18 +486,532 @@
     AE_L16_IP(data_b_0, (ae_int16*)tmp_input_2, 2);
 
     AE_MUL16X4(data_ab_0, data_ab_1, data_a_0, data_b_0);
-    data_ab_0 = AE_MULP32X2(data_ab_0, d_left_shift);
-    data_ab_0 = AE_MULFP32X2RAS(data_ab_0, d_multiplier);
-    data_ab_0 = AE_SRAA32SYMS(data_ab_0, right_shift);
-    data_c_0 = AE_SAT16X4(data_ab_0, data_ab_1);
+    MPY_BY_QUANT_MULT_X2_OUT32(data_ab_0, data_ab_0, multiplier, left_shift,
+                               right_shift);
+    data_c_0 = AE_SAT16X4(data_ab_0, data_ab_0);
     data_c_0 = AE_SUB16S(data_c_0, d_zp);
     data_c = AE_SAT8X8X16(data_c_0, data_c_0);
     AE_S8_0_IP(data_c, (ae_int8*)output, 1);
   }
 }
+#elif defined(HIFI3) || defined(HIFI4)
#if TFLITE_SINGLE_ROUNDING
// Single-rounding requantization of one ae_int32x2 (two 32-bit lanes) by a
// 32-bit fixed-point `multiplier`.  The 64-bit products are shifted left by
// (1 + l_shift) so that rounding the upper 32 bits of the 64-bit value
// implements (inp * multiplier) >> (31 - l_shift) in a single rounding step.
// `r_shift` is accepted for signature compatibility but unused in
// single-rounding mode.
#define MPY_BY_QUANT_MULT_X2_OUT32(out, inp, multiplier, l_shift, r_shift) \
  {                                                                        \
    ae_int64 out64_0, out64_1;                                             \
    out64_0 = AE_MUL32_HH(inp, AE_MOVDA32(multiplier));                    \
    out64_1 = AE_MUL32_LL(inp, AE_MOVDA32(multiplier));                    \
    out64_0 = AE_SLAA64S(out64_0, 1 + l_shift);                            \
    out64_1 = AE_SLAA64S(out64_1, 1 + l_shift);                            \
    out = AE_ROUND32X2F64SASYM(out64_0, out64_1);                          \
  }

// Same as MPY_BY_QUANT_MULT_X2_OUT32 but requantizes two ae_int32x2 vectors
// (four lanes) at once.
#define MPY_BY_QUANT_MULT_X2X2_OUT32(out1, out2, inp1, inp2, multiplier, \
                                     l_shift, r_shift)                   \
  {                                                                      \
    ae_int64 out64_0, out64_1, out64_2, out64_3;                         \
    out64_0 = AE_MUL32_HH(inp1, AE_MOVDA32(multiplier));                 \
    out64_1 = AE_MUL32_LL(inp1, AE_MOVDA32(multiplier));                 \
    out64_2 = AE_MUL32_HH(inp2, AE_MOVDA32(multiplier));                 \
    out64_3 = AE_MUL32_LL(inp2, AE_MOVDA32(multiplier));                 \
    out64_0 = AE_SLAA64S(out64_0, 1 + l_shift);                          \
    out64_1 = AE_SLAA64S(out64_1, 1 + l_shift);                          \
    out64_2 = AE_SLAA64S(out64_2, 1 + l_shift);                          \
    out64_3 = AE_SLAA64S(out64_3, 1 + l_shift);                          \
    out1 = AE_ROUND32X2F64SASYM(out64_0, out64_1);                       \
    out2 = AE_ROUND32X2F64SASYM(out64_2, out64_3);                       \
  }
#else /* #if TFLITE_SINGLE_ROUNDING */
// Double-rounding requantization: scale up by 2^l_shift, apply the
// fractional multiply with rounding, then round again while shifting right
// by r_shift through a 64-bit intermediate.
// NOTE: braces added so this multi-statement macro behaves as a single
// statement (matches the sibling macros; safe under an unbraced `if`).
#define MPY_BY_QUANT_MULT_X2_OUT32(out, inp, multiplier, l_shift, r_shift) \
  {                                                                        \
    out = AE_SLAA32(inp, l_shift);                                         \
    out = AE_MULFP32X2RAS(out, AE_MOVDA32(multiplier));                    \
    out = AE_ROUND32X2F64SSYM(AE_SRAA64(AE_CVT64F32_H(out), r_shift),      \
                              AE_SRAA64(AE_CVT64F32_L(out), r_shift));     \
  }

// Four-lane variant of the double-rounding requantization above.
#define MPY_BY_QUANT_MULT_X2X2_OUT32(out1, out2, inp1, inp2, multiplier, \
                                     l_shift, r_shift)                   \
  {                                                                      \
    ae_int32x2 d_ls = AE_MOVDA32(1 << l_shift);                          \
    out1 = AE_MULP32X2(inp1, d_ls);                                      \
    out2 = AE_MULP32X2(inp2, d_ls);                                      \
    out1 = AE_MULFP32X2RAS(out1, AE_MOVDA32(multiplier));                \
    out2 = AE_MULFP32X2RAS(out2, AE_MOVDA32(multiplier));                \
    out1 = AE_ROUND32X2F64SSYM(AE_SRAA64(AE_CVT64F32_H(out1), r_shift),  \
                               AE_SRAA64(AE_CVT64F32_L(out1), r_shift)); \
    out2 = AE_ROUND32X2F64SSYM(AE_SRAA64(AE_CVT64F32_H(out2), r_shift),  \
                               AE_SRAA64(AE_CVT64F32_L(out2), r_shift)); \
  }
#endif /* #if TFLITE_SINGLE_ROUNDING */
+
#ifndef AE_MULFP16X4RS
// Fallback for cores that lack the AE_MULFP16X4RS intrinsic: lane-wise
// fractional 16-bit multiply with rounding.  Each 16-bit lane of d0 is
// widened to 32 bits (via a multiply by 1), multiplied against the matching
// lane of d1 with the 32x16 fractional rounding multiplies, and the results
// are packed back down to four 16-bit lanes.
static inline ae_f16x4 AE_MULFP16X4RS(ae_f16x4 d0, ae_f16x4 d1) {
  ae_f16x4 output;
  ae_f32x2 d0_32_0, d0_32_1, out32_0, out32_1;
  ae_f16x4 one_d = AE_MOVDA16(1);
  // Widen d0's four lanes into two 32x2 vectors.
  AE_MUL16X4(d0_32_0, d0_32_1, d0, one_d);
  out32_0 = AE_MULFP32X16X2RS_H(d0_32_0, d1);
  out32_1 = AE_MULFP32X16X2RS_L(d0_32_1, d1);
  // Re-pack the 16-bit lanes of interest from the two 32-bit results.
  output = AE_SEL16_6420(AE_MOVF16X4_FROMF32X2(out32_0),
                         AE_MOVF16X4_FROMF32X2(out32_1));
  return output;
}
#endif
+
#ifndef AE_MINMAX16
// Fallback clamp: limits every 16-bit lane of `dinout` to [d_min, d_max]
// in place, using compare + conditional move.
#define AE_MINMAX16(dinout, d_min, d_max) \
  {                                       \
    xtbool4 b0 = AE_LT16(dinout, d_min);  \
    AE_MOVT16X4(dinout, d_min, b0);       \
    b0 = AE_LT16(d_max, dinout);          \
    AE_MOVT16X4(dinout, d_max, b0);       \
  }
#endif
+
#ifndef AE_SRAA32SYMS
// Fallback arithmetic right shift with symmetric rounding: widens each
// 32-bit lane to 64 bits, shifts right by `right_shift`, then rounds back
// down to 32 bits.
#define AE_SRAA32SYMS(inp, right_shift)                           \
  AE_ROUND32X2F64SSYM(AE_SRAA64(AE_CVT64F32_H(inp), right_shift), \
                      AE_SRAA64(AE_CVT64F32_L(inp), right_shift))
#endif
+
// In-place cell-state update for a quantized LSTM without CIFG:
//   cell_state = clamp(round(cell_state * forget_gate >> shift1) +
//                      round(cell_gate * input_gate >> shift2))
// over `num_elms` int16 elements.  When `clip` > 0 the sum is clamped to
// [-clip, clip]; otherwise only int16 saturation applies.  The main loops
// process eight elements per iteration with 4-lane SIMD; a scalar loop
// handles the residue (num_elms % 8).
void calc_cell_state_without_cifg(int16_t* cell_state,
                                  const int16_t* forget_gate,
                                  const int16_t* cell_gate,
                                  const int16_t* input_gate, int shift1,
                                  int shift2, int clip, int num_elms) {
  const ae_int16x4 *p16x4_cs_r, *p16x4_fg_r;
  const ae_int16x4 *p16x4_cg_r, *p16x4_ig_r;

  ae_int16x4* p16x4_cs_w;

  ae_valign align_cs_r, align_fg_r;
  ae_valign align_cg_r, align_ig_r;
  ae_valign align_cs_w;

  ae_int16x4 d_cs_r_0, d_cs_r_1;
  ae_int16x4 d_fg_0, d_fg_1;
  ae_int16x4 d_cg_0, d_cg_1;
  ae_int16x4 d_ig_0, d_ig_1;
  ae_int16x4 d_cs_w_0, d_cs_w_1;
  ae_int32x2 d_mul_0, d_mul_1, d_mul_2, d_mul_3;
  ae_int32x2 d_mul_4, d_mul_5, d_mul_6, d_mul_7;

  ae_int16x4 d_min, d_max;

  int i = 0;
  // `cell_state` is both read and written; reads go through p16x4_cs_r and
  // writes through p16x4_cs_w, which trails the read pointer.
  p16x4_cs_r = (const ae_int16x4*)cell_state;
  p16x4_fg_r = (const ae_int16x4*)forget_gate;
  p16x4_cg_r = (const ae_int16x4*)cell_gate;
  p16x4_ig_r = (const ae_int16x4*)input_gate;

  p16x4_cs_w = (ae_int16x4*)cell_state;

  align_cs_r = AE_LA64_PP(p16x4_cs_r);
  align_fg_r = AE_LA64_PP(p16x4_fg_r);
  align_cg_r = AE_LA64_PP(p16x4_cg_r);
  align_ig_r = AE_LA64_PP(p16x4_ig_r);

  align_cs_w = AE_ZALIGN64();

  // clip > 0 selects an explicit clamp range; otherwise the full int16
  // range (i.e. no clipping beyond saturation).
  if (clip > 0) {
    d_min = AE_MOVDA16(-clip);
    d_max = AE_MOVDA16(clip);
  } else {
    d_min = AE_MOVDA16(-32768);
    d_max = AE_MOVDA16(32767);
  }

  // shift1 == 15 lets the cell_state * forget_gate product use the fused
  // fractional multiply-with-rounding (AE_MULFP16X4RS) instead of a
  // widening multiply followed by a rounding shift.
#pragma concurrent
  if (shift1 == 15) {
    for (i = 0; i < (num_elms >> 3); i++) {
      AE_LA16X4_IP(d_cs_r_0, align_cs_r, p16x4_cs_r);
      AE_LA16X4_IP(d_cs_r_1, align_cs_r, p16x4_cs_r);
      AE_LA16X4_IP(d_fg_0, align_fg_r, p16x4_fg_r);
      AE_LA16X4_IP(d_fg_1, align_fg_r, p16x4_fg_r);
      AE_LA16X4_IP(d_cg_0, align_cg_r, p16x4_cg_r);
      AE_LA16X4_IP(d_cg_1, align_cg_r, p16x4_cg_r);
      AE_LA16X4_IP(d_ig_0, align_ig_r, p16x4_ig_r);
      AE_LA16X4_IP(d_ig_1, align_ig_r, p16x4_ig_r);

      d_cs_w_0 = AE_MULFP16X4RS(d_cs_r_0, d_fg_0);
      d_cs_w_1 = AE_MULFP16X4RS(d_cs_r_1, d_fg_1);

      // cell_gate * input_gate, widened to 32 bits, then >> shift2 with
      // rounding and saturated back to 16 bits.
      AE_MUL16X4(d_mul_4, d_mul_5, d_cg_0, d_ig_0);
      AE_MUL16X4(d_mul_6, d_mul_7, d_cg_1, d_ig_1);
      d_mul_4 = AE_SRAA32SYMS(d_mul_4, shift2);
      d_mul_5 = AE_SRAA32SYMS(d_mul_5, shift2);
      d_mul_6 = AE_SRAA32SYMS(d_mul_6, shift2);
      d_mul_7 = AE_SRAA32SYMS(d_mul_7, shift2);

      d_cg_0 = AE_SAT16X4(d_mul_4, d_mul_5);
      d_cg_1 = AE_SAT16X4(d_mul_6, d_mul_7);
      d_cs_w_0 = AE_ADD16S(d_cs_w_0, d_cg_0);
      d_cs_w_1 = AE_ADD16S(d_cs_w_1, d_cg_1);
      AE_MINMAX16(d_cs_w_0, d_min, d_max);
      AE_MINMAX16(d_cs_w_1, d_min, d_max);

      AE_SA16X4_IP(d_cs_w_0, align_cs_w, p16x4_cs_w);
      AE_SA16X4_IP(d_cs_w_1, align_cs_w, p16x4_cs_w);
    }
    AE_SA64POS_FP(align_cs_w, p16x4_cs_w);  // finalize the stream

    const ae_int16 *p16_cs_r, *p16_fg_r;
    const ae_int16 *p16_cg_r, *p16_ig_r;

    ae_int16* p16_cs_w;

    p16_cs_r = (const ae_int16*)p16x4_cs_r;
    p16_fg_r = (const ae_int16*)p16x4_fg_r;
    p16_cg_r = (const ae_int16*)p16x4_cg_r;
    p16_ig_r = (const ae_int16*)p16x4_ig_r;

    p16_cs_w = (ae_int16*)p16x4_cs_w;
    // residue iterations
#pragma concurrent
#pragma loop_count max = 7
    for (i = 0; i < ((num_elms)&7); i++) {
      d_cs_r_0 = p16_cs_r[i];
      d_fg_0 = p16_fg_r[i];
      d_cg_0 = p16_cg_r[i];
      d_ig_0 = p16_ig_r[i];

      d_cs_w_0 = AE_MULFP16X4RS(d_cs_r_0, d_fg_0);

      AE_MUL16X4(d_mul_0, d_mul_1, d_cg_0, d_ig_0);
      d_mul_0 = AE_SRAA32SYMS(d_mul_0, shift2);
      d_cg_0 = AE_SAT16X4(d_mul_0, d_mul_0);

      d_cs_w_0 = AE_ADD16S(d_cs_w_0, d_cg_0);
      AE_MINMAX16(d_cs_w_0, d_min, d_max);
      p16_cs_w[i] = d_cs_w_0;
    }
  } else {
    // General shift1: widening multiply + rounding shift for both products.
    for (i = 0; i < (num_elms >> 3); i++) {
      AE_LA16X4_IP(d_cs_r_0, align_cs_r, p16x4_cs_r);
      AE_LA16X4_IP(d_cs_r_1, align_cs_r, p16x4_cs_r);
      AE_LA16X4_IP(d_fg_0, align_fg_r, p16x4_fg_r);
      AE_LA16X4_IP(d_fg_1, align_fg_r, p16x4_fg_r);
      AE_LA16X4_IP(d_cg_0, align_cg_r, p16x4_cg_r);
      AE_LA16X4_IP(d_cg_1, align_cg_r, p16x4_cg_r);
      AE_LA16X4_IP(d_ig_0, align_ig_r, p16x4_ig_r);
      AE_LA16X4_IP(d_ig_1, align_ig_r, p16x4_ig_r);

      AE_MUL16X4(d_mul_0, d_mul_1, d_cs_r_0, d_fg_0);
      AE_MUL16X4(d_mul_2, d_mul_3, d_cs_r_1, d_fg_1);
      d_mul_0 = AE_SRAA32SYMS(d_mul_0, shift1);
      d_mul_1 = AE_SRAA32SYMS(d_mul_1, shift1);
      d_mul_2 = AE_SRAA32SYMS(d_mul_2, shift1);
      d_mul_3 = AE_SRAA32SYMS(d_mul_3, shift1);

      d_cs_w_0 = AE_SAT16X4(d_mul_0, d_mul_1);
      d_cs_w_1 = AE_SAT16X4(d_mul_2, d_mul_3);

      AE_MUL16X4(d_mul_4, d_mul_5, d_cg_0, d_ig_0);
      AE_MUL16X4(d_mul_6, d_mul_7, d_cg_1, d_ig_1);
      d_mul_4 = AE_SRAA32SYMS(d_mul_4, shift2);
      d_mul_5 = AE_SRAA32SYMS(d_mul_5, shift2);
      d_mul_6 = AE_SRAA32SYMS(d_mul_6, shift2);
      d_mul_7 = AE_SRAA32SYMS(d_mul_7, shift2);

      d_cg_0 = AE_SAT16X4(d_mul_4, d_mul_5);
      d_cg_1 = AE_SAT16X4(d_mul_6, d_mul_7);
      d_cs_w_0 = AE_ADD16S(d_cs_w_0, d_cg_0);
      d_cs_w_1 = AE_ADD16S(d_cs_w_1, d_cg_1);
      AE_MINMAX16(d_cs_w_0, d_min, d_max);
      AE_MINMAX16(d_cs_w_1, d_min, d_max);

      AE_SA16X4_IP(d_cs_w_0, align_cs_w, p16x4_cs_w);
      AE_SA16X4_IP(d_cs_w_1, align_cs_w, p16x4_cs_w);
    }
    AE_SA64POS_FP(align_cs_w, p16x4_cs_w);  // finalize the stream

    const ae_int16 *p16_cs_r, *p16_fg_r;
    const ae_int16 *p16_cg_r, *p16_ig_r;

    ae_int16* p16_cs_w;

    p16_cs_r = (const ae_int16*)p16x4_cs_r;
    p16_fg_r = (const ae_int16*)p16x4_fg_r;
    p16_cg_r = (const ae_int16*)p16x4_cg_r;
    p16_ig_r = (const ae_int16*)p16x4_ig_r;

    p16_cs_w = (ae_int16*)p16x4_cs_w;
    // residue iterations
#pragma concurrent
#pragma loop_count max = 7
    for (i = 0; i < ((num_elms)&7); i++) {
      d_cs_r_0 = p16_cs_r[i];
      d_fg_0 = p16_fg_r[i];
      d_cg_0 = p16_cg_r[i];
      d_ig_0 = p16_ig_r[i];

      AE_MUL16X4(d_mul_0, d_mul_1, d_cs_r_0, d_fg_0);
      d_mul_0 = AE_SRAA32SYMS(d_mul_0, shift1);
      d_cs_w_0 = AE_SAT16X4(d_mul_0, d_mul_0);

      AE_MUL16X4(d_mul_0, d_mul_1, d_cg_0, d_ig_0);
      d_mul_0 = AE_SRAA32SYMS(d_mul_0, shift2);
      d_cg_0 = AE_SAT16X4(d_mul_0, d_mul_0);

      d_cs_w_0 = AE_ADD16S(d_cs_w_0, d_cg_0);
      AE_MINMAX16(d_cs_w_0, d_min, d_max);
      p16_cs_w[i] = d_cs_w_0;
    }
  }
}
+
// In-place cell-state update for a quantized LSTM with CIFG (coupled
// input/forget gate).  Instead of a separate input gate, the cell gate is
// weighted by (1 - forget_gate):
//   cell_state = clamp(round(cell_state * forget_gate >> shift1) +
//                      round(cell_gate * (1 - forget_gate) >> shift2))
// over `num_elms` int16 elements.  When `clip` > 0 the sum is clamped to
// [-clip, clip]; otherwise only int16 saturation applies.  Eight elements
// per SIMD iteration, plus a scalar residue loop for num_elms % 8.
void calc_cell_state_with_cifg(int16_t* cell_state, const int16_t* forget_gate,
                               const int16_t* cell_gate, int shift1, int shift2,
                               int clip, int num_elms) {
  const ae_int16x4 *p16x4_cs_r, *p16x4_fg_r;
  const ae_int16x4* p16x4_cg_r;

  ae_int16x4* p16x4_cs_w;

  ae_valign align_cs_r, align_fg_r;
  ae_valign align_cg_r;
  ae_valign align_cs_w;

  ae_int16x4 d_cs_r_0, d_cs_r_1;
  ae_int16x4 d_fg_0, d_fg_1;
  ae_int16x4 d_cg_0, d_cg_1;
  ae_int16x4 d_1mfg_0, d_1mfg_1;
  ae_int16x4 d_cs_w_0, d_cs_w_1;
  ae_int32x2 d_mul_0, d_mul_1, d_mul_2, d_mul_3;
  ae_int32x2 d_mul_4, d_mul_5, d_mul_6, d_mul_7;

  ae_int16x4 d_min, d_max, d_one;

  int i = 0;
  // `cell_state` is both read and written; reads go through p16x4_cs_r and
  // writes through p16x4_cs_w, which trails the read pointer.
  p16x4_cs_r = (const ae_int16x4*)cell_state;
  p16x4_fg_r = (const ae_int16x4*)forget_gate;
  p16x4_cg_r = (const ae_int16x4*)cell_gate;

  p16x4_cs_w = (ae_int16x4*)cell_state;

  align_cs_r = AE_LA64_PP(p16x4_cs_r);
  align_fg_r = AE_LA64_PP(p16x4_fg_r);
  align_cg_r = AE_LA64_PP(p16x4_cg_r);

  align_cs_w = AE_ZALIGN64();

  // clip > 0 selects an explicit clamp range; otherwise the full int16
  // range (i.e. no clipping beyond saturation).
  if (clip > 0) {
    d_min = AE_MOVDA16(-clip);
    d_max = AE_MOVDA16(clip);
  } else {
    d_min = AE_MOVDA16(-32768);
    d_max = AE_MOVDA16(32767);
  }
  // 32767 stands in for 1.0; (1 - forget_gate) is computed as
  // 32767 - forget_gate with saturation.
  d_one = AE_MOVDA16(32767);

  // shift1 == 15 lets the cell_state * forget_gate product use the fused
  // fractional multiply-with-rounding (AE_MULFP16X4RS) instead of a
  // widening multiply followed by a rounding shift.
#pragma concurrent
  if (shift1 == 15) {
    for (i = 0; i < (num_elms >> 3); i++) {
      AE_LA16X4_IP(d_cs_r_0, align_cs_r, p16x4_cs_r);
      AE_LA16X4_IP(d_cs_r_1, align_cs_r, p16x4_cs_r);
      AE_LA16X4_IP(d_fg_0, align_fg_r, p16x4_fg_r);
      AE_LA16X4_IP(d_fg_1, align_fg_r, p16x4_fg_r);
      AE_LA16X4_IP(d_cg_0, align_cg_r, p16x4_cg_r);
      AE_LA16X4_IP(d_cg_1, align_cg_r, p16x4_cg_r);

      d_cs_w_0 = AE_MULFP16X4RS(d_cs_r_0, d_fg_0);
      d_cs_w_1 = AE_MULFP16X4RS(d_cs_r_1, d_fg_1);

      // cell_gate * (1 - forget_gate), widened, >> shift2 with rounding,
      // then saturated back to 16 bits.
      d_1mfg_0 = AE_SUB16S(d_one, d_fg_0);
      d_1mfg_1 = AE_SUB16S(d_one, d_fg_1);
      AE_MUL16X4(d_mul_4, d_mul_5, d_cg_0, d_1mfg_0);
      AE_MUL16X4(d_mul_6, d_mul_7, d_cg_1, d_1mfg_1);
      d_mul_4 = AE_SRAA32SYMS(d_mul_4, shift2);
      d_mul_5 = AE_SRAA32SYMS(d_mul_5, shift2);
      d_mul_6 = AE_SRAA32SYMS(d_mul_6, shift2);
      d_mul_7 = AE_SRAA32SYMS(d_mul_7, shift2);
      d_cg_0 = AE_SAT16X4(d_mul_4, d_mul_5);
      d_cg_1 = AE_SAT16X4(d_mul_6, d_mul_7);

      d_cs_w_0 = AE_ADD16S(d_cs_w_0, d_cg_0);
      d_cs_w_1 = AE_ADD16S(d_cs_w_1, d_cg_1);

      AE_MINMAX16(d_cs_w_0, d_min, d_max);
      AE_MINMAX16(d_cs_w_1, d_min, d_max);

      AE_SA16X4_IP(d_cs_w_0, align_cs_w, p16x4_cs_w);
      AE_SA16X4_IP(d_cs_w_1, align_cs_w, p16x4_cs_w);
    }
    AE_SA64POS_FP(align_cs_w, p16x4_cs_w);  // finalize the stream

    const ae_int16 *p16_cs_r, *p16_fg_r;
    const ae_int16* p16_cg_r;

    ae_int16* p16_cs_w;

    p16_cs_r = (const ae_int16*)p16x4_cs_r;
    p16_fg_r = (const ae_int16*)p16x4_fg_r;
    p16_cg_r = (const ae_int16*)p16x4_cg_r;

    p16_cs_w = (ae_int16*)p16x4_cs_w;
    // residue iterations
#pragma concurrent
#pragma loop_count max = 7
    for (i = 0; i < ((num_elms)&7); i++) {
      d_cs_r_0 = p16_cs_r[i];
      d_fg_0 = p16_fg_r[i];
      d_cg_0 = p16_cg_r[i];

      d_cs_w_0 = AE_MULFP16X4RS(d_cs_r_0, d_fg_0);

      d_1mfg_0 = AE_SUB16S(d_one, d_fg_0);
      AE_MUL16X4(d_mul_0, d_mul_1, d_cg_0, d_1mfg_0);
      d_mul_0 = AE_SRAA32SYMS(d_mul_0, shift2);
      d_cg_0 = AE_SAT16X4(d_mul_0, d_mul_0);

      d_cs_w_0 = AE_ADD16S(d_cs_w_0, d_cg_0);
      AE_MINMAX16(d_cs_w_0, d_min, d_max);
      p16_cs_w[i] = d_cs_w_0;
    }
  } else {
    // General shift1: widening multiply + rounding shift for both products.
    for (i = 0; i < (num_elms >> 3); i++) {
      AE_LA16X4_IP(d_cs_r_0, align_cs_r, p16x4_cs_r);
      AE_LA16X4_IP(d_cs_r_1, align_cs_r, p16x4_cs_r);
      AE_LA16X4_IP(d_fg_0, align_fg_r, p16x4_fg_r);
      AE_LA16X4_IP(d_fg_1, align_fg_r, p16x4_fg_r);
      AE_LA16X4_IP(d_cg_0, align_cg_r, p16x4_cg_r);
      AE_LA16X4_IP(d_cg_1, align_cg_r, p16x4_cg_r);

      AE_MUL16X4(d_mul_0, d_mul_1, d_cs_r_0, d_fg_0);
      AE_MUL16X4(d_mul_2, d_mul_3, d_cs_r_1, d_fg_1);
      d_mul_0 = AE_SRAA32SYMS(d_mul_0, shift1);
      d_mul_1 = AE_SRAA32SYMS(d_mul_1, shift1);
      d_mul_2 = AE_SRAA32SYMS(d_mul_2, shift1);
      d_mul_3 = AE_SRAA32SYMS(d_mul_3, shift1);
      d_cs_w_0 = AE_SAT16X4(d_mul_0, d_mul_1);
      d_cs_w_1 = AE_SAT16X4(d_mul_2, d_mul_3);

      d_1mfg_0 = AE_SUB16S(d_one, d_fg_0);
      d_1mfg_1 = AE_SUB16S(d_one, d_fg_1);
      AE_MUL16X4(d_mul_4, d_mul_5, d_cg_0, d_1mfg_0);
      AE_MUL16X4(d_mul_6, d_mul_7, d_cg_1, d_1mfg_1);
      d_mul_4 = AE_SRAA32SYMS(d_mul_4, shift2);
      d_mul_5 = AE_SRAA32SYMS(d_mul_5, shift2);
      d_mul_6 = AE_SRAA32SYMS(d_mul_6, shift2);
      d_mul_7 = AE_SRAA32SYMS(d_mul_7, shift2);
      d_cg_0 = AE_SAT16X4(d_mul_4, d_mul_5);
      d_cg_1 = AE_SAT16X4(d_mul_6, d_mul_7);

      d_cs_w_0 = AE_ADD16S(d_cs_w_0, d_cg_0);
      d_cs_w_1 = AE_ADD16S(d_cs_w_1, d_cg_1);

      AE_MINMAX16(d_cs_w_0, d_min, d_max);
      AE_MINMAX16(d_cs_w_1, d_min, d_max);

      AE_SA16X4_IP(d_cs_w_0, align_cs_w, p16x4_cs_w);
      AE_SA16X4_IP(d_cs_w_1, align_cs_w, p16x4_cs_w);
    }
    AE_SA64POS_FP(align_cs_w, p16x4_cs_w);  // finalize the stream

    const ae_int16 *p16_cs_r, *p16_fg_r;
    const ae_int16* p16_cg_r;

    ae_int16* p16_cs_w;

    p16_cs_r = (const ae_int16*)p16x4_cs_r;
    p16_fg_r = (const ae_int16*)p16x4_fg_r;
    p16_cg_r = (const ae_int16*)p16x4_cg_r;

    p16_cs_w = (ae_int16*)p16x4_cs_w;
    // residue iterations
#pragma concurrent
#pragma loop_count max = 7
    for (i = 0; i < ((num_elms)&7); i++) {
      d_cs_r_0 = p16_cs_r[i];
      d_fg_0 = p16_fg_r[i];
      d_cg_0 = p16_cg_r[i];

      AE_MUL16X4(d_mul_0, d_mul_1, d_cs_r_0, d_fg_0);
      d_mul_0 = AE_SRAA32SYMS(d_mul_0, shift1);
      d_cs_w_0 = AE_SAT16X4(d_mul_0, d_mul_0);

      d_1mfg_0 = AE_SUB16S(d_one, d_fg_0);
      AE_MUL16X4(d_mul_0, d_mul_1, d_cg_0, d_1mfg_0);
      d_mul_0 = AE_SRAA32SYMS(d_mul_0, shift2);
      d_cg_0 = AE_SAT16X4(d_mul_0, d_mul_0);

      d_cs_w_0 = AE_ADD16S(d_cs_w_0, d_cg_0);
      AE_MINMAX16(d_cs_w_0, d_min, d_max);
      p16_cs_w[i] = d_cs_w_0;
    }
  }
}
+
// Element-wise multiply of two int16 buffers with int8 output: each
// product is requantized by (multiplier, shift), saturated to int16,
// has `zero_point` subtracted, and is clamped to [-128, 127] before being
// stored as one byte.  Four elements per SIMD iteration, plus a scalar
// residue loop for num_elms % 4.
void xa_nn_elm_mul_16x16_asym8s(int8_t* output, const int16_t* input_1,
                                const int16_t* input_2, int32_t multiplier,
                                int32_t shift, int32_t zero_point,
                                int num_elms) {
  ae_int16x4* tmp_input_1;
  ae_int16x4* tmp_input_2;

  ae_valign align_src_input_1, align_src_input_2;

  ae_int16x4 data_a_0, data_b_0;
  ae_int32x2 data_ab_0, data_ab_1;
  ae_int16x4 d_zp;
  ae_int16x4 data_c_0;
  // int8 output range used for the final clamp.
  ae_int16x4 d_min8 = AE_MOVDA16(-128);
  ae_int16x4 d_max8 = AE_MOVDA16(127);

  int i = 0;
  int left_shift, right_shift;
  tmp_input_1 = (ae_int16x4*)(input_1);
  tmp_input_2 = (ae_int16x4*)(input_2);

  align_src_input_1 = AE_LA64_PP((ae_int16x4*)tmp_input_1);
  align_src_input_2 = AE_LA64_PP((ae_int16x4*)tmp_input_2);

  d_zp = AE_MOVDA16(zero_point);

  // Split `shift` into left/right parts for double rounding; in
  // single-rounding mode the whole shift goes left and right_shift is
  // unused by the requantize macros.
#if TFLITE_SINGLE_ROUNDING
  left_shift = shift;
  (void)right_shift;
#else  /* #if TFLITE_SINGLE_ROUNDING */
  left_shift = shift < 0 ? 0 : shift;
  right_shift = shift > 0 ? 0 : -shift;
#endif /* #if TFLITE_SINGLE_ROUNDING */

#pragma concurrent
  for (i = 0; i < (num_elms >> 2); i++) {
    AE_LA16X4_IP(data_a_0, align_src_input_1, tmp_input_1);
    AE_LA16X4_IP(data_b_0, align_src_input_2, tmp_input_2);

    AE_MUL16X4(data_ab_0, data_ab_1, data_a_0, data_b_0);
    MPY_BY_QUANT_MULT_X2X2_OUT32(data_ab_0, data_ab_1, data_ab_0, data_ab_1,
                                 multiplier, left_shift, right_shift);
    data_c_0 = AE_SAT16X4(data_ab_0, data_ab_1);
    data_c_0 = AE_SUB16S(data_c_0, d_zp);
    AE_MINMAX16(data_c_0, d_min8, d_max8);

    // Store the four lanes individually (highest lane first, matching the
    // load order).
    *output++ = AE_MOVAD16_3(data_c_0);
    *output++ = AE_MOVAD16_2(data_c_0);
    *output++ = AE_MOVAD16_1(data_c_0);
    *output++ = AE_MOVAD16_0(data_c_0);
  }

  // residue iterations
#pragma concurrent
#pragma loop_count max = 3
  for (int j = 0; j < ((num_elms)&3); j++) {
    AE_L16_IP(data_a_0, (ae_int16*)tmp_input_1, 2);
    AE_L16_IP(data_b_0, (ae_int16*)tmp_input_2, 2);

    AE_MUL16X4(data_ab_0, data_ab_1, data_a_0, data_b_0);
    MPY_BY_QUANT_MULT_X2_OUT32(data_ab_0, data_ab_0, multiplier, left_shift,
                               right_shift);
    data_c_0 = AE_SAT16X4(data_ab_0, data_ab_0);
    data_c_0 = AE_SUB16S(data_c_0, d_zp);
    AE_MINMAX16(data_c_0, d_min8, d_max8);

    *output++ = AE_MOVAD16_0(data_c_0);
  }
}
 #endif  // defined(HIFI5)
 
-}  // namespace lstm_eval
-}  // namespace micro
-}  // namespace ops
 }  // namespace tflite
+
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
diff --git a/tensorflow/lite/micro/kernels/xtensa/lstm_shared.h b/tensorflow/lite/micro/kernels/xtensa/lstm_shared.h
deleted file mode 100644
index 4bcff1a..0000000
--- a/tensorflow/lite/micro/kernels/xtensa/lstm_shared.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-#ifndef TENSORFLOW_LITE_KERNELS_LSTM_SHARED_H_
-#define TENSORFLOW_LITE_KERNELS_LSTM_SHARED_H_
-
-namespace tflite {
-namespace ops {
-namespace micro {
-namespace lstm {
-// For full inputs kernel (24-inputs).
-// Please note the 20-input full kernel is deprecated and only kept
-// here for backward compatibility.
-namespace full {
-
-// Input Tensors of size {n_batch, n_input}
-constexpr int kInputTensor = 0;
-
-// Input weight tensors of size: {n_cell, n_input}
-constexpr int kInputToInputWeightsTensor = 1;  // Optional
-constexpr int kInputToForgetWeightsTensor = 2;
-constexpr int kInputToCellWeightsTensor = 3;
-constexpr int kInputToOutputWeightsTensor = 4;
-
-// Recurrent weight tensors of size {n_cell, n_output}
-constexpr int kRecurrentToInputWeightsTensor = 5;  // Optional
-constexpr int kRecurrentToForgetWeightsTensor = 6;
-constexpr int kRecurrentToCellWeightsTensor = 7;
-constexpr int kRecurrentToOutputWeightsTensor = 8;
-
-// Peephole weights tensors of size {n_cell}, representing a diagonal matrix.
-constexpr int kCellToInputWeightsTensor = 9;    // Optional
-constexpr int kCellToForgetWeightsTensor = 10;  // Optional
-constexpr int kCellToOutputWeightsTensor = 11;  // Optional
-
-// Gates bias tensors of size {n_cell}
-constexpr int kInputGateBiasTensor = 12;  // Optional
-constexpr int kForgetGateBiasTensor = 13;
-constexpr int kCellGateBiasTensor = 14;
-constexpr int kOutputGateBiasTensor = 15;
-
-// Projection weight tensor of size {n_output, n_cell}
-constexpr int kProjectionWeightsTensor = 16;  // Optional
-// Projection bias tensor of size {n_output}
-constexpr int kProjectionBiasTensor = 17;  // Optional
-
-// These state tensors are defined as variable tensors, and will be modified by
-// this op.
-constexpr int kOutputStateTensor = 18;
-constexpr int kCellStateTensor = 19;
-
-// Layer norm coefficient tensors of size {n_cell}, representing a diagonal
-// matrix.
-constexpr int kInputLayerNormCoefficientsTensor = 20;   // Optional
-constexpr int kForgetLayerNormCoefficientsTensor = 21;  // Optional
-constexpr int kCellLayerNormCoefficientsTensor = 22;    // Optional
-constexpr int kOutputLayerNormCoefficientsTensor = 23;  // Optional
-
-// Output tensors.
-constexpr int kOutputTensor = 0;
-}  // namespace full
-
-}  // namespace lstm
-}  // namespace micro
-}  // namespace ops
-}  // namespace tflite
-#endif  // TENSORFLOW_LITE_KERNELS_LSTM_SHARED_H_
diff --git a/tensorflow/lite/micro/kernels/xtensa/pad.cc b/tensorflow/lite/micro/kernels/xtensa/pad.cc
index bb00edb..d822c28 100644
--- a/tensorflow/lite/micro/kernels/xtensa/pad.cc
+++ b/tensorflow/lite/micro/kernels/xtensa/pad.cc
@@ -215,7 +215,7 @@
           constant_values == nullptr
               ? 0
               : *tflite::micro::GetTensorData<int16_t>(constant_values);
-#if defined(HIFI4)
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
       /* NNLib currently only supports up to 4D input tensors */
       if (tflite::micro::GetTensorShape(input).DimensionsCount() == 4) {
         const TfLiteEvalTensor* paddings =
@@ -233,14 +233,14 @@
             pad_value);
         if (err != 0) return kTfLiteError;
       } else {
-#endif  // defined(HIFI4)
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
         reference_ops::Pad(data->params, tflite::micro::GetTensorShape(input),
                            tflite::micro::GetTensorData<int16_t>(input),
                            &pad_value, tflite::micro::GetTensorShape(output),
                            tflite::micro::GetTensorData<int16_t>(output));
-#if defined(HIFI4)
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
       }
-#endif  // defined(HIFI4)
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
     } break;
     case kTfLiteInt32: {
       int32_t pad_value =
diff --git a/tensorflow/lite/micro/kernels/xtensa/quantize.cc b/tensorflow/lite/micro/kernels/xtensa/quantize.cc
index e849108..06d4fbb 100644
--- a/tensorflow/lite/micro/kernels/xtensa/quantize.cc
+++ b/tensorflow/lite/micro/kernels/xtensa/quantize.cc
@@ -29,7 +29,7 @@
 namespace tflite {
 namespace {
 
-#if defined(HIFI4) || defined(HIFI5)
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
 TfLiteStatus EvalXtensa(TfLiteContext* context, TfLiteNode* node) {
   TFLITE_DCHECK(node->user_data != nullptr);
   auto* op_data = static_cast<OpDataQuantizeReference*>(node->user_data);
@@ -75,12 +75,19 @@
 
         case kTfLiteInt8: {
           int size = ElementCount(*input->dims);
-          reference_ops::Requantize(
-              tflite::micro::GetTensorData<int8_t>(input), size,
-              op_data->requantize_output_multiplier,
-              op_data->requantize_output_shift, op_data->input_zero_point,
-              op_data->quantization_params.zero_point,
-              tflite::micro::GetTensorData<int8_t>(output));
+          int32_t zero_point = op_data->quantization_params.zero_point;
+          const int8_t* input_data_ptr;
+          int8_t* output_data_ptr;
+          input_data_ptr = tflite::micro::GetTensorData<int8_t>(input);
+          output_data_ptr = tflite::micro::GetTensorData<int8_t>(output);
+
+          TF_LITE_ENSURE_EQ(
+              context,
+              xa_nn_elm_requantize_asym8s_asym8s(
+                  output_data_ptr, input_data_ptr, op_data->input_zero_point,
+                  zero_point, op_data->requantize_output_shift,
+                  op_data->requantize_output_multiplier, size),
+              0);
           break;
         }
 
@@ -98,7 +105,6 @@
         case kTfLiteInt32: {
           int size = ElementCount(*input->dims);
           int32_t zero_point = op_data->quantization_params.zero_point;
-#if defined(HIFI5)
           const int8_t* input_data_ptr;
           int32_t* output_data_ptr;
           input_data_ptr = tflite::micro::GetTensorData<int8_t>(input);
@@ -111,13 +117,6 @@
                   zero_point, op_data->requantize_output_shift,
                   op_data->requantize_output_multiplier, size),
               0);
-#else
-          reference_ops::Requantize(
-              tflite::micro::GetTensorData<int8_t>(input), size,
-              op_data->requantize_output_multiplier,
-              op_data->requantize_output_shift, op_data->input_zero_point,
-              zero_point, tflite::micro::GetTensorData<int32_t>(output));
-#endif  // defined(HIFI5)
           break;
         }
 
@@ -149,18 +148,20 @@
 
         case kTfLiteInt16: {
           int size = ElementCount(*input->dims);
-          reference_ops::Requantize(
-              tflite::micro::GetTensorData<int16_t>(input), size,
-              op_data->requantize_output_multiplier,
-              op_data->requantize_output_shift, op_data->input_zero_point,
-              op_data->quantization_params.zero_point,
-              tflite::micro::GetTensorData<int16_t>(output));
+          TF_LITE_ENSURE_EQ(context,
+                            xa_nn_elm_requantize_asym16s_asym16s(
+                                tflite::micro::GetTensorData<int16_t>(output),
+                                tflite::micro::GetTensorData<int16_t>(input),
+                                op_data->input_zero_point,
+                                op_data->quantization_params.zero_point,
+                                op_data->requantize_output_shift,
+                                op_data->requantize_output_multiplier, size),
+                            0);
           break;
         }
 
         case kTfLiteInt32: {
           int size = ElementCount(*input->dims);
-#if defined(HIFI5)
           TF_LITE_ENSURE_EQ(context,
                             xa_nn_elm_requantize_asym16s_asym32s(
                                 tflite::micro::GetTensorData<int32_t>(output),
@@ -170,14 +171,6 @@
                                 op_data->requantize_output_shift,
                                 op_data->requantize_output_multiplier, size),
                             0);
-#else
-          int32_t zero_point = op_data->quantization_params.zero_point;
-          reference_ops::Requantize(
-              tflite::micro::GetTensorData<int16_t>(input), size,
-              op_data->requantize_output_multiplier,
-              op_data->requantize_output_shift, op_data->input_zero_point,
-              zero_point, tflite::micro::GetTensorData<int32_t>(output));
-#endif  // defined(HIFI5)
           break;
         }
 
@@ -228,22 +221,56 @@
     case kTfLiteFloat32: {
       switch (output->type) {
         case kTfLiteInt8: {
+#if HIFI_VFPU
+          int size = ElementCount(*input->dims);
+          int32_t zero_point = op_data->quantization_params.zero_point;
+          const float* input_data_ptr;
+          int8_t* output_data_ptr;
+          input_data_ptr = tflite::micro::GetTensorData<float>(input);
+          output_data_ptr = tflite::micro::GetTensorData<int8_t>(output);
+
+          TF_LITE_ENSURE_EQ(
+              context,
+              xa_nn_elm_quantize_f32_asym8s(
+                  output_data_ptr, input_data_ptr,
+                  static_cast<float>(op_data->quantization_params.scale),
+                  zero_point, size),
+              0);
+#else   // #if HIFI_VFPU
           reference_ops::AffineQuantize(
               op_data->quantization_params,
               tflite::micro::GetTensorShape(input),
               tflite::micro::GetTensorData<float>(input),
               tflite::micro::GetTensorShape(output),
               tflite::micro::GetTensorData<int8_t>(output));
+#endif  // #if HIFI_VFPU
           break;
         }
 
         case kTfLiteInt16: {
+#if HIFI_VFPU
+          int size = ElementCount(*input->dims);
+          int32_t zero_point = op_data->quantization_params.zero_point;
+          const float* input_data_ptr;
+          int16_t* output_data_ptr;
+          input_data_ptr = tflite::micro::GetTensorData<float>(input);
+          output_data_ptr = tflite::micro::GetTensorData<int16_t>(output);
+
+          TF_LITE_ENSURE_EQ(
+              context,
+              xa_nn_elm_quantize_f32_asym16s(
+                  output_data_ptr, input_data_ptr,
+                  static_cast<float>(op_data->quantization_params.scale),
+                  zero_point, size),
+              0);
+#else   // #if HIFI_VFPU
           reference_ops::AffineQuantize(
               op_data->quantization_params,
               tflite::micro::GetTensorShape(input),
               tflite::micro::GetTensorData<float>(input),
               tflite::micro::GetTensorShape(output),
               tflite::micro::GetTensorData<int16_t>(output));
+#endif  // #if HIFI_VFPU
           break;
         }
 
@@ -267,7 +294,7 @@
 
   return kTfLiteOk;
 }
-#endif  // defined(HIFI4) || defined(HIFI5)
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
 
 void* Init(TfLiteContext* context, const char* buffer, size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
@@ -301,11 +328,11 @@
 }
 
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-#if defined(HIFI4) || defined(HIFI5)
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
   return EvalXtensa(context, node);
 #else
   return EvalQuantizeReference(context, node);
-#endif  // defined(HIFI4) || defined(HIFI5)
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
 }
 
 }  // namespace
diff --git a/tensorflow/lite/micro/kernels/xtensa/reduce_vision.cc b/tensorflow/lite/micro/kernels/xtensa/reduce_vision.cc
index c76525e..e539c29 100644
--- a/tensorflow/lite/micro/kernels/xtensa/reduce_vision.cc
+++ b/tensorflow/lite/micro/kernels/xtensa/reduce_vision.cc
@@ -72,6 +72,7 @@
   }
   return true;
 }
+
 TfLiteStatus ReducePrepareVision(TfLiteContext* context, TfLiteNode* node) {
   TFLITE_DCHECK(node->user_data != nullptr);
   TFLITE_DCHECK(node->builtin_data != nullptr);
@@ -83,14 +84,14 @@
   TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0);
   TfLiteTensor* axis = micro_context->AllocateTempInputTensor(node, 1);
 
-  uint32_t inputDims[4] = {1, 1, 1, 1};
-  uint32_t outputDims[4] = {1, 1, 1, 1};
-  uint32_t shouldReduceR[4] = {0, 0, 0, 0};
+  uint32_t input_dims[4] = {1, 1, 1, 1};
+  uint32_t output_dims[4] = {1, 1, 1, 1};
+  uint32_t should_reduce_r[4] = {0, 0, 0, 0};
   int32_t resolved_axis[4] = {0, 0, 0, 0};
-  OperandDims4D(inputDims, input);
-  OperandDims4D(outputDims, output);
+  OperandDims4D(input_dims, input);
+  OperandDims4D(output_dims, output);
 
-  uint32_t inputRank = NumDimensions(input);
+  const int input_rank = NumDimensions(input);
   // Interpret an axis tensor with null dimensions as a scalar
   int num_axis = static_cast<int>(ElementCount(*axis->dims));
   // Resolve axis.
@@ -99,16 +100,22 @@
                    &num_resolved_axis)) {
     return kTfLiteError;
   }
-  std::vector<bool> shouldReduce(inputRank);
 
-  for (int32_t i = 0; i < num_axis; ++i) {
-    int32_t axisD = resolved_axis[i];
-    shouldReduce[axisD] = true;
+  // ResolveAxis should eliminate dupes and negative axis, so the number of axis
+  // should be no greater than the input rank.
+  TFLITE_DCHECK(num_resolved_axis <= input_rank);
+
+  bool should_reduce[4] = {false, false, false, false};
+
+  for (int32_t i = 0; i < num_resolved_axis; ++i) {
+    int32_t axis_d = resolved_axis[i];
+    should_reduce[axis_d] = true;
   }
 
   // reverse axes and align it to dimension 0 as OperandDims4D
-  for (uint32_t axisI = 0; axisI < inputRank; ++axisI) {
-    shouldReduceR[inputRank - 1 - axisI] = (uint32_t)shouldReduce[axisI];
+  for (int axis_i = 0; axis_i < input_rank; ++axis_i) {
+    should_reduce_r[input_rank - 1 - axis_i] =
+        static_cast<uint32_t>(should_reduce[axis_i]);
   }
 
   uint32_t context_size = 0;
@@ -123,8 +130,8 @@
     data->context_size = context_size;
   }
 
-  status = xiReduceSetContext(data->p_context, data->context_size, inputDims,
-                              outputDims, shouldReduceR);
+  status = xiReduceSetContext(data->p_context, data->context_size, input_dims,
+                              output_dims, should_reduce_r);
 
   if (status) {
     return kTfLiteError;
diff --git a/tensorflow/lite/micro/kernels/xtensa/softmax.cc b/tensorflow/lite/micro/kernels/xtensa/softmax.cc
index 76c380f..c248fc5 100644
--- a/tensorflow/lite/micro/kernels/xtensa/softmax.cc
+++ b/tensorflow/lite/micro/kernels/xtensa/softmax.cc
@@ -31,7 +31,7 @@
 namespace tflite {
 namespace {
 
-#if defined(HIFI4) || defined(HIFI5)
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
 TfLiteStatus EvalHifiInt8(const XtensaSoftmaxOpData* op_data,
                           const TfLiteEvalTensor* input,
                           TfLiteEvalTensor* output, TfLiteContext* context) {
@@ -56,7 +56,7 @@
   }
   return kTfLiteOk;
 }
-#endif  // defined(HIFI4) || defined(HIFI5)
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
 
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0);
@@ -68,7 +68,7 @@
 
   TFLITE_DCHECK(node->user_data != nullptr);
 
-#if defined(HIFI4) || defined(HIFI5)
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
   XtensaSoftmaxOpData op_data =
       *static_cast<XtensaSoftmaxOpData*>(node->user_data);
   SoftmaxParams params = op_data.params;
@@ -77,7 +77,7 @@
 #endif
 
   if (input->type == kTfLiteInt8 && output->type == kTfLiteInt8) {
-#if defined(HIFI4) || defined(HIFI5)
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
     return EvalHifiInt8(static_cast<XtensaSoftmaxOpData*>(node->user_data),
                         input, output, context);
 #elif defined(VISION_P6)
@@ -91,7 +91,7 @@
         tflite::micro::GetTensorShape(output),
         tflite::micro::GetTensorData<int8_t>(output));
     return kTfLiteOk;
-#endif  // defined(HIFI4) || defined(HIFI5)
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
   }
 
   if (input->type == kTfLiteInt16 && output->type == kTfLiteInt16) {
diff --git a/tensorflow/lite/micro/kernels/xtensa/softmax_int8_int16.cc b/tensorflow/lite/micro/kernels/xtensa/softmax_int8_int16.cc
index b23a9f7..d37a2f5 100644
--- a/tensorflow/lite/micro/kernels/xtensa/softmax_int8_int16.cc
+++ b/tensorflow/lite/micro/kernels/xtensa/softmax_int8_int16.cc
@@ -30,7 +30,7 @@
 namespace tflite {
 namespace {
 
-#if defined(HIFI4) || defined(HIFI5)
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
 TfLiteStatus PrepareHifi(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_OK(context, SoftmaxPrepare(context, node));
 
@@ -86,13 +86,13 @@
   }
   return kTfLiteOk;
 }
-#endif  // defined(HIFI4) || defined(HIFI5)
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
 
 }  // namespace
 
 void* XtensaInitSoftmax(TfLiteContext* context, const char* buffer,
                         size_t length) {
-#if defined(HIFI4) || defined(HIFI5)
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
   return context->AllocatePersistentBuffer(context,
                                            sizeof(XtensaSoftmaxOpData));
@@ -105,11 +105,11 @@
                                            sizeof(XtensaSoftmaxOpData));
 #else
   return SoftmaxInit(context, buffer, length);
-#endif  // defined(HIFI4) || defined(HIFI5)
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
 }
 
 TfLiteStatus XtensaPrepareSoftmax(TfLiteContext* context, TfLiteNode* node) {
-#if defined(HIFI4) || defined(HIFI5)
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
   return PrepareHifi(context, node);
 #else
   TF_LITE_ENSURE_OK(context, SoftmaxPrepare(context, node));
@@ -117,7 +117,7 @@
   TF_LITE_ENSURE_OK(context, SoftmaxPrepareVision(context, node));
 #endif
   return kTfLiteOk;
-#endif  // defined(HIFI4) || defined(HIFI5)
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
 }
 
 TfLiteStatus XtensaEvalSoftmaxInt8Int16(TfLiteContext* context,
@@ -127,7 +127,7 @@
   TFLITE_DCHECK(node->user_data != nullptr);
 
   if (input->type == kTfLiteInt8 && output->type == kTfLiteInt16) {
-#if defined(HIFI4) || defined(HIFI5)
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
     return EvalHifi(static_cast<XtensaSoftmaxOpData*>(node->user_data), input,
                     output, context);
 #else
@@ -138,7 +138,7 @@
         tflite::micro::GetTensorShape(output),
         tflite::micro::GetTensorData<int16_t>(output));
     return kTfLiteOk;
-#endif  // defined(HIFI4) || defined(HIFI5)
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
   } else {
     MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type),
                 input->type);
diff --git a/tensorflow/lite/micro/kernels/xtensa/strided_slice.cc b/tensorflow/lite/micro/kernels/xtensa/strided_slice.cc
index 0440cfc..8ebf724 100644
--- a/tensorflow/lite/micro/kernels/xtensa/strided_slice.cc
+++ b/tensorflow/lite/micro/kernels/xtensa/strided_slice.cc
@@ -23,129 +23,14 @@
 #include "tensorflow/lite/kernels/kernel_util.h"
 #include "tensorflow/lite/kernels/op_macros.h"
 #include "tensorflow/lite/micro/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/kernels/strided_slice.h"
 #include "tensorflow/lite/micro/kernels/xtensa/xtensa.h"
 #include "tensorflow/lite/micro/micro_log.h"
 
 namespace tflite {
 namespace {
 
-constexpr int kInputTensor = 0;
-constexpr int kBeginTensor = 1;
-constexpr int kEndTensor = 2;
-constexpr int kStridesTensor = 3;
-constexpr int kOutputTensor = 0;
-
-struct StridedSliceContext {
-  StridedSliceContext(TfLiteContext* context, TfLiteNode* node) {
-    params = reinterpret_cast<TfLiteStridedSliceParams*>(node->builtin_data);
-    micro_context = GetMicroContext(context);
-    input = micro_context->AllocateTempInputTensor(node, kInputTensor);
-    begin = micro_context->AllocateTempInputTensor(node, kBeginTensor);
-    end = micro_context->AllocateTempInputTensor(node, kEndTensor);
-    strides = micro_context->AllocateTempInputTensor(node, kStridesTensor);
-    output = micro_context->AllocateTempOutputTensor(node, kOutputTensor);
-    dims = NumDimensions(input);
-  }
-  ~StridedSliceContext() {
-    micro_context->DeallocateTempTfLiteTensor(input);
-    micro_context->DeallocateTempTfLiteTensor(begin);
-    micro_context->DeallocateTempTfLiteTensor(end);
-    micro_context->DeallocateTempTfLiteTensor(strides);
-    micro_context->DeallocateTempTfLiteTensor(output);
-  }
-  const TfLiteStridedSliceParams* params;
-  MicroContext* micro_context;
-  TfLiteTensor* input;
-  TfLiteTensor* begin;
-  TfLiteTensor* end;
-  TfLiteTensor* strides;
-  TfLiteTensor* output;
-  int dims;
-};
-
-// This Op only supports 1-4D cases and since we use the reference 4D
-// implementation, the 1-3D tensors are mapped to 4D.
-const int kMaxDim = 4;
-
-tflite::StridedSliceParams BuildStridedSliceParams(
-    StridedSliceContext* op_context) {
-  tflite::StridedSliceParams op_params;
-  op_params.start_indices_count = op_context->dims;
-  op_params.stop_indices_count = op_context->dims;
-  op_params.strides_count = op_context->dims;
-
-  for (int i = 0; i < op_context->dims; ++i) {
-    op_params.start_indices[i] = GetTensorData<int32_t>(op_context->begin)[i];
-    op_params.stop_indices[i] = GetTensorData<int32_t>(op_context->end)[i];
-    op_params.strides[i] = GetTensorData<int32_t>(op_context->strides)[i];
-  }
-
-  op_params.begin_mask = op_context->params->begin_mask;
-  op_params.ellipsis_mask = 0;
-  op_params.end_mask = op_context->params->end_mask;
-  op_params.new_axis_mask = 0;
-  op_params.shrink_axis_mask = op_context->params->shrink_axis_mask;
-  return op_params;
-}
-
-// Processes the indexing tensors (begin, end and strides) to resize the
-// output tensor. This function is callable from both Prepare() and Eval() as
-// long as the caller ensures the indexing tensors are present.
-TfLiteStatus CheckOutputSize(TfLiteContext* context,
-                             StridedSliceContext* op_context) {
-  using ::tflite::strided_slice::StartForAxis;
-  using ::tflite::strided_slice::StopForAxis;
-  TfLiteIntArray* output_shape = op_context->output->dims;
-  int shape_size = 0;
-  auto op_params = BuildStridedSliceParams(op_context);
-  auto input_shape = GetTensorShape(op_context->input);
-  for (int idx = 0; idx < op_context->dims; ++idx) {
-    int32_t stride = GetTensorData<int32_t>(op_context->strides)[idx];
-    TF_LITE_ENSURE_MSG(context, stride != 0, "stride value has to be non-zero");
-    int32_t begin = StartForAxis(op_params, input_shape, idx);
-    int32_t end = StopForAxis(op_params, input_shape, idx, begin);
-
-    // When shrinking an axis, the end position does not matter (and can be
-    // incorrect when negative indexing is used, see Issue #19260). Always use
-    // begin + 1 to generate a length 1 slice, since begin has
-    // already been adjusted for negative indices by StartForAxis.
-    const bool shrink_axis = op_context->params->shrink_axis_mask & (1 << idx);
-    if (shrink_axis) {
-      end = begin + 1;
-    }
-
-    // This is valid for both positive and negative strides
-    int32_t dim_shape = std::ceil((end - begin) / static_cast<float>(stride));
-    dim_shape = dim_shape < 0 ? 0 : dim_shape;
-    if (!shrink_axis) {
-      TF_LITE_ENSURE_EQ(context, output_shape->data[shape_size], dim_shape);
-      shape_size++;
-    }
-  }
-  TF_LITE_ENSURE_EQ(context, output_shape->size, shape_size);
-  return kTfLiteOk;
-}
-
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
-  return context->AllocatePersistentBuffer(context, sizeof(StridedSliceParams));
-}
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  TFLITE_DCHECK(node->user_data != nullptr);
-  StridedSliceParams* op_params =
-      static_cast<StridedSliceParams*>(node->user_data);
-  TF_LITE_ENSURE_EQ(context, NumInputs(node), 4);
-  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
-  StridedSliceContext op_context(context, node);
-  TF_LITE_ENSURE_MSG(context, op_context.dims <= kMaxDim,
-                     "input dim should not exceed 4");
-  auto params = BuildStridedSliceParams(&op_context);
-  memcpy(op_params, &params, sizeof(StridedSliceParams));
-  return CheckOutputSize(context, &op_context);
-}
-
-#if defined(HIFI4)
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
 void StridedSlice_int16_hifi4opt(const tflite::StridedSliceParams& op_params,
                                  const RuntimeShape& unextended_input_shape,
                                  const int16_t* input_data,
@@ -192,7 +77,7 @@
                             input_shape.Dims(1), input_shape.Dims(2),
                             input_shape.Dims(3), input_shape.Dims(4));
 }
-#endif  // defined(HIFI4)
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
 
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   TFLITE_DCHECK(node->user_data != nullptr);
@@ -200,9 +85,9 @@
       *(static_cast<const StridedSliceParams*>(node->user_data));
 
   const TfLiteEvalTensor* input =
-      tflite::micro::GetEvalInput(context, node, kInputTensor);
+      tflite::micro::GetEvalInput(context, node, kStridedSliceInputTensor);
   TfLiteEvalTensor* output =
-      tflite::micro::GetEvalOutput(context, node, kOutputTensor);
+      tflite::micro::GetEvalOutput(context, node, kStridedSliceOutputTensor);
   switch (output->type) {
     case kTfLiteFloat32:
       reference_ops::StridedSlice(op_params,
@@ -219,7 +104,7 @@
                                   tflite::micro::GetTensorData<int8_t>(output));
       break;
     case kTfLiteInt16:
-#if defined(HIFI4)
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
       StridedSlice_int16_hifi4opt(
           op_params, tflite::micro::GetTensorShape(input),
           tflite::micro::GetTensorData<int16_t>(input),
@@ -231,7 +116,7 @@
           tflite::micro::GetTensorData<int16_t>(input),
           tflite::micro::GetTensorShape(output),
           tflite::micro::GetTensorData<int16_t>(output));
-#endif  // defined(HIFI4)
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
       break;
     case kTfLiteInt32:
       reference_ops::StridedSlice(
@@ -257,7 +142,7 @@
 }  // namespace
 
 TFLMRegistration Register_STRIDED_SLICE() {
-  return tflite::micro::RegisterOp(Init, Prepare, Eval);
+  return tflite::micro::RegisterOp(StridedSliceInit, StridedSlicePrepare, Eval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/xtensa/sub.cc b/tensorflow/lite/micro/kernels/xtensa/sub.cc
index c4f0984..b8308c9 100644
--- a/tensorflow/lite/micro/kernels/xtensa/sub.cc
+++ b/tensorflow/lite/micro/kernels/xtensa/sub.cc
@@ -83,15 +83,15 @@
                       &op_params);
   // TODO(b/259724572): vision_p6 and hifi code path is getting very confusing.
   // Let's separate them into two different files.
-#if !(defined(HIFI4))
+#if !(defined(HIFI3) || defined(HIFI4) || defined(HIFI5))
   bool need_broadcast = reference_ops::ProcessBroadcastShapes(
       tflite::micro::GetTensorShape(input1),
       tflite::micro::GetTensorShape(input2), &op_params);
-#endif  // !(defined(HIFI4))
+#endif  // !(defined(HIFI3) || defined(HIFI4))
 
   switch (output->type) {
     case kTfLiteInt8: {
-#if defined(HIFI4)
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
       int err;
       const RuntimeShape extended_input1_shape =
           RuntimeShape::ExtendedShape(5, tflite::micro::GetTensorShape(input1));
@@ -105,7 +105,6 @@
       // TODO(b/259724572): Refactor the following block of code.
       int b;
       int inp1_off = 0;
-      int inp2_off = 0;
       int out_off;
       out_off =
           output_dims[1] * output_dims[2] * output_dims[3] * output_dims[4];
@@ -113,10 +112,6 @@
         inp1_off =
             input1_dims[1] * input1_dims[2] * input1_dims[3] * input1_dims[4];
       }
-      if (input2_dims[0] > 1) {
-        inp2_off =
-            input2_dims[1] * input2_dims[2] * input2_dims[3] * input2_dims[4];
-      }
 
       for (b = 0; b < output_dims[0]; b++) {
         err = xa_nn_elm_sub_broadcast_4D_asym8sxasym8s_asym8s(
@@ -133,7 +128,7 @@
 
         TF_LITE_ENSURE(context, err == 0);
       }
-#else   // defined(HIFI4)
+#else   // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
       if (need_broadcast) {
         tflite::reference_ops::BroadcastQuantSubSlow(
             op_params, tflite::micro::GetTensorShape(input1),
@@ -151,11 +146,11 @@
             tflite::micro::GetTensorShape(output),
             tflite::micro::GetTensorData<int8_t>(output));
       }
-#endif  // defined(HIFI4)
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
       break;
     }
     case kTfLiteInt16: {
-#if defined(HIFI4)
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
       int err;
       const RuntimeShape extended_input1_shape =
           RuntimeShape::ExtendedShape(5, tflite::micro::GetTensorShape(input1));
@@ -168,7 +163,6 @@
       const int* output_dims = extended_output_shape.DimsData();
       int b;
       int inp1_off = 0;
-      int inp2_off = 0;
       int out_off;
       out_off =
           output_dims[1] * output_dims[2] * output_dims[3] * output_dims[4];
@@ -176,10 +170,6 @@
         inp1_off =
             input1_dims[1] * input1_dims[2] * input1_dims[3] * input1_dims[4];
       }
-      if (input2_dims[0] > 1) {
-        inp2_off =
-            input2_dims[1] * input2_dims[2] * input2_dims[3] * input2_dims[4];
-      }
 
       for (b = 0; b < output_dims[0]; b++) {
         err = xa_nn_elm_sub_broadcast_4D_asym16sxasym16s_asym16s(
@@ -196,7 +186,7 @@
 
         TF_LITE_ENSURE(context, err == 0);
       }
-#else   // defined(HIFI4)
+#else   // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
       if (need_broadcast) {
         tflite::reference_ops::BroadcastQuantSubSlow(
             op_params, tflite::micro::GetTensorShape(input1),
@@ -214,7 +204,7 @@
             tflite::micro::GetTensorShape(output),
             tflite::micro::GetTensorData<int16_t>(output));
       }
-#endif  // defined(HIFI4)
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
       break;
     }
     default:
@@ -256,4 +246,4 @@
   return tflite::micro::RegisterOp(SubInit, SubPrepare, SubEval);
 }
 
-}  // namespace tflite
\ No newline at end of file
+}  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/xtensa/svdf.cc b/tensorflow/lite/micro/kernels/xtensa/svdf.cc
index c1dac3b..da34e09 100644
--- a/tensorflow/lite/micro/kernels/xtensa/svdf.cc
+++ b/tensorflow/lite/micro/kernels/xtensa/svdf.cc
@@ -33,7 +33,7 @@
 namespace tflite {
 namespace {
 
-#if defined(HIFI4) || defined(HIFI5)
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
 
 TfLiteStatus EvalIntegerSvdfHifi(TfLiteContext* context, TfLiteNode* node,
                                  const TfLiteEvalTensor* input_tensor,
@@ -63,7 +63,7 @@
 #if defined(HIFI5)
   memcpy(state_ptr, state_ptr + 1, num_bytes);
 #else
-  xa_nn_memmove_16(state_ptr, state_ptr + 1, num_bytes);
+  xa_nn_memmove_16(state_ptr, state_ptr + 1, (num_bytes >> 1));
 #endif  // defined(HIFI5)
 
   // Note: no need to clear the latest activation, matmul is not accumulative.
@@ -108,7 +108,7 @@
   }
   return kTfLiteOk;
 }
-#endif  // defined(HIFI4) || defined(HIFI5)
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
 
 void* Init(TfLiteContext* context, const char* buffer, size_t length) {
   TFLITE_DCHECK(context != nullptr);
@@ -116,7 +116,7 @@
 }
 
 TfLiteStatus PrepareInt8(TfLiteContext* context, TfLiteNode* node) {
-#if defined(HIFIMINI) || defined(HIFI4) || defined(HIFI5)
+#if defined(HIFIMINI) || defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
   TFLITE_DCHECK(node->builtin_data != nullptr);
   const auto* params = static_cast<const TfLiteSVDFParams*>(node->builtin_data);
 
@@ -252,11 +252,12 @@
   return kTfLiteOk;
 #else
   return PrepareSvdf(context, node);
-#endif  // defined(HIFIMINI) || defined(HIFI4) || defined(HIFI5)
+#endif  // defined(HIFIMINI) || defined(HIFI3) || defined(HIFI4) ||
+        // defined(HIFI5)
 }
 
 TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-#if defined(HIFIMINI) || defined(HIFI4) || defined(HIFI5)
+#if defined(HIFIMINI) || defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
 
   MicroContext* micro_context = GetMicroContext(context);
   TfLiteTensor* input =
@@ -277,7 +278,8 @@
   return status;
 #else
   return PrepareSvdf(context, node);
-#endif  // defined(HIFIMINI) || defined(HIFI4) || defined(HIFI5)
+#endif  // defined(HIFIMINI) || defined(HIFI3) || defined(HIFI4) ||
+        // defined(HIFI5)
 }
 
 TfLiteStatus EvalInt8(TfLiteContext* context, TfLiteNode* node) {
@@ -306,7 +308,7 @@
   return EvalIntegerSvdfHifimini(context, node, input, weights_feature,
                                  weights_time, bias, params, activation_state,
                                  output, data);
-#elif defined(HIFI4) || defined(HIFI5)
+#elif defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
   return EvalIntegerSvdfHifi(context, node, input, weights_feature,
                              weights_time, bias, params, activation_state,
                              output, data);
@@ -314,7 +316,7 @@
   EvalInt16SvdfReference(context, node, input, weights_feature, weights_time,
                          bias, params, activation_state, output, data);
   return kTfLiteOk;
-#endif  // defined(HIFI4) || defined(HIFI5)
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
 }
 
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
diff --git a/tensorflow/lite/micro/kernels/xtensa/transpose_conv.cc b/tensorflow/lite/micro/kernels/xtensa/transpose_conv.cc
index 826e168..44a9f86 100644
--- a/tensorflow/lite/micro/kernels/xtensa/transpose_conv.cc
+++ b/tensorflow/lite/micro/kernels/xtensa/transpose_conv.cc
@@ -183,19 +183,57 @@
   // Quantized kernels use an int32 scratch buffer.
   if (input->type == kTfLiteInt8) {
     TFLITE_DCHECK(context->RequestScratchBufferInArena != nullptr);
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
+    const int stride_width = params->stride_width;
+    const int stride_height = params->stride_height;
+
+    const int input_height = SizeOfDimension(input, 1);
+    const int input_width = SizeOfDimension(input, 2);
+    const int input_depth = SizeOfDimension(input, 3);
+    const int output_height = height;
+    const int output_width = width;
+    int32_t scratch_buffer_size = 0;
+    scratch_buffer_size = xa_nn_transpose_conv_getsize(
+        input_height, input_width, input_depth, filter_height, filter_width,
+        stride_width, stride_height, output_height, output_width, num_channels,
+        PREC_SYM8S, PREC_ASYM8S);
+    TFLITE_DCHECK(context->RequestScratchBufferInArena(
+                      context, scratch_buffer_size,
+                      &(data->scratch_buffer_index)) == kTfLiteOk);
+#else  // #if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
     TFLITE_DCHECK(context->RequestScratchBufferInArena(
                       context,
                       GetTensorShape(output).FlatSize() * sizeof(int32_t),
                       &(data->scratch_buffer_index)) == kTfLiteOk);
+#endif
   }
 
   // Quantized 16x8 kernels use an int64 scratch buffer.
   if (input->type == kTfLiteInt16) {
     TFLITE_DCHECK(context->RequestScratchBufferInArena != nullptr);
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
+    const int stride_width = params->stride_width;
+    const int stride_height = params->stride_height;
+
+    const int input_height = SizeOfDimension(input, 1);
+    const int input_width = SizeOfDimension(input, 2);
+    const int input_depth = SizeOfDimension(input, 3);
+    const int output_height = height;
+    const int output_width = width;
+    int32_t scratch_buffer_size = 0;
+    scratch_buffer_size = xa_nn_transpose_conv_getsize(
+        input_height, input_width, input_depth, filter_height, filter_width,
+        stride_width, stride_height, output_height, output_width, num_channels,
+        PREC_SYM8S, PREC_SYM16S);
+    TFLITE_DCHECK(context->RequestScratchBufferInArena(
+                      context, scratch_buffer_size,
+                      &(data->scratch_buffer_index)) == kTfLiteOk);
+#else   // #if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
     TFLITE_DCHECK(context->RequestScratchBufferInArena(
                       context,
                       GetTensorShape(output).FlatSize() * sizeof(std::int64_t),
                       &(data->scratch_buffer_index)) == kTfLiteOk);
+#endif  // #if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
   }
 
   // All per-channel quantized tensors need valid zero point and scale arrays.
@@ -282,6 +320,63 @@
     case kTfLiteInt8: {
       int32_t* scratch_buffer = static_cast<int32_t*>(
           context->GetScratchBuffer(context, data.scratch_buffer_index));
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
+      if (bias->type == kTfLiteInt32) {
+        const RuntimeShape& input_shape = tflite::micro::GetTensorShape(input);
+        const RuntimeShape& filter_shape =
+            tflite::micro::GetTensorShape(filter);
+        const RuntimeShape& output_shape =
+            tflite::micro::GetTensorShape(output);
+        const int stride_width = data.params.stride_width;
+        const int stride_height = data.params.stride_height;
+        const int pad_width = data.params.padding_values.width;
+        const int pad_height = data.params.padding_values.height;
+
+        const int batches = MatchingDim(input_shape, 0, output_shape, 0);
+        const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3);
+        const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3);
+
+        const int input_height = input_shape.Dims(1);
+        const int input_width = input_shape.Dims(2);
+        const int filter_height = filter_shape.Dims(1);
+        const int filter_width = filter_shape.Dims(2);
+        const int output_height = output_shape.Dims(1);
+        const int output_width = output_shape.Dims(2);
+        const int8_t* input_data = tflite::micro::GetTensorData<int8_t>(input);
+        const int8_t* filter_data =
+            tflite::micro::GetTensorData<int8_t>(filter);
+        const int32_t* bias_data = tflite::micro::GetTensorData<int32_t>(bias);
+        int8_t* output_data = tflite::micro::GetTensorData<int8_t>(output);
+
+        const int num_elements = output_shape.FlatSize();
+
+        for (int b = 0; b < batches; b++) {
+          xa_nn_transpose_conv_sym8sxasym8s(
+              &output_data[b * output_height * output_width * output_depth],
+              const_cast<WORD8*>(
+                  &input_data[b * input_height * input_width * input_depth]),
+              const_cast<WORD8*>(filter_data), const_cast<WORD32*>(bias_data),
+              stride_width, stride_height, pad_width, pad_height, input_depth,
+              output_depth, input_height, input_width, filter_height,
+              filter_width, output_height, output_width, num_elements / batches,
+              data.params.input_offset, data.params.output_offset,
+              data.per_channel_output_shift, data.per_channel_output_multiplier,
+              scratch_buffer);
+        }
+      } else {
+        reference_integer_ops::TransposeConv(
+            data.params, data.per_channel_output_multiplier,
+            data.per_channel_output_shift, tflite::micro::GetTensorShape(input),
+            tflite::micro::GetTensorData<int8_t>(input),
+            tflite::micro::GetTensorShape(filter),
+            tflite::micro::GetTensorData<int8_t>(filter),
+            tflite::micro::GetTensorShape(bias),
+            tflite::micro::GetTensorData<int32_t>(bias),
+            tflite::micro::GetTensorShape(output),
+            tflite::micro::GetTensorData<int8_t>(output),
+            tflite::micro::GetTensorShape(nullptr), nullptr, scratch_buffer);
+      }
+#else
       reference_integer_ops::TransposeConv(
           data.params, data.per_channel_output_multiplier,
           data.per_channel_output_shift, tflite::micro::GetTensorShape(input),
@@ -293,6 +388,7 @@
           tflite::micro::GetTensorShape(output),
           tflite::micro::GetTensorData<int8_t>(output),
           tflite::micro::GetTensorShape(nullptr), nullptr, scratch_buffer);
+#endif
       break;
     }
     case kTfLiteInt16: {
@@ -319,7 +415,7 @@
             tflite::micro::GetTensorData<int16_t>(output),
             tflite::micro::GetTensorShape(nullptr), nullptr, scratch_buffer);
       } else {
-#if defined(HIFI4)
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
         const RuntimeShape& input_shape = tflite::micro::GetTensorShape(input);
         const RuntimeShape& filter_shape =
             tflite::micro::GetTensorShape(filter);
@@ -359,9 +455,9 @@
               output_depth, input_height, input_width, filter_height,
               filter_width, output_height, output_width, num_elements / batches,
               data.per_channel_output_shift, data.per_channel_output_multiplier,
-              &scratch_buffer[b * output_height * output_width * output_depth]);
+              scratch_buffer);
         }
-#else
+#else   // #if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
         reference_integer_ops::TransposeConv(
             data.params, data.per_channel_output_multiplier,
             data.per_channel_output_shift, tflite::micro::GetTensorShape(input),
@@ -373,7 +469,7 @@
             tflite::micro::GetTensorShape(output),
             tflite::micro::GetTensorData<int16_t>(output),
             tflite::micro::GetTensorShape(nullptr), nullptr, scratch_buffer);
-#endif  // defined(HIFI4)
+#endif  // #if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
       }
       break;
     }
diff --git a/tensorflow/lite/micro/kernels/xtensa/unidirectional_sequence_lstm.cc b/tensorflow/lite/micro/kernels/xtensa/unidirectional_sequence_lstm.cc
index cbce1e1..0f6a02e 100644
--- a/tensorflow/lite/micro/kernels/xtensa/unidirectional_sequence_lstm.cc
+++ b/tensorflow/lite/micro/kernels/xtensa/unidirectional_sequence_lstm.cc
@@ -1,4 +1,4 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -13,1109 +13,156 @@
 limitations under the License.
 ==============================================================================*/
 
-#include <math.h>
-#include <stdio.h>
+// Integer version of unidirectional sequence lstm. Only the standard LSTM
+// (defined in the keras LSTM layer, e.g., no peephole etc.) is supported here.
+// Currently used by the 16 bits activation case only
 
-#include <cstddef>
+#include <algorithm>
+#include <limits>
 
-#include "tensorflow/lite/c/builtin_op_data.h"
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/kernels/internal/compatibility.h"
 #include "tensorflow/lite/kernels/internal/quantization_util.h"
-#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
 #include "tensorflow/lite/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/kernels/fully_connected.h"
+#include "tensorflow/lite/micro/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/kernels/lstm_shared.h"
 #include "tensorflow/lite/micro/kernels/xtensa/lstm_eval.h"
-#include "tensorflow/lite/micro/kernels/xtensa/lstm_shared.h"
-#include "tensorflow/lite/micro/micro_log.h"
 
-// TODO(b/230666079): Flatten the namespace to match the builtin kernel
-// implementation
 namespace tflite {
-namespace ops {
-namespace micro {
-// namespace unidirectional_sequence_lstm {
+
 namespace {
+/*Helper Functions*/
 
-struct OpData {
-  // If the lstm is layer norm.
-  bool use_layer_norm;
-  // The scratch tensor index.
-  int scratch_tensor_index;
-  bool compute_row_sums = false;
+/*Kernel functions*/
 
-  lstm_eval::IntegerLstmParameter integer_lstm_param;
-};
+void* UnidirectionalSequenceLstmInit(TfLiteContext* context, const char* buffer,
+                                     size_t length) {
+  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
+  return context->AllocatePersistentBuffer(context, sizeof(OpDataLSTM));
+}
 
-TfLiteStatus PopulateQuantizedLstmParams8x8_16(
-    TfLiteContext* context, TfLiteNode* node,
-    lstm_eval::IntegerLstmParameter* integer_lstm_param) {
-  // Calculate quantized clip for projection and cell.
-  const auto* params =
+TfLiteStatus UnidirectionalSequenceLstmPrepare(TfLiteContext* context,
+                                               TfLiteNode* node) {
+  TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
+  TF_LITE_ENSURE_EQ(context, node->inputs->size, 24);
+
+  TFLITE_DCHECK(node->builtin_data != nullptr);
+  TFLITE_DCHECK(node->user_data != nullptr);
+
+  OpDataLSTM* op_data = reinterpret_cast<OpDataLSTM*>(node->user_data);
+  const auto* builtin_data =
       static_cast<TfLiteUnidirectionalSequenceLSTMParams*>(node->builtin_data);
-  const float cell_clip = static_cast<float>(params->cell_clip);
-  const float proj_clip = static_cast<float>(params->proj_clip);
+  // All TempTfLiteTensors will be deallocated through the destructor.
+  LstmTensors lstm_tensors(context, node);
+  TF_LITE_ENSURE_OK(context, lstm_tensors.ValidateTensorStatus(context));
 
-  const TfLiteTensor* cell_state =
-      GetVariableInput(context, node, micro::lstm::full::kCellStateTensor);
-  TF_LITE_ENSURE(context, cell_state != nullptr);
-  TfLiteTensor* output_tensor;
+  op_data->cell_gate_nonlinear_type = builtin_data->activation;
+  op_data->size_info =
+      CreateLstmSizeInfo(builtin_data->time_major,
+                         lstm_tensors.GetInternalTensor(kLstmInputTensor)->dims,
+                         lstm_tensors.HiddenStateTensor()->dims);
   TF_LITE_ENSURE_OK(
-      context, GetOutputSafe(context, node, micro::lstm::full::kOutputTensor,
-                             &output_tensor));
+      context, ValidateTensorSize(context, lstm_tensors, op_data->size_info));
 
-  auto* cell_state_params =
-      static_cast<TfLiteAffineQuantization*>(cell_state->quantization.params);
-  auto* proj_params = static_cast<TfLiteAffineQuantization*>(
-      output_tensor->quantization.params);
-  if (cell_clip > static_cast<float>(0.0)) {
-    integer_lstm_param->quantized_cell_clip = static_cast<int16_t>(std::min(
-        std::max(cell_clip / cell_state_params->scale->data[0], -32768.0f),
-        32767.0f));
+  // Create cell state information and gate parameters (Fully Connected and Mul)
+  auto cell_state_type =
+      lstm_tensors.GetInternalTensor(kLstmCellStateTensor)->type;
+  if (cell_state_type == kTfLiteFloat32) {
+    op_data->cell_state_info =
+        CreateLstmCellStateInfoFloat(builtin_data->cell_clip);
+    TF_LITE_ENSURE_OK(
+        context, PrepareGateParametersFloat(context, lstm_tensors, op_data));
+  } else if (cell_state_type == kTfLiteInt16) {
+    op_data->cell_state_info = CreateLstmCellStateInfo(
+        lstm_tensors.CellStateTensor()->params.scale, builtin_data->cell_clip);
+    TF_LITE_ENSURE_OK(
+        context, PrepareGateParametersInteger(context, lstm_tensors, op_data));
   } else {
-    integer_lstm_param->quantized_cell_clip = 0;
+    MicroPrintf(
+        "Cell state type %s (%d) not supported. The quantized Unidirectional "
+        "Sequence LSTM Op only support int16 cell state",
+        TfLiteTypeGetName(cell_state_type), cell_state_type);
+    return kTfLiteError;
   }
-  if (proj_clip > static_cast<float>(0.0)) {
-    integer_lstm_param->quantized_proj_clip = static_cast<int8_t>(std::min(
-        std::max(proj_clip / proj_params->scale->data[0], -128.0f), 127.0f));
-  } else {
-    integer_lstm_param->quantized_proj_clip = 0;
+  // request buffers (four buffers)
+  for (size_t i = 0; i < 4; i++) {
+    TF_LITE_ENSURE_OK(context, context->RequestScratchBufferInArena(
+                                   context,
+                                   op_data->size_info.batch_size *
+                                       op_data->size_info.state_dimension *
+                                       TfLiteTypeGetSize(cell_state_type),
+                                   &(op_data->buffer_indices[i])));
   }
+  return kTfLiteOk;
+}
 
-  // Calculate effective scales.
-  OpData* op_data = static_cast<OpData*>(node->user_data);
-  const bool use_layer_norm = op_data->use_layer_norm;
+TfLiteStatus UnidirectionalSequenceLstmEval(TfLiteContext* context,
+                                            TfLiteNode* node) {
+  TFLITE_DCHECK(node->user_data != nullptr);
+  const OpDataLSTM& op_data = *reinterpret_cast<OpDataLSTM*>(node->user_data);
+  auto kernel_content = CreateLSTMKernelContent(context, node);
 
-  const TfLiteTensor* input;
-  TF_LITE_ENSURE_OK(
-      context,
-      GetInputSafe(context, node, micro::lstm::full::kInputTensor, &input));
+  const auto activation_type =
+      kernel_content.internal_tensors[kLstmInputTensor]->type;
+  const auto weight_type =
+      kernel_content.internal_tensors[kLstmInputToInputWeightsTensor]->type;
 
-  const TfLiteTensor* input_to_input_weights = GetOptionalInputTensor(
-      context, node, micro::lstm::full::kInputToInputWeightsTensor);
-  const TfLiteTensor* input_to_forget_weights;
-  TF_LITE_ENSURE_OK(context,
-                    GetInputSafe(context, node,
-                                 micro::lstm::full::kInputToForgetWeightsTensor,
-                                 &input_to_forget_weights));
-  const TfLiteTensor* input_to_cell_weights;
-  TF_LITE_ENSURE_OK(
-      context,
-      GetInputSafe(context, node, micro::lstm::full::kInputToCellWeightsTensor,
-                   &input_to_cell_weights));
-  const TfLiteTensor* input_to_output_weights;
-  TF_LITE_ENSURE_OK(context,
-                    GetInputSafe(context, node,
-                                 micro::lstm::full::kInputToOutputWeightsTensor,
-                                 &input_to_output_weights));
-
-  const TfLiteTensor* recurrent_to_input_weights = GetOptionalInputTensor(
-      context, node, micro::lstm::full::kRecurrentToInputWeightsTensor);
-  const TfLiteTensor* recurrent_to_forget_weights;
-  TF_LITE_ENSURE_OK(
-      context, GetInputSafe(context, node,
-                            micro::lstm::full::kRecurrentToForgetWeightsTensor,
-                            &recurrent_to_forget_weights));
-  const TfLiteTensor* recurrent_to_cell_weights;
-  TF_LITE_ENSURE_OK(
-      context, GetInputSafe(context, node,
-                            micro::lstm::full::kRecurrentToCellWeightsTensor,
-                            &recurrent_to_cell_weights));
-  const TfLiteTensor* recurrent_to_output_weights;
-  TF_LITE_ENSURE_OK(
-      context, GetInputSafe(context, node,
-                            micro::lstm::full::kRecurrentToOutputWeightsTensor,
-                            &recurrent_to_output_weights));
-
-  const TfLiteTensor* cell_to_input_weights = GetOptionalInputTensor(
-      context, node, micro::lstm::full::kCellToInputWeightsTensor);
-  const TfLiteTensor* cell_to_forget_weights = GetOptionalInputTensor(
-      context, node, micro::lstm::full::kCellToForgetWeightsTensor);
-  const TfLiteTensor* cell_to_output_weights = GetOptionalInputTensor(
-      context, node, micro::lstm::full::kCellToOutputWeightsTensor);
-
-  const TfLiteTensor* input_layer_norm_coefficients = GetOptionalInputTensor(
-      context, node, micro::lstm::full::kInputLayerNormCoefficientsTensor);
-  const TfLiteTensor* forget_layer_norm_coefficients = GetOptionalInputTensor(
-      context, node, micro::lstm::full::kForgetLayerNormCoefficientsTensor);
-  const TfLiteTensor* cell_layer_norm_coefficients = GetOptionalInputTensor(
-      context, node, micro::lstm::full::kCellLayerNormCoefficientsTensor);
-  const TfLiteTensor* output_layer_norm_coefficients = GetOptionalInputTensor(
-      context, node, micro::lstm::full::kOutputLayerNormCoefficientsTensor);
-
-  const TfLiteTensor* projection_weights = GetOptionalInputTensor(
-      context, node, micro::lstm::full::kProjectionWeightsTensor);
-
-  TfLiteTensor* output_state =
-      GetVariableInput(context, node, micro::lstm::full::kOutputStateTensor);
-  TF_LITE_ENSURE(context, output_state != nullptr);
-
-  // Since we have already checked that weights are all there or none, we can
-  // check the existence of only one to get the condition.
-  const bool use_cifg = (input_to_input_weights == nullptr);
-  const bool use_peephole = (cell_to_output_weights != nullptr);
-  const bool use_projection = (projection_weights != nullptr);
-
-  // Get intermediate scales and zero points.
-  constexpr size_t kIntermediateCount = 5;
-  float intermediate_scale[kIntermediateCount];
-  int32_t intermediate_zp[kIntermediateCount];
-  for (int i = 0; i < 4; ++i) {
-    if (use_layer_norm) {
-      TfLiteTensor* intermediate =
-          context->GetTensor(context, node->intermediates->data[i]);
-      auto* tmp_params = static_cast<TfLiteAffineQuantization*>(
-          intermediate->quantization.params);
-      intermediate_scale[i] = tmp_params->scale->data[0];
-      intermediate_zp[i] = tmp_params->zero_point->data[0];
-    } else {
-      // Q3.12 for activation functions.
-      intermediate_scale[i] = std::pow(2, -12);
-      intermediate_zp[i] = 0;
+  switch (activation_type) {
+    case kTfLiteFloat32: {
+      LSTMBuffers<float> buffers =
+          CreateLSTMBuffers<float>(context, op_data.buffer_indices);
+      EvalLstm<float, float, float, float>(op_data, kernel_content, buffers);
+      break;
+    }
+    case kTfLiteInt8: {
+      switch (weight_type) {
+        case kTfLiteInt8: {
+          // 8(activation)x8(weight)->16(cell) LSTM with 32 bits bias
+          LSTMBuffers<int16_t> buffers =
+              CreateLSTMBuffers<int16_t>(context, op_data.buffer_indices);
+          EvalLstm<int8_t, int8_t, int16_t, int32_t>(op_data, kernel_content,
+                                                     buffers);
+          break;
+        }
+        default: {
+          MicroPrintf("Filter type %s (%d) not supported.",
+                      TfLiteTypeGetName(weight_type), activation_type);
+          return kTfLiteError;
+        }
+      }
+      break;
+    }
+    case kTfLiteInt16: {
+      switch (weight_type) {
+        case kTfLiteInt8: {
+          // 16(activation)x8(weight)->16(cell) LSTM with 64 bits bias
+          LSTMBuffers<int16_t> buffers =
+              CreateLSTMBuffers<int16_t>(context, op_data.buffer_indices);
+          EvalLstm<int16_t, int8_t, int16_t, int64_t>(op_data, kernel_content,
+                                                      buffers);
+          break;
+        }
+        default: {
+          MicroPrintf("Filter type %s (%d) not supported.",
+                      TfLiteTypeGetName(weight_type), weight_type);
+          return kTfLiteError;
+        }
+      }
+      break;
+    }
+    default: {
+      MicroPrintf("Input type %s (%d) not supported.",
+                  TfLiteTypeGetName(activation_type), activation_type);
+      return kTfLiteError;
     }
   }
-  // In the absence of projection, hidden becomes otuput and this intermediate
-  // is ignored.
-  TfLiteTensor* hidden =
-      context->GetTensor(context, node->intermediates->data[4]);
-  auto* hidden_params =
-      static_cast<TfLiteAffineQuantization*>(hidden->quantization.params);
-  intermediate_scale[4] = hidden_params->scale->data[0];
-  intermediate_zp[4] = hidden_params->zero_point->data[0];
-
-  // Scales.
-  const float default_scale = 1.0;
-  float input_scale = default_scale;
-  float input_to_input_weight_scale = default_scale;
-  float recurrent_to_input_weight_scale = default_scale;
-  float cell_to_input_weight_scale = default_scale;
-  float input_to_forget_weight_scale = default_scale;
-  float recurrent_to_forget_weight_scale = default_scale;
-  float cell_to_forget_weight_scale = default_scale;
-  float input_to_cell_weight_scale = default_scale;
-  float recurrent_to_cell_weight_scale = default_scale;
-  float input_to_output_weight_scale = default_scale;
-  float recurrent_to_output_weight_scale = default_scale;
-  float cell_to_output_weight_scale = default_scale;
-  float projection_weight_scale = default_scale;
-  float layer_norm_input_scale = default_scale;
-  float layer_norm_forget_scale = default_scale;
-  float layer_norm_cell_scale = default_scale;
-  float layer_norm_output_scale = default_scale;
-  float output_state_scale = default_scale;
-  int cell_scale = 1;
-
-  // Effective scales.
-  float effective_input_to_input_scale = default_scale;
-  float effective_recurrent_to_input_scale = default_scale;
-  float effective_cell_to_input_scale = default_scale;
-  float effective_input_to_forget_scale = default_scale;
-  float effective_recurrent_to_forget_scale = default_scale;
-  float effective_cell_to_forget_scale = default_scale;
-  float effective_input_to_cell_scale = default_scale;
-  float effective_recurrent_to_cell_scale = default_scale;
-  float effective_input_to_output_scale = default_scale;
-  float effective_recurrent_to_output_scale = default_scale;
-  float effective_cell_to_output_scale = default_scale;
-  float effective_proj_scale = default_scale;
-  float effective_hidden_scale = default_scale;
-
-  // Populate scales.
-  if (!use_cifg) {
-    input_to_input_weight_scale = input_to_input_weights->params.scale;
-    recurrent_to_input_weight_scale = recurrent_to_input_weights->params.scale;
-  }
-
-  if (use_peephole) {
-    if (!use_cifg) {
-      cell_to_input_weight_scale = cell_to_input_weights->params.scale;
-    }
-    cell_to_forget_weight_scale = cell_to_forget_weights->params.scale;
-    cell_to_output_weight_scale = cell_to_output_weights->params.scale;
-  }
-
-  if (use_layer_norm) {
-    if (!use_cifg) {
-      layer_norm_input_scale = input_layer_norm_coefficients->params.scale;
-    }
-    layer_norm_forget_scale = forget_layer_norm_coefficients->params.scale;
-    layer_norm_cell_scale = cell_layer_norm_coefficients->params.scale;
-    layer_norm_output_scale = output_layer_norm_coefficients->params.scale;
-  }
-
-  if (use_projection) {
-    projection_weight_scale = projection_weights->params.scale;
-  }
-  output_state_scale = output_state->params.scale;
-
-  input_to_forget_weight_scale = input_to_forget_weights->params.scale;
-  input_to_cell_weight_scale = input_to_cell_weights->params.scale;
-  input_to_output_weight_scale = input_to_output_weights->params.scale;
-  recurrent_to_forget_weight_scale = recurrent_to_forget_weights->params.scale;
-  recurrent_to_cell_weight_scale = recurrent_to_cell_weights->params.scale;
-  recurrent_to_output_weight_scale = recurrent_to_output_weights->params.scale;
-
-  // Check cell state (already used above)
-  TF_LITE_ENSURE(context, CheckedLog2(cell_state->params.scale, &cell_scale));
-  // TF_LITE_ENSURE(context, cell_scale <= -9);
-  integer_lstm_param->cell_scale = cell_scale;
-  input_scale = input->params.scale;
-
-  // Calculate effective scales.
-  if (!use_cifg) {
-    effective_input_to_input_scale =
-        input_to_input_weight_scale * input_scale / intermediate_scale[0];
-    effective_recurrent_to_input_scale = recurrent_to_input_weight_scale *
-                                         output_state_scale /
-                                         intermediate_scale[0];
-  }
-  effective_input_to_forget_scale =
-      input_to_forget_weight_scale * input_scale / intermediate_scale[1];
-  effective_recurrent_to_forget_scale = recurrent_to_forget_weight_scale *
-                                        output_state_scale /
-                                        intermediate_scale[1];
-
-  effective_input_to_cell_scale =
-      input_to_cell_weight_scale * input_scale / intermediate_scale[2];
-  effective_recurrent_to_cell_scale = recurrent_to_cell_weight_scale *
-                                      output_state_scale /
-                                      intermediate_scale[2];
-
-  effective_input_to_output_scale =
-      input_to_output_weight_scale * input_scale / intermediate_scale[3];
-  effective_recurrent_to_output_scale = recurrent_to_output_weight_scale *
-                                        output_state_scale /
-                                        intermediate_scale[3];
-
-  effective_hidden_scale = std::pow((float)2, (float)-15) /
-                           intermediate_scale[4] *
-                           std::pow((float)2, (float)-15);
-
-  effective_proj_scale =
-      projection_weight_scale * intermediate_scale[4] / output_state_scale;
-
-  if (use_peephole) {
-    if (!use_cifg) {
-      effective_cell_to_input_scale =
-          std::pow((float)(2), (float)cell_scale) *  // NOLINT
-          (float)(cell_to_input_weight_scale) / intermediate_scale[0];
-    }
-    effective_cell_to_forget_scale =
-        std::pow((float)2, (float)cell_scale) *  // NOLINT
-        (float)cell_to_forget_weight_scale / intermediate_scale[1];
-    effective_cell_to_output_scale =
-        std::pow((float)2, (float)cell_scale) *  // NOLINT
-        (float)cell_to_output_weight_scale / intermediate_scale[3];
-  }
-
-  // Decompose scales.
-  QuantizeMultiplier(static_cast<double>(effective_input_to_input_scale),
-                     &integer_lstm_param->effective_input_to_input_scale_a,
-                     &integer_lstm_param->effective_input_to_input_scale_b);
-  QuantizeMultiplier(static_cast<double>(effective_recurrent_to_input_scale),
-                     &integer_lstm_param->effective_recurrent_to_input_scale_a,
-                     &integer_lstm_param->effective_recurrent_to_input_scale_b);
-  QuantizeMultiplier(static_cast<double>(effective_cell_to_input_scale),
-                     &integer_lstm_param->effective_cell_to_input_scale_a,
-                     &integer_lstm_param->effective_cell_to_input_scale_b);
-  QuantizeMultiplier(static_cast<double>(effective_input_to_forget_scale),
-                     &integer_lstm_param->effective_input_to_forget_scale_a,
-                     &integer_lstm_param->effective_input_to_forget_scale_b);
-  QuantizeMultiplier(
-      static_cast<double>(effective_recurrent_to_forget_scale),
-      &integer_lstm_param->effective_recurrent_to_forget_scale_a,
-      &integer_lstm_param->effective_recurrent_to_forget_scale_b);
-  QuantizeMultiplier(static_cast<double>(effective_cell_to_forget_scale),
-                     &integer_lstm_param->effective_cell_to_forget_scale_a,
-                     &integer_lstm_param->effective_cell_to_forget_scale_b);
-  QuantizeMultiplier(static_cast<double>(effective_input_to_cell_scale),
-                     &integer_lstm_param->effective_input_to_cell_scale_a,
-                     &integer_lstm_param->effective_input_to_cell_scale_b);
-  QuantizeMultiplier(static_cast<double>(effective_recurrent_to_cell_scale),
-                     &integer_lstm_param->effective_recurrent_to_cell_scale_a,
-                     &integer_lstm_param->effective_recurrent_to_cell_scale_b);
-  QuantizeMultiplier(static_cast<double>(effective_input_to_output_scale),
-                     &integer_lstm_param->effective_input_to_output_scale_a,
-                     &integer_lstm_param->effective_input_to_output_scale_b);
-  QuantizeMultiplier(
-      static_cast<double>(effective_recurrent_to_output_scale),
-      &integer_lstm_param->effective_recurrent_to_output_scale_a,
-      &integer_lstm_param->effective_recurrent_to_output_scale_b);
-  QuantizeMultiplier(static_cast<double>(effective_cell_to_output_scale),
-                     &integer_lstm_param->effective_cell_to_output_scale_a,
-                     &integer_lstm_param->effective_cell_to_output_scale_b);
-  QuantizeMultiplier(static_cast<double>(effective_proj_scale),
-                     &integer_lstm_param->effective_proj_scale_a,
-                     &integer_lstm_param->effective_proj_scale_b);
-  QuantizeMultiplier(static_cast<double>(effective_hidden_scale),
-                     &integer_lstm_param->effective_hidden_scale_a,
-                     &integer_lstm_param->effective_hidden_scale_b);
-  QuantizeMultiplier(static_cast<double>(layer_norm_input_scale),
-                     &integer_lstm_param->layer_norm_input_scale_a,
-                     &integer_lstm_param->layer_norm_input_scale_b);
-  QuantizeMultiplier(static_cast<double>(layer_norm_forget_scale),
-                     &integer_lstm_param->layer_norm_forget_scale_a,
-                     &integer_lstm_param->layer_norm_forget_scale_b);
-  QuantizeMultiplier(static_cast<double>(layer_norm_cell_scale),
-                     &integer_lstm_param->layer_norm_cell_scale_a,
-                     &integer_lstm_param->layer_norm_cell_scale_b);
-  QuantizeMultiplier(static_cast<double>(layer_norm_output_scale),
-                     &integer_lstm_param->layer_norm_output_scale_a,
-                     &integer_lstm_param->layer_norm_output_scale_b);
-
-  integer_lstm_param->hidden_zp = intermediate_zp[4];
-
-  // 10000 is used to make sure the kernel logic does not overflow.
-  if (!use_cifg) {
-    integer_lstm_param->input_variance_guard =
-        std::max(static_cast<int32_t>(1),
-                 static_cast<int32_t>(10000 * layer_norm_input_scale));
-  }
-  integer_lstm_param->forget_variance_guard =
-      std::max(static_cast<int32_t>(1),
-               static_cast<int32_t>(10000 * layer_norm_forget_scale));
-  integer_lstm_param->cell_variance_guard =
-      std::max(static_cast<int32_t>(1),
-               static_cast<int32_t>(10000 * layer_norm_cell_scale));
-  integer_lstm_param->output_variance_guard =
-      std::max(static_cast<int32_t>(1),
-               static_cast<int32_t>(10000 * layer_norm_output_scale));
-
   return kTfLiteOk;
 }
 
 }  // namespace
 
-// Temporary tensors
-enum TemporaryTensor {
-  kScratchBuffer = 0,
-  kInputQuantized = 1,
-  kOutputStateQuantized = 2,
-  kCellStateQuantized = 3,
-  kInputScalingFactors = 4,
-  kOutputStateScalingFactors = 5,
-  kProductScalingFactors = 6,
-  kRecoveredCellWeights = 7,
-  kAccumScratch = 8,
-  kInputZeroPoints = 9,
-  kOutputStateZeroPoints = 10,
-  kRowSums = 11,
-  kNumTemporaryTensors = 12,
-};
-
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  OpData* op_data = reinterpret_cast<OpData*>(
-      context->AllocatePersistentBuffer(context, sizeof(OpData)));
-
-  return op_data;
-}
-
-// Check that input tensor dimensions matches with each other.
-TfLiteStatus CheckInputTensorDimensions(TfLiteContext* context,
-                                        TfLiteNode* node, int n_input,
-                                        int n_output, int n_cell,
-                                        bool use_layer_norm, bool is_integer) {
-  const auto* params = reinterpret_cast<TfLiteLSTMParams*>(node->builtin_data);
-
-  // Making sure clipping parameters have valid values.
-  // == 0 means no clipping
-  //  > 0 means clipping
-  TF_LITE_ENSURE(context, params->cell_clip >= 0);
-  TF_LITE_ENSURE(context, params->proj_clip >= 0);
-  const TfLiteEvalTensor* input_to_input_weights = tflite::micro::GetEvalInput(
-      context, node, micro::lstm::full::kInputToInputWeightsTensor);
-  if (input_to_input_weights != nullptr) {
-    TF_LITE_ENSURE_EQ(context, input_to_input_weights->dims->size, 2);
-    TF_LITE_ENSURE_EQ(context, input_to_input_weights->dims->data[0], n_cell);
-    TF_LITE_ENSURE_EQ(context, input_to_input_weights->dims->data[1], n_input);
-  }
-  const TfLiteEvalTensor* input_to_forget_weights = tflite::micro::GetEvalInput(
-      context, node, micro::lstm::full::kInputToForgetWeightsTensor);
-
-  TF_LITE_ENSURE_EQ(context, input_to_forget_weights->dims->size, 2);
-  TF_LITE_ENSURE_EQ(context, input_to_forget_weights->dims->data[0], n_cell);
-  TF_LITE_ENSURE_EQ(context, input_to_forget_weights->dims->data[1], n_input);
-  const TfLiteEvalTensor* input_to_cell_weights = tflite::micro::GetEvalInput(
-      context, node, micro::lstm::full::kInputToCellWeightsTensor);
-
-  TF_LITE_ENSURE_EQ(context, input_to_cell_weights->dims->size, 2);
-  TF_LITE_ENSURE_EQ(context, input_to_cell_weights->dims->data[0], n_cell);
-  TF_LITE_ENSURE_EQ(context, input_to_cell_weights->dims->data[1], n_input);
-  const TfLiteEvalTensor* recurrent_to_input_weights =
-      tflite::micro::GetEvalInput(
-          context, node, micro::lstm::full::kRecurrentToInputWeightsTensor);
-  if (recurrent_to_input_weights != nullptr) {
-    TF_LITE_ENSURE_EQ(context, recurrent_to_input_weights->dims->size, 2);
-    TF_LITE_ENSURE_EQ(context, recurrent_to_input_weights->dims->data[0],
-                      n_cell);
-    TF_LITE_ENSURE_EQ(context, recurrent_to_input_weights->dims->data[1],
-                      n_output);
-  }
-  const TfLiteEvalTensor* recurrent_to_forget_weights =
-      tflite::micro::GetEvalInput(
-          context, node, micro::lstm::full::kRecurrentToForgetWeightsTensor);
-
-  TF_LITE_ENSURE_EQ(context, recurrent_to_forget_weights->dims->size, 2);
-  TF_LITE_ENSURE_EQ(context, recurrent_to_forget_weights->dims->data[0],
-                    n_cell);
-  TF_LITE_ENSURE_EQ(context, recurrent_to_forget_weights->dims->data[1],
-                    n_output);
-  const TfLiteEvalTensor* recurrent_to_cell_weights =
-      tflite::micro::GetEvalInput(
-          context, node, micro::lstm::full::kRecurrentToCellWeightsTensor);
-
-  TF_LITE_ENSURE_EQ(context, recurrent_to_cell_weights->dims->size, 2);
-  TF_LITE_ENSURE_EQ(context, recurrent_to_cell_weights->dims->data[0], n_cell);
-  TF_LITE_ENSURE_EQ(context, recurrent_to_cell_weights->dims->data[1],
-                    n_output);
-
-  // We make sure the input-gate's parameters are either both present (regular
-  // LSTM) or not at all (CIFG-LSTM).
-  const bool cifg_weights_all_or_none =
-      ((input_to_input_weights != nullptr) &&
-       (recurrent_to_input_weights != nullptr)) ||
-      ((input_to_input_weights == nullptr) &&
-       (recurrent_to_input_weights == nullptr));
-  TF_LITE_ENSURE(context, cifg_weights_all_or_none == true);
-
-  const TfLiteTensor* cell_to_input_weights = GetOptionalInputTensor(
-      context, node, micro::lstm::full::kCellToInputWeightsTensor);
-  if (cell_to_input_weights != nullptr) {
-    TF_LITE_ENSURE_EQ(context, cell_to_input_weights->dims->size, 1);
-    TF_LITE_ENSURE_EQ(context, cell_to_input_weights->dims->data[0], n_cell);
-    TF_LITE_ENSURE_TYPES_EQ(
-        context, cell_to_input_weights->type,
-        is_integer ? kTfLiteInt16 : input_to_forget_weights->type);
-  }
-
-  const TfLiteTensor* cell_to_forget_weights = GetOptionalInputTensor(
-      context, node, lstm::full::kCellToForgetWeightsTensor);
-  if (cell_to_forget_weights != nullptr) {
-    TF_LITE_ENSURE_EQ(context, cell_to_forget_weights->dims->size, 1);
-    TF_LITE_ENSURE_EQ(context, cell_to_forget_weights->dims->data[0], n_cell);
-    TF_LITE_ENSURE_TYPES_EQ(
-        context, cell_to_forget_weights->type,
-        is_integer ? kTfLiteInt16 : input_to_forget_weights->type);
-  }
-
-  const TfLiteTensor* cell_to_output_weights = GetOptionalInputTensor(
-      context, node, micro::lstm::full::kCellToOutputWeightsTensor);
-  if (cell_to_output_weights != nullptr) {
-    TF_LITE_ENSURE_EQ(context, cell_to_output_weights->dims->size, 1);
-    TF_LITE_ENSURE_EQ(context, cell_to_output_weights->dims->data[0], n_cell);
-    TF_LITE_ENSURE_TYPES_EQ(
-        context, cell_to_output_weights->type,
-        is_integer ? kTfLiteInt16 : input_to_forget_weights->type);
-  }
-
-  // Making sure the peephole weights are there all or none.
-  const bool use_cifg = (input_to_input_weights == nullptr);
-  const bool peephole_weights_all_or_none =
-      ((cell_to_input_weights != nullptr || use_cifg) &&
-       (cell_to_forget_weights != nullptr) &&
-       (cell_to_output_weights != nullptr)) ||
-      ((cell_to_input_weights == nullptr) &&
-       (cell_to_forget_weights == nullptr) &&
-       (cell_to_output_weights == nullptr));
-  TF_LITE_ENSURE(context, peephole_weights_all_or_none == true);
-  const TfLiteEvalTensor* input_gate_bias = tflite::micro::GetEvalInput(
-      context, node, micro::lstm::full::kInputGateBiasTensor);
-
-  if (use_cifg) {
-    TF_LITE_ENSURE_EQ(context, input_gate_bias, nullptr);
-  } else {
-    TF_LITE_ENSURE_EQ(context, input_gate_bias->dims->size, 1);
-    TF_LITE_ENSURE_EQ(context, input_gate_bias->dims->data[0], n_cell);
-    if (is_integer) {
-      TF_LITE_ENSURE_TYPES_EQ(context, input_gate_bias->type, kTfLiteInt32);
-    } else {
-      TF_LITE_ENSURE_TYPES_EQ(context, input_gate_bias->type, kTfLiteFloat32);
-    }
-  }
-  const TfLiteEvalTensor* forget_gate_bias = tflite::micro::GetEvalInput(
-      context, node, micro::lstm::full::kForgetGateBiasTensor);
-
-  TF_LITE_ENSURE_EQ(context, forget_gate_bias->dims->size, 1);
-  TF_LITE_ENSURE_EQ(context, forget_gate_bias->dims->data[0], n_cell);
-  if (is_integer) {
-    TF_LITE_ENSURE_TYPES_EQ(context, forget_gate_bias->type, kTfLiteInt32);
-  } else {
-    TF_LITE_ENSURE_TYPES_EQ(context, forget_gate_bias->type, kTfLiteFloat32);
-  }
-  const TfLiteEvalTensor* cell_gate_bias = tflite::micro::GetEvalInput(
-      context, node, micro::lstm::full::kCellGateBiasTensor);
-
-  TF_LITE_ENSURE_EQ(context, cell_gate_bias->dims->size, 1);
-  TF_LITE_ENSURE_EQ(context, cell_gate_bias->dims->data[0], n_cell);
-  if (is_integer) {
-    TF_LITE_ENSURE_TYPES_EQ(context, cell_gate_bias->type, kTfLiteInt32);
-  } else {
-    TF_LITE_ENSURE_TYPES_EQ(context, cell_gate_bias->type, kTfLiteFloat32);
-  }
-  const TfLiteEvalTensor* output_gate_bias = tflite::micro::GetEvalInput(
-      context, node, micro::lstm::full::kOutputGateBiasTensor);
-  TF_LITE_ENSURE_EQ(context, output_gate_bias->dims->size, 1);
-  TF_LITE_ENSURE_EQ(context, output_gate_bias->dims->data[0], n_cell);
-  if (is_integer) {
-    TF_LITE_ENSURE_TYPES_EQ(context, output_gate_bias->type, kTfLiteInt32);
-  } else {
-    TF_LITE_ENSURE_TYPES_EQ(context, output_gate_bias->type, kTfLiteFloat32);
-  }
-
-  const TfLiteTensor* projection_weights = GetOptionalInputTensor(
-      context, node, micro::lstm::full::kProjectionWeightsTensor);
-  if (projection_weights != nullptr) {
-    TF_LITE_ENSURE_EQ(context, projection_weights->dims->size, 2);
-    TF_LITE_ENSURE_EQ(context, projection_weights->dims->data[0], n_output);
-    TF_LITE_ENSURE_EQ(context, projection_weights->dims->data[1], n_cell);
-  }
-
-  const TfLiteTensor* projection_bias = GetOptionalInputTensor(
-      context, node, micro::lstm::full::kProjectionBiasTensor);
-  if (projection_bias != nullptr) {
-    TF_LITE_ENSURE_EQ(context, projection_bias->dims->size, 1);
-    TF_LITE_ENSURE_EQ(context, projection_bias->dims->data[0], n_output);
-    if (is_integer) {
-      TF_LITE_ENSURE_TYPES_EQ(context, projection_bias->type, kTfLiteInt32);
-    } else {
-      TF_LITE_ENSURE_TYPES_EQ(context, projection_bias->type, kTfLiteFloat32);
-    }
-  }
-
-  // Making sure the projection tensors are consistent:
-  // 1) If projection weight is not present, then projection bias should not be
-  // present.
-  // 2) If projection weight is present, then projection bias is optional.
-  const bool projecton_tensors_consistent =
-      ((projection_weights != nullptr) || (projection_bias == nullptr));
-  TF_LITE_ENSURE(context, projecton_tensors_consistent == true);
-
-  if (use_layer_norm) {
-    const TfLiteEvalTensor* input_layer_norm_coefficients =
-        tflite::micro::GetEvalInput(
-            context, node,
-            micro::lstm::full::kInputLayerNormCoefficientsTensor);
-    if (use_cifg) {
-      TF_LITE_ENSURE_EQ(context, input_layer_norm_coefficients, nullptr);
-    } else {
-      TF_LITE_ENSURE(context, input_layer_norm_coefficients != nullptr);
-      TF_LITE_ENSURE_EQ(context, input_layer_norm_coefficients->dims->size, 1);
-      TF_LITE_ENSURE_EQ(context, input_layer_norm_coefficients->dims->data[0],
-                        n_cell);
-      if (is_integer) {
-        TF_LITE_ENSURE_TYPES_EQ(context, input_layer_norm_coefficients->type,
-                                kTfLiteInt16);
-      } else {
-        TF_LITE_ENSURE_TYPES_EQ(context, input_layer_norm_coefficients->type,
-                                kTfLiteFloat32);
-      }
-    }
-    const TfLiteEvalTensor* forget_layer_norm_coefficients =
-        tflite::micro::GetEvalInput(
-            context, node,
-            micro::lstm::full::kForgetLayerNormCoefficientsTensor);
-    TF_LITE_ENSURE_EQ(context, forget_layer_norm_coefficients->dims->size, 1);
-    TF_LITE_ENSURE_EQ(context, forget_layer_norm_coefficients->dims->data[0],
-                      n_cell);
-    if (is_integer) {
-      TF_LITE_ENSURE_TYPES_EQ(context, forget_layer_norm_coefficients->type,
-                              kTfLiteInt16);
-    } else {
-      TF_LITE_ENSURE_TYPES_EQ(context, forget_layer_norm_coefficients->type,
-                              kTfLiteFloat32);
-    }
-    const TfLiteEvalTensor* cell_layer_norm_coefficients =
-        tflite::micro::GetEvalInput(
-            context, node, micro::lstm::full::kCellLayerNormCoefficientsTensor);
-    TF_LITE_ENSURE_EQ(context, cell_layer_norm_coefficients->dims->size, 1);
-    TF_LITE_ENSURE_EQ(context, cell_layer_norm_coefficients->dims->data[0],
-                      n_cell);
-    if (is_integer) {
-      TF_LITE_ENSURE_TYPES_EQ(context, cell_layer_norm_coefficients->type,
-                              kTfLiteInt16);
-    } else {
-      TF_LITE_ENSURE_TYPES_EQ(context, cell_layer_norm_coefficients->type,
-                              kTfLiteFloat32);
-    }
-    const TfLiteEvalTensor* output_layer_norm_coefficients =
-        tflite::micro::GetEvalInput(
-            context, node,
-            micro::lstm::full::kOutputLayerNormCoefficientsTensor);
-
-    TF_LITE_ENSURE_EQ(context, output_layer_norm_coefficients->dims->size, 1);
-    TF_LITE_ENSURE_EQ(context, output_layer_norm_coefficients->dims->data[0],
-                      n_cell);
-    if (is_integer) {
-      TF_LITE_ENSURE_TYPES_EQ(context, output_layer_norm_coefficients->type,
-                              kTfLiteInt16);
-    } else {
-      TF_LITE_ENSURE_TYPES_EQ(context, output_layer_norm_coefficients->type,
-                              kTfLiteFloat32);
-    }
-  }
-
-  return kTfLiteOk;
-}
-
-TfLiteStatus PrecomputeZeroPointTimesWeightWithBias(
-    TfLiteContext* context, int32_t zero_point,
-    const TfLiteTensor* weight_tensor, const TfLiteTensor* bias_tensor,
-    std::unique_ptr<int32_t[]>* output) {
-  if (weight_tensor == nullptr) {
-    return kTfLiteOk;
-  }
-
-  const RuntimeShape& weight_shape = GetTensorShape(weight_tensor);
-  TF_LITE_ENSURE_EQ(context, weight_shape.DimensionsCount(), 2);
-  const int row = weight_shape.Dims(0);
-  const int col = weight_shape.Dims(1);
-  output->reset(new int32_t[row]);
-  if (bias_tensor == nullptr) {
-    memset(output->get(), 0, row * sizeof(int32_t));
-  } else {
-    const int32_t* bias = GetTensorData<int32_t>(bias_tensor);
-    memcpy(output->get(), bias, row * sizeof(int32_t));
-  }
-  if (zero_point != 0) {
-    const int8_t* weight = GetTensorData<int8_t>(weight_tensor);
-    tensor_utils::PortableMatrixScalarMultiplyAccumulate(
-        weight, zero_point, row, col, output->get());
-  }
-  return kTfLiteOk;
-}
-
-TfLiteStatus PopulatePrecomputedZPTimesWeightsWithBias(TfLiteContext* context,
-                                                       OpData* op_data,
-                                                       TfLiteNode* node) {
-  const TfLiteTensor* input;
-  TF_LITE_ENSURE_OK(
-      context,
-      GetInputSafe(context, node, micro::lstm::full::kInputTensor, &input));
-  const TfLiteTensor* output_state =
-      GetVariableInput(context, node, micro::lstm::full::kOutputStateTensor);
-  TF_LITE_ENSURE(context, output_state != nullptr);
-
-  const int32_t input_zero_point = -input->params.zero_point;
-  const int32_t output_state_zero_point = -output_state->params.zero_point;
-
-  const TfLiteTensor* input_to_input_weights = GetOptionalInputTensor(
-      context, node, micro::lstm::full::kInputToInputWeightsTensor);
-  const TfLiteTensor* input_to_forget_weights;
-  TF_LITE_ENSURE_OK(context,
-                    GetInputSafe(context, node,
-                                 micro::lstm::full::kInputToForgetWeightsTensor,
-                                 &input_to_forget_weights));
-  const TfLiteTensor* input_to_cell_weights;
-  TF_LITE_ENSURE_OK(
-      context,
-      GetInputSafe(context, node, micro::lstm::full::kInputToCellWeightsTensor,
-                   &input_to_cell_weights));
-  const TfLiteTensor* input_to_output_weights;
-  TF_LITE_ENSURE_OK(context,
-                    GetInputSafe(context, node,
-                                 micro::lstm::full::kInputToOutputWeightsTensor,
-                                 &input_to_output_weights));
-
-  const TfLiteTensor* recurrent_to_input_weights = GetOptionalInputTensor(
-      context, node, micro::lstm::full::kRecurrentToInputWeightsTensor);
-  const TfLiteTensor* recurrent_to_forget_weights;
-  TF_LITE_ENSURE_OK(
-      context, GetInputSafe(context, node,
-                            micro::lstm::full::kRecurrentToForgetWeightsTensor,
-                            &recurrent_to_forget_weights));
-  const TfLiteTensor* recurrent_to_cell_weights;
-  TF_LITE_ENSURE_OK(
-      context, GetInputSafe(context, node,
-                            micro::lstm::full::kRecurrentToCellWeightsTensor,
-                            &recurrent_to_cell_weights));
-  const TfLiteTensor* recurrent_to_output_weights;
-  TF_LITE_ENSURE_OK(
-      context, GetInputSafe(context, node,
-                            micro::lstm::full::kRecurrentToOutputWeightsTensor,
-                            &recurrent_to_output_weights));
-
-  const TfLiteTensor* projection_weights = GetOptionalInputTensor(
-      context, node, lstm::full::kProjectionWeightsTensor);
-  const TfLiteTensor* projection_bias = GetOptionalInputTensor(
-      context, node, micro::lstm::full::kProjectionBiasTensor);
-
-  lstm_eval::IntegerLstmParameter* integer_lstm_params =
-      &op_data->integer_lstm_param;
-
-  TfLiteTensor* intermediate =
-      context->GetTensor(context, node->intermediates->data[4]);
-  const auto* params =
-      static_cast<TfLiteAffineQuantization*>(intermediate->quantization.params);
-  const int32_t hidden_zp = params->zero_point->data[0];
-
-  // Get bias and perform zero point calculation.
-  // When there is layer normalization, the gate bias does not apply to matmul
-  // directly:
-  //      y = ln(w * x + w * r + w * c) + b.
-  const bool is_layer_norm = op_data->use_layer_norm;
-
-  // Forget gate.
-  const TfLiteTensor* forget_gate_bias =
-      is_layer_norm
-          ? nullptr
-          : GetInput(context, node, micro::lstm::full::kForgetGateBiasTensor);
-  TF_LITE_ENSURE_OK(
-      context,
-      PrecomputeZeroPointTimesWeightWithBias(
-          context, input_zero_point, input_to_forget_weights, forget_gate_bias,
-          &(integer_lstm_params->input_to_forget_effective_bias)));
-
-  TF_LITE_ENSURE_OK(
-      context,
-      PrecomputeZeroPointTimesWeightWithBias(
-          context, output_state_zero_point, recurrent_to_forget_weights,
-          nullptr, &(integer_lstm_params->recurrent_to_forget_effective_bias)));
-
-  // Modulation gate.
-  const TfLiteTensor* cell_gate_bias =
-      is_layer_norm
-          ? nullptr
-          : GetInput(context, node, micro::lstm::full::kCellGateBiasTensor);
-  TF_LITE_ENSURE_OK(
-      context,
-      PrecomputeZeroPointTimesWeightWithBias(
-          context, input_zero_point, input_to_cell_weights, cell_gate_bias,
-          &(integer_lstm_params->input_to_cell_effective_bias)));
-  TF_LITE_ENSURE_OK(
-      context,
-      PrecomputeZeroPointTimesWeightWithBias(
-          context, output_state_zero_point, recurrent_to_cell_weights, nullptr,
-          &(integer_lstm_params->recurrent_to_cell_effective_bias)));
-
-  // Output gate.
-  const TfLiteTensor* output_gate_bias =
-      is_layer_norm
-          ? nullptr
-          : GetInput(context, node, micro::lstm::full::kOutputGateBiasTensor);
-  TF_LITE_ENSURE_OK(
-      context,
-      PrecomputeZeroPointTimesWeightWithBias(
-          context, input_zero_point, input_to_output_weights, output_gate_bias,
-          &(integer_lstm_params->input_to_output_effective_bias)));
-
-  TF_LITE_ENSURE_OK(
-      context,
-      PrecomputeZeroPointTimesWeightWithBias(
-          context, output_state_zero_point, recurrent_to_output_weights,
-          nullptr, &(integer_lstm_params->recurrent_to_output_effective_bias)));
-
-  // Input gate. The calculation is only meaningful for non-cifg case.
-  const TfLiteTensor* input_gate_bias =
-      is_layer_norm
-          ? nullptr
-          : GetInput(context, node, micro::lstm::full::kInputGateBiasTensor);
-  TF_LITE_ENSURE_OK(
-      context,
-      PrecomputeZeroPointTimesWeightWithBias(
-          context, input_zero_point, input_to_input_weights, input_gate_bias,
-          &(integer_lstm_params->input_to_input_effective_bias)));
-  TF_LITE_ENSURE_OK(
-      context,
-      PrecomputeZeroPointTimesWeightWithBias(
-          context, output_state_zero_point, recurrent_to_input_weights, nullptr,
-          &(integer_lstm_params->recurrent_to_input_effective_bias)));
-
-  // Projection bias. The calculation is only meaningful for with projection.
-  TF_LITE_ENSURE_OK(context,
-                    PrecomputeZeroPointTimesWeightWithBias(
-                        context, hidden_zp, projection_weights, projection_bias,
-                        &(integer_lstm_params->projection_effective_bias)));
-  return kTfLiteOk;
-}
-
-// Resize the output and  state tensors based on the sizes of the input tensors.
-// Allocate a temporary scratch tensor. Also check that the sizes of the input
-// tensors match each other.
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
-  // const int scratch_tensor_index = op_data->scratch_tensor_index;
-
-  // Check we have all the inputs and outputs we need.
-  bool use_layer_norm = false;
-  if (node->inputs->size == 24) {
-    const TfLiteTensor* forget_layer_norm_coefficients = GetOptionalInputTensor(
-        context, node, micro::lstm::full::kForgetLayerNormCoefficientsTensor);
-    if (forget_layer_norm_coefficients == nullptr) {
-      use_layer_norm = false;
-    } else {
-      use_layer_norm = true;
-    }
-  } else if (node->inputs->size == 20) {
-    // This is deprecated and is only kept here for backward compatibility.
-    use_layer_norm = false;
-  } else {
-    MicroPrintf("The LSTM Full kernel expects 20 or 24 inputs. Got %d inputs",
-                node->inputs->size);
-    return kTfLiteError;
-  }
-  TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
-  op_data->use_layer_norm = use_layer_norm;
-
-  // Inferring batch size, number of outputs and sequence length and
-  // number of cells from the input tensors.
-  const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(
-      context, node, micro::lstm::full::kInputTensor);
-  const bool is_integer = input->type == kTfLiteInt8;
-  TF_LITE_ENSURE(context, input->dims->size > 1);
-  const auto* params =
-      reinterpret_cast<TfLiteUnidirectionalSequenceLSTMParams*>(
-          node->builtin_data);
-  const bool time_major = params->time_major;
-  const int n_batch = time_major ? input->dims->data[1] : input->dims->data[0];
-  const int n_input = input->dims->data[2];
-  const TfLiteEvalTensor* input_to_output_weights = tflite::micro::GetEvalInput(
-      context, node, micro::lstm::full::kInputToOutputWeightsTensor);
-  const int n_cell = input_to_output_weights->dims->data[0];
-  TF_LITE_ENSURE_EQ(context, input_to_output_weights->dims->size, 2);
-  TF_LITE_ENSURE_EQ(context, input_to_output_weights->dims->data[1], n_input);
-  const TfLiteEvalTensor* recurrent_to_output_weights =
-      tflite::micro::GetEvalInput(
-          context, node, micro::lstm::full::kRecurrentToOutputWeightsTensor);
-
-  TF_LITE_ENSURE_EQ(context, recurrent_to_output_weights->dims->size, 2);
-  TF_LITE_ENSURE_EQ(context, recurrent_to_output_weights->dims->data[0],
-                    n_cell);
-  const int n_output = recurrent_to_output_weights->dims->data[1];
-
-  // Check that input tensor dimensions matches with each other.
-  TF_LITE_ENSURE_OK(
-      context, CheckInputTensorDimensions(context, node, n_input, n_output,
-                                          n_cell, use_layer_norm, is_integer));
-  // Get the pointer to output, output_state and cell_state buffer tensors.
-  //  TfLiteEvalTensor* output =
-  //      tflite::micro::GetEvalOutput(context, node,
-  //      micro::lstm::full::kOutputTensor);
-  TfLiteEvalTensor* output_state = tflite::micro::GetMutableEvalInput(
-      context, node, micro::lstm::full::kOutputStateTensor);
-  TFLITE_DCHECK(output_state != nullptr);
-  TfLiteEvalTensor* cell_state = tflite::micro::GetMutableEvalInput(
-      context, node, micro::lstm::full::kCellStateTensor);
-  TFLITE_DCHECK(cell_state != nullptr);
-  // Check the shape of input state tensors.
-  // These tensor may be 1D or 2D. It's fine as long as the total size is
-  // correct.
-  TF_LITE_ENSURE_EQ(context, NumElements(output_state->dims),
-                    n_batch * n_output);
-  TF_LITE_ENSURE_EQ(context, NumElements(cell_state->dims), n_batch * n_cell);
-
-  if (is_integer) {
-    const int num_intermediate_tensors = node->intermediates->size;
-    TF_LITE_ENSURE(context, num_intermediate_tensors == 5);
-  }
-
-  if (is_integer) {
-    // Integer UnidirectionalSequenceLSTM prepare function for 8x8->16.
-    // This code path needs 5 intermediate tensors per Op.
-    // Populate quantization parameters.
-    PopulateQuantizedLstmParams8x8_16(context, node,
-                                      &op_data->integer_lstm_param);
-    // Allocate scratch buffer. Need 6 16bit buffer with size n_batch * n_cell
-    // and 1 8bit buffer with size n_batch * n_cell. We also need 1 32 bit
-    // buffer with size n_batch * n_cell.
-    //
-    // Handle cifg case as well, which might save one buffer.
-
-    int scratch_idx = 0;
-
-    context->RequestScratchBufferInArena(
-        context, n_batch * n_cell * sizeof(int32_t), &(scratch_idx));
-    op_data->scratch_tensor_index = scratch_idx;
-
-    for (int scratch_index = 1; scratch_index < 6; ++scratch_index) {
-      // node->temporaries->data[scratch_index] = op_data->scratch_tensor_index
-      // + scratch_index;
-      context->RequestScratchBufferInArena(
-          context, n_batch * n_cell * sizeof(int32_t), &(scratch_idx));
-      TFLITE_DCHECK(scratch_idx ==
-                    (op_data->scratch_tensor_index + scratch_index));
-    }
-
-    // Populate precomputed zp * weight.
-    TF_LITE_ENSURE_OK(context, PopulatePrecomputedZPTimesWeightsWithBias(
-                                   context, op_data, node));
-  }
-
-  return kTfLiteOk;
-}
-
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  const auto* params =
-      reinterpret_cast<TfLiteUnidirectionalSequenceLSTMParams*>(
-          node->builtin_data);
-  const OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
-  //  const bool use_layer_norm = op_data->use_layer_norm;
-  //  const bool time_major = params->time_major;
-
-  const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(
-      context, node, micro::lstm::full::kInputTensor);
-  const TfLiteEvalTensor* input_to_input_weights = tflite::micro::GetEvalInput(
-      context, node, micro::lstm::full::kInputToInputWeightsTensor);
-  const TfLiteEvalTensor* input_to_forget_weights = tflite::micro::GetEvalInput(
-      context, node, micro::lstm::full::kInputToForgetWeightsTensor);
-  const TfLiteEvalTensor* input_to_cell_weights = tflite::micro::GetEvalInput(
-      context, node, micro::lstm::full::kInputToCellWeightsTensor);
-  const TfLiteEvalTensor* input_to_output_weights = tflite::micro::GetEvalInput(
-      context, node, micro::lstm::full::kInputToOutputWeightsTensor);
-  const TfLiteEvalTensor* recurrent_to_input_weights =
-      tflite::micro::GetEvalInput(
-          context, node, micro::lstm::full::kRecurrentToInputWeightsTensor);
-  const TfLiteEvalTensor* recurrent_to_forget_weights =
-      tflite::micro::GetEvalInput(
-          context, node, micro::lstm::full::kRecurrentToForgetWeightsTensor);
-  const TfLiteEvalTensor* recurrent_to_cell_weights =
-      tflite::micro::GetEvalInput(
-          context, node, micro::lstm::full::kRecurrentToCellWeightsTensor);
-  const TfLiteEvalTensor* recurrent_to_output_weights =
-      tflite::micro::GetEvalInput(
-          context, node, micro::lstm::full::kRecurrentToOutputWeightsTensor);
-  const TfLiteEvalTensor* cell_to_input_weights = context->GetEvalTensor(
-      context,
-      node->inputs->data[micro::lstm::full::kCellToInputWeightsTensor]);
-  const TfLiteEvalTensor* cell_to_forget_weights = context->GetEvalTensor(
-      context,
-      node->inputs->data[micro::lstm::full::kCellToForgetWeightsTensor]);
-  const TfLiteEvalTensor* cell_to_output_weights = context->GetEvalTensor(
-      context,
-      node->inputs->data[micro::lstm::full::kCellToOutputWeightsTensor]);
-  const TfLiteEvalTensor* input_gate_bias = context->GetEvalTensor(
-      context, node->inputs->data[micro::lstm::full::kInputGateBiasTensor]);
-
-  const TfLiteEvalTensor* forget_gate_bias = context->GetEvalTensor(
-      context, node->inputs->data[micro::lstm::full::kForgetGateBiasTensor]);
-  const TfLiteEvalTensor* cell_gate_bias = context->GetEvalTensor(
-      context, node->inputs->data[micro::lstm::full::kCellGateBiasTensor]);
-  const TfLiteEvalTensor* output_gate_bias = context->GetEvalTensor(
-      context, node->inputs->data[micro::lstm::full::kOutputGateBiasTensor]);
-
-  const TfLiteEvalTensor* projection_weights = context->GetEvalTensor(
-      context, node->inputs->data[micro::lstm::full::kProjectionWeightsTensor]);
-  const TfLiteEvalTensor* projection_bias = context->GetEvalTensor(
-      context, node->inputs->data[micro::lstm::full::kProjectionBiasTensor]);
-
-  TfLiteEvalTensor* output_state = context->GetEvalTensor(
-      context, node->inputs->data[micro::lstm::full::kOutputStateTensor]);
-  TFLITE_DCHECK(output_state != nullptr);
-  TfLiteEvalTensor* cell_state = context->GetEvalTensor(
-      context, node->inputs->data[micro::lstm::full::kCellStateTensor]);
-  TFLITE_DCHECK(cell_state != nullptr);
-  const TfLiteEvalTensor* input_layer_norm_coefficients =
-      context->GetEvalTensor(
-          context,
-          node->inputs
-              ->data[micro::lstm::full::kInputLayerNormCoefficientsTensor]);
-
-  const TfLiteEvalTensor* forget_layer_norm_coefficients =
-      context->GetEvalTensor(
-          context,
-          node->inputs
-              ->data[micro::lstm::full::kForgetLayerNormCoefficientsTensor]);
-  const TfLiteEvalTensor* cell_layer_norm_coefficients = context->GetEvalTensor(
-      context,
-      node->inputs->data[micro::lstm::full::kCellLayerNormCoefficientsTensor]);
-
-  const TfLiteEvalTensor* output_layer_norm_coefficients =
-      context->GetEvalTensor(
-          context,
-          node->inputs
-              ->data[micro::lstm::full::kOutputLayerNormCoefficientsTensor]);
-
-  TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(
-      context, node, micro::lstm::full::kOutputTensor);
-
-  // Copy out the LSTM specific params so they can be passed in the function.
-  TfLiteLSTMParams lstm_params;
-  lstm_params.activation = params->activation;
-  lstm_params.cell_clip = params->cell_clip;
-  lstm_params.proj_clip = params->proj_clip;
-  lstm_params.asymmetric_quantize_inputs = params->asymmetric_quantize_inputs;
-  switch (input_to_output_weights->type) {
-    case kTfLiteInt8: {
-      const bool is_hybrid = input->type == kTfLiteFloat32;
-      if (is_hybrid) {
-        MicroPrintf(" hybrid type is not supported.");
-        return kTfLiteError;
-
-      } else {
-        TfLiteEvalTensor* scratch[6];
-        // Allocate scratch buffer. Need 6 16bit buffer with size n_batch *
-        // n_cell
-        // and 1 8bit buffer with size n_batch * n_cell. We also need 1 32 bit
-        // buffer with size n_batch * n_cell.
-        //
-        // Handle cifg case as well, which might save one buffer.
-
-        const auto* tmp_params =
-            reinterpret_cast<TfLiteUnidirectionalSequenceLSTMParams*>(
-                node->builtin_data);
-        const bool time_major = tmp_params->time_major;
-        for (int scratch_index = 0; scratch_index < 6; ++scratch_index) {
-          TFLITE_DCHECK(context != nullptr);
-          TFLITE_DCHECK(context->GetScratchBuffer != nullptr);
-          int32_t* scratch_tensor =
-              static_cast<int32_t*>(context->GetScratchBuffer(
-                  context, op_data->scratch_tensor_index + scratch_index));
-          scratch[scratch_index] = (TfLiteEvalTensor*)scratch_tensor;
-        }
-        /*
-                                TF_LITE_ENSURE_OK(context,
-                                                GetScratchSafe(context, node, 0,
-           &scratch0));
-
-                                TF_LITE_ENSURE_OK(context,
-                                                GetScratchSafe(context, node, 1,
-           &scratch1));
-
-                                TF_LITE_ENSURE_OK(context,
-                                                GetScratchSafe(context, node, 2,
-           &scratch2));
-
-                                TF_LITE_ENSURE_OK(context,
-                                                GetScratchSafe(context, node, 3,
-           &scratch3));
-
-                                TF_LITE_ENSURE_OK(context,
-                                                GetScratchSafe(context, node, 4,
-           &scratch4));
-
-                                TF_LITE_ENSURE_OK(context,
-                                                GetScratchSafe(context, node, 5,
-           &scratch5));
-        */
-        return lstm_eval::EvalInteger8x8_16(
-            context, node, input, input_to_input_weights,
-            input_to_forget_weights, input_to_cell_weights,
-            input_to_output_weights, recurrent_to_input_weights,
-            recurrent_to_forget_weights, recurrent_to_cell_weights,
-            recurrent_to_output_weights, cell_to_input_weights,
-            cell_to_forget_weights, cell_to_output_weights,
-            input_layer_norm_coefficients, forget_layer_norm_coefficients,
-            cell_layer_norm_coefficients, output_layer_norm_coefficients,
-            input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias,
-            projection_weights, projection_bias, &lstm_params,
-            /*forward_sequence=*/true, time_major, &op_data->integer_lstm_param,
-            output_state, cell_state, output, scratch[0], scratch[1],
-            scratch[2], scratch[3], scratch[4], scratch[5]);
-      }
-    }
-
-    default:
-      MicroPrintf("Type %s is not currently supported.",
-                  TfLiteTypeGetName(input_to_output_weights->type));
-      return kTfLiteError;
-  }
-  return kTfLiteOk;
-}
-//}  // namespace unidirectional_sequence_lstm
-
-}  // namespace micro
-}  // namespace ops
-
 TFLMRegistration Register_UNIDIRECTIONAL_SEQUENCE_LSTM() {
-  return tflite::micro::RegisterOp(ops::micro::Init, ops::micro::Prepare,
-                                   ops::micro::Eval);
+  return tflite::micro::RegisterOp(UnidirectionalSequenceLstmInit,
+                                   UnidirectionalSequenceLstmPrepare,
+                                   UnidirectionalSequenceLstmEval);
 }
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/xtensa/xtensa.h b/tensorflow/lite/micro/kernels/xtensa/xtensa.h
index 47820d3..604736d 100644
--- a/tensorflow/lite/micro/kernels/xtensa/xtensa.h
+++ b/tensorflow/lite/micro/kernels/xtensa/xtensa.h
@@ -22,13 +22,13 @@
 #include "tensorflow/lite/micro/kernels/xtensa/hifimini/fixedpoint_utils.h"
 #endif  // defined(HIFMINI)
 
-#if defined(HIFI4) || defined(HIFI5)
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
 #include "include/nnlib/xa_nnlib_api.h"
 #include "include/nnlib/xa_nnlib_standards.h"
 
 #define ALIGNED_SIZE(x, bytes) (((x) + (bytes - 1)) & (~(bytes - 1)))
 #define ALIGN_PTR(x, bytes) ((((unsigned)(x)) + (bytes - 1)) & (~(bytes - 1)))
-#endif  // defined(HIFI4) || defined(HIFI5)
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
 
 #if defined(VISION_P6)
 #include "utils.h"
diff --git a/tensorflow/lite/micro/kernels/xtensa/xtensa_conv.h b/tensorflow/lite/micro/kernels/xtensa/xtensa_conv.h
index 355f022..f804a6d 100644
--- a/tensorflow/lite/micro/kernels/xtensa/xtensa_conv.h
+++ b/tensorflow/lite/micro/kernels/xtensa/xtensa_conv.h
@@ -25,9 +25,9 @@
 struct XtensaConvOpData {
   OpDataConv reference_op_data;
 
-#if defined(HIFI4) || defined(HIFI5)
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
   int scratch_tensor_index;
-#endif  // defined(HIFI4) || defined(HIFI5)
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
 
 #if defined(VISION_P6)
   int8_t* reorder_coefficient_bias;  // buffers used to keep reordered coeff and
@@ -36,30 +36,30 @@
   int8_t* per_channel_output_shift_int8;
   uint8_t* p_context;  // persistent lib context for this instance saved here
   uint32_t context_size;
+  bool is_per_channel_quantized;
 #endif  // VISION_P6
 };
 
-#if defined(HIFI4) || defined(HIFI5)
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
 TfLiteStatus ConvPrepareHifi(TfLiteContext* context, TfLiteNode* node);
 
-TfLiteStatus ConvEvalHifi(TfLiteContext* context, TfLiteNode* node,
-                          const TfLiteConvParams& params,
-                          const XtensaConvOpData& data,
-                          const TfLiteEvalTensor* input,
-                          const TfLiteEvalTensor* filter,
-                          const TfLiteEvalTensor* bias,
-                          TfLiteEvalTensor* output);
-#endif  // defined(HIFI4) || defined(HIFI5)
+TfLiteStatus ConvEvalHifiInt8(TfLiteContext* context, TfLiteNode* node,
+                              const TfLiteConvParams& params,
+                              const XtensaConvOpData& data,
+                              const TfLiteEvalTensor* input,
+                              const TfLiteEvalTensor* filter,
+                              const TfLiteEvalTensor* bias,
+                              TfLiteEvalTensor* output);
 
-#if defined(HIFI4)
-TfLiteStatus ConvEvalHifi16(TfLiteContext* context, TfLiteNode* node,
-                            const TfLiteConvParams& params,
-                            const XtensaConvOpData& data,
-                            const TfLiteEvalTensor* input,
-                            const TfLiteEvalTensor* filter,
-                            const TfLiteEvalTensor* bias,
-                            TfLiteEvalTensor* output);
-#endif  // defined(HIFI4)
+TfLiteStatus ConvEvalHifiInt16(TfLiteContext* context, TfLiteNode* node,
+                               const TfLiteConvParams& params,
+                               const XtensaConvOpData& data,
+                               const TfLiteEvalTensor* input,
+                               const TfLiteEvalTensor* filter,
+                               const TfLiteEvalTensor* bias,
+                               TfLiteEvalTensor* output);
+
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
 
 #if defined(VISION_P6)
 
@@ -79,6 +79,9 @@
 
 TfLiteStatus ConvReferenceEvalInt16(TfLiteContext* context, TfLiteNode* node);
 
+void* ConvInitXtensa(TfLiteContext* context, const char* buffer, size_t length);
+TfLiteStatus ConvPrepareXtensa(TfLiteContext* context, TfLiteNode* node);
+
 }  // namespace tflite
 
 #endif  // TENSORFLOW_LITE_MICRO_KERNELS_XTENSA_XTENSA_CONV_H_
diff --git a/tensorflow/lite/micro/kernels/xtensa/xtensa_depthwise_conv.h b/tensorflow/lite/micro/kernels/xtensa/xtensa_depthwise_conv.h
index ca15719..7d0d765 100644
--- a/tensorflow/lite/micro/kernels/xtensa/xtensa_depthwise_conv.h
+++ b/tensorflow/lite/micro/kernels/xtensa/xtensa_depthwise_conv.h
@@ -25,9 +25,9 @@
 struct XtensaDepthwiseConvOpData {
   OpDataConv reference_op_data;
 
-#if defined(HIFI4) || defined(HIFI5)
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
   int scratch_tensor_index;
-#endif  // defined(HIFI4) || defined(HIFI5)
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
 
 #if defined(VISION_P6)
   int8_t* reorder_coefficient_bias;  // buffers used to keep reordered coeff and
@@ -39,7 +39,7 @@
 #endif  // VISION_P6
 };
 
-#if defined(HIFI4) || defined(HIFI5)
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
 TfLiteStatus DepthwiseConvPrepareHifi(TfLiteContext* context, TfLiteNode* node);
 
 TfLiteStatus DepthwiseConvEvalHifi(TfLiteContext* context, TfLiteNode* node,
@@ -52,7 +52,7 @@
 
 TfLiteStatus DepthwiseConvReferenceEvalInt8(TfLiteContext* context,
                                             TfLiteNode* node);
-#endif  // defined(HIFI4) || defined(HIFI5)
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
 
 #if defined(VISION_P6)
 
diff --git a/tensorflow/lite/micro/kernels/xtensa/xtensa_softmax.h b/tensorflow/lite/micro/kernels/xtensa/xtensa_softmax.h
index 7d0d461..d7e6a14 100644
--- a/tensorflow/lite/micro/kernels/xtensa/xtensa_softmax.h
+++ b/tensorflow/lite/micro/kernels/xtensa/xtensa_softmax.h
@@ -22,12 +22,12 @@
 
 namespace tflite {
 
-#if defined(HIFI4) || defined(HIFI5)
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
 struct XtensaSoftmaxOpData {
   SoftmaxParams params;
   int scratch_tensor_index;
 };
-#endif  // defined(HIFI4) || defined(HIFI5)
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
 
 #if defined(VISION_P6)
 struct XtensaSoftmaxOpData {
diff --git a/tensorflow/lite/micro/kernels/zeros_like.cc b/tensorflow/lite/micro/kernels/zeros_like.cc
index 597e50e..eb1f9c6 100644
--- a/tensorflow/lite/micro/kernels/zeros_like.cc
+++ b/tensorflow/lite/micro/kernels/zeros_like.cc
@@ -25,7 +25,7 @@
 constexpr int kInputTensor = 0;
 constexpr int kOutputTensor = 0;
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus ZerosLikePrepare(TfLiteContext* context, TfLiteNode* node) {
   MicroContext* micro_context = GetMicroContext(context);
 
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
@@ -50,7 +50,7 @@
   }
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus ZerosLikeEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* input =
       tflite::micro::GetEvalInput(context, node, kInputTensor);
   TfLiteEvalTensor* output =
@@ -82,7 +82,7 @@
 }  // namespace
 
 TFLMRegistration Register_ZEROS_LIKE() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  return tflite::micro::RegisterOp(nullptr, ZerosLikePrepare, ZerosLikeEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/memory_arena_threshold_test.cc b/tensorflow/lite/micro/memory_arena_threshold_test.cc
index f6bb24f..3017f56 100644
--- a/tensorflow/lite/micro/memory_arena_threshold_test.cc
+++ b/tensorflow/lite/micro/memory_arena_threshold_test.cc
@@ -97,7 +97,7 @@
 // Tail size contributed by the conv model excluding the
 // RecordingMicroAllocator's overhead
 // TODO(b/207157610): replace magic number that depends on OPs
-constexpr int kTestConvModelOnlyTailSize = 1744;
+constexpr int kTestConvModelOnlyTailSize = 1816;
 constexpr int kTestConvModelPersistentTfLiteTensorDataSize = 128;
 constexpr int kTestConvModelPersistentBufferDataSize = 728;
 #else
@@ -108,7 +108,7 @@
 // Tail size contributed by the conv model excluding the
 // RecordingMicroAllocator's overhead
 // TODO(b/207157610): replace magic number that depends on OPs
-constexpr int kTestConvModelOnlyTailSize = 2016;
+constexpr int kTestConvModelOnlyTailSize = 2088;
 constexpr int kTestConvModelPersistentTfLiteTensorDataSize = 224;
 constexpr int kTestConvModelPersistentBufferDataSize = 720;
 #endif
diff --git a/tensorflow/lite/micro/memory_helpers.cc b/tensorflow/lite/micro/memory_helpers.cc
index 685f04b..94a6fe3 100644
--- a/tensorflow/lite/micro/memory_helpers.cc
+++ b/tensorflow/lite/micro/memory_helpers.cc
@@ -50,6 +50,9 @@
     case kTfLiteFloat16:
       *size = sizeof(int16_t);
       break;
+    case kTfLiteBFloat16:
+      *size = sizeof(int16_t);
+      break;
     case kTfLiteFloat32:
       *size = sizeof(float);
       break;
diff --git a/tensorflow/lite/micro/memory_helpers_test.cc b/tensorflow/lite/micro/memory_helpers_test.cc
index e44c586..9da2940 100644
--- a/tensorflow/lite/micro/memory_helpers_test.cc
+++ b/tensorflow/lite/micro/memory_helpers_test.cc
@@ -180,8 +180,8 @@
                           tflite::TfLiteTypeSizeOf(kTfLiteComplex128, &size));
   TF_LITE_MICRO_EXPECT_EQ(sizeof(double) * 2, size);
 
-  TF_LITE_MICRO_EXPECT_NE(
-      kTfLiteOk, tflite::TfLiteTypeSizeOf(static_cast<TfLiteType>(-1), &size));
+  TF_LITE_MICRO_EXPECT_NE(kTfLiteOk,
+                          tflite::TfLiteTypeSizeOf(kTfLiteNoType, &size));
 }
 
 TF_LITE_MICRO_TEST(TestBytesRequiredForTensor) {
diff --git a/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h b/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h
index ae3705d..b2cdb61 100644
--- a/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h
+++ b/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h
@@ -107,6 +107,11 @@
     return per_buffer_size;
   }
 
+  // Returns False because the GreedyMemoryPlanner doesn't preserves all tensors
+  // after invocation. Do to the fact that tensors that tensor data for tensors
+  // that aren't being used during a phase of invocation are overwritten.
+  bool preserves_all_tensors() const override { return false; }
+
  private:
   // Whether a buffer is active in a given time range.
   bool DoesEntryOverlapInTime(const ListEntry* entry, const int first_time_used,
diff --git a/tensorflow/lite/micro/memory_planner/linear_memory_planner.cc b/tensorflow/lite/micro/memory_planner/linear_memory_planner.cc
index 5c6afb5..00d707a 100644
--- a/tensorflow/lite/micro/memory_planner/linear_memory_planner.cc
+++ b/tensorflow/lite/micro/memory_planner/linear_memory_planner.cc
@@ -19,6 +19,9 @@
 
 namespace tflite {
 
+// C++11 requires defining a constexpr static class member in a .cc file
+constexpr int tflite::LinearMemoryPlanner::kMaxBufferCount;
+
 LinearMemoryPlanner::LinearMemoryPlanner()
     : current_buffer_count_(0), next_free_offset_(0) {}
 LinearMemoryPlanner::~LinearMemoryPlanner() {}
diff --git a/tensorflow/lite/micro/memory_planner/linear_memory_planner.h b/tensorflow/lite/micro/memory_planner/linear_memory_planner.h
index d4938dd..9850569 100644
--- a/tensorflow/lite/micro/memory_planner/linear_memory_planner.h
+++ b/tensorflow/lite/micro/memory_planner/linear_memory_planner.h
@@ -35,6 +35,10 @@
   int GetBufferCount() override;
   TfLiteStatus GetOffsetForBuffer(int buffer_index, int* offset) override;
 
+  // Returns True because the LinearMemoryPlanner preserves all tensors after
+  // invocation.
+  bool preserves_all_tensors() const override { return true; }
+
  private:
   static constexpr int kMaxBufferCount = 1024;
   size_t buffer_offsets_[kMaxBufferCount];
diff --git a/tensorflow/lite/micro/memory_planner/micro_memory_planner.h b/tensorflow/lite/micro/memory_planner/micro_memory_planner.h
index 0bfe693..035f467 100644
--- a/tensorflow/lite/micro/memory_planner/micro_memory_planner.h
+++ b/tensorflow/lite/micro/memory_planner/micro_memory_planner.h
@@ -81,6 +81,10 @@
     return kTfLiteOk;
   }
 
+  // Method will return True if the MicroMemoryPlanner preserves all tensors
+  // after invocation, and False if it doesn't.
+  virtual bool preserves_all_tensors() const = 0;
+
   virtual void PrintMemoryPlan() {
     // Default does nothing.
   }
diff --git a/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h b/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h
index 8f9bb26..13a3fad 100644
--- a/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h
+++ b/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h
@@ -115,6 +115,10 @@
   size_t GetMaximumMemorySize() override;
   int GetBufferCount() override;
 
+  // Returns False because the NonPersistentMemoryPlannerShim doesn't preserves
+  // all tensors after invocation.
+  bool preserves_all_tensors() const override { return false; }
+
  private:
   const BufferPlan* buffer_plan_;  // not owned, can't be null
 
diff --git a/tensorflow/lite/micro/micro_allocation_info.h b/tensorflow/lite/micro/micro_allocation_info.h
index 688d04e..c0c7bd5 100644
--- a/tensorflow/lite/micro/micro_allocation_info.h
+++ b/tensorflow/lite/micro/micro_allocation_info.h
@@ -129,7 +129,8 @@
 
   const tflite::Model* model_ = nullptr;
   INonPersistentBufferAllocator* non_persistent_allocator_ = nullptr;
-  GraphAllocationInfo info_;
+  GraphAllocationInfo info_ =
+      {};  // Prevents problems caused by accessing uninitialized memory.
   int allocation_scope_count_ = 0;
 };
 
diff --git a/tensorflow/lite/micro/micro_allocator.cc b/tensorflow/lite/micro/micro_allocator.cc
index ba7cb66..930da75 100644
--- a/tensorflow/lite/micro/micro_allocator.cc
+++ b/tensorflow/lite/micro/micro_allocator.cc
@@ -28,13 +28,13 @@
 #include "tensorflow/lite/micro/flatbuffer_utils.h"
 #include "tensorflow/lite/micro/memory_helpers.h"
 #include "tensorflow/lite/micro/memory_planner/greedy_memory_planner.h"
+#include "tensorflow/lite/micro/memory_planner/linear_memory_planner.h"
 #include "tensorflow/lite/micro/memory_planner/micro_memory_planner.h"
 #include "tensorflow/lite/micro/micro_allocation_info.h"
 #include "tensorflow/lite/micro/micro_arena_constants.h"
 #include "tensorflow/lite/micro/micro_log.h"
 #include "tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h"
 #include "tensorflow/lite/schema/schema_generated.h"
-#include "tensorflow/lite/schema/schema_utils.h"
 
 namespace tflite {
 
@@ -71,6 +71,29 @@
   IPersistentBufferAllocator* persistent_allocator_;
 };
 
+MicroMemoryPlanner* CreateMemoryPlanner(
+    MemoryPlannerType memory_planner_type,
+    IPersistentBufferAllocator* memory_allocator) {
+  MicroMemoryPlanner* memory_planner = nullptr;
+  uint8_t* memory_planner_buffer = nullptr;
+
+  switch (memory_planner_type) {
+    case MemoryPlannerType::kLinear: {
+      memory_planner_buffer = memory_allocator->AllocatePersistentBuffer(
+          sizeof(LinearMemoryPlanner), alignof(LinearMemoryPlanner));
+      memory_planner = new (memory_planner_buffer) LinearMemoryPlanner();
+      break;
+    }
+    case MemoryPlannerType::kGreedy: {
+      memory_planner_buffer = memory_allocator->AllocatePersistentBuffer(
+          sizeof(GreedyMemoryPlanner), alignof(GreedyMemoryPlanner));
+      memory_planner = new (memory_planner_buffer) GreedyMemoryPlanner();
+      break;
+    }
+  }
+  return memory_planner;
+}
+
 TfLiteStatus CreatePlan(MicroMemoryPlanner* planner,
                         const AllocationInfo* allocation_info,
                         size_t allocation_info_size) {
@@ -374,8 +397,8 @@
   return Create(memory_allocator, memory_planner);
 }
 
-MicroAllocator* MicroAllocator::Create(uint8_t* tensor_arena,
-                                       size_t arena_size) {
+MicroAllocator* MicroAllocator::Create(uint8_t* tensor_arena, size_t arena_size,
+                                       MemoryPlannerType memory_planner_type) {
   uint8_t* aligned_arena =
       AlignPointerUp(tensor_arena, MicroArenaBufferAlignment());
   size_t aligned_arena_size = tensor_arena + arena_size - aligned_arena;
@@ -384,10 +407,8 @@
 
   // By default create GreedyMemoryPlanner.
   // If a different MemoryPlanner is needed, use the other api.
-  uint8_t* memory_planner_buffer = memory_allocator->AllocatePersistentBuffer(
-      sizeof(GreedyMemoryPlanner), alignof(GreedyMemoryPlanner));
-  GreedyMemoryPlanner* memory_planner =
-      new (memory_planner_buffer) GreedyMemoryPlanner();
+  MicroMemoryPlanner* memory_planner =
+      CreateMemoryPlanner(memory_planner_type, memory_allocator);
 
   return Create(memory_allocator, memory_planner);
 }
@@ -408,7 +429,8 @@
 MicroAllocator* MicroAllocator::Create(uint8_t* persistent_tensor_arena,
                                        size_t persistent_arena_size,
                                        uint8_t* non_persistent_tensor_arena,
-                                       size_t non_persistent_arena_size) {
+                                       size_t non_persistent_arena_size,
+                                       MemoryPlannerType memory_planner_type) {
   TFLITE_DCHECK(persistent_tensor_arena != nullptr);
   TFLITE_DCHECK(non_persistent_tensor_arena != nullptr);
   TFLITE_DCHECK(persistent_tensor_arena != non_persistent_tensor_arena);
@@ -421,11 +443,22 @@
                                         non_persistent_arena_size,
                                         persistent_buffer_allocator);
 
-  uint8_t* memory_planner_buffer =
-      persistent_buffer_allocator->AllocatePersistentBuffer(
-          sizeof(GreedyMemoryPlanner), alignof(GreedyMemoryPlanner));
-  GreedyMemoryPlanner* memory_planner =
-      new (memory_planner_buffer) GreedyMemoryPlanner();
+  // TODO(b/297821738): this should be changed to CreateMemoryPlanner if
+  // possible once  it's figured out why it breaks the HifiMini Build
+  uint8_t* memory_planner_buffer = nullptr;
+  MicroMemoryPlanner* memory_planner = nullptr;
+
+  if (memory_planner_type == MemoryPlannerType::kGreedy) {
+    memory_planner_buffer =
+        persistent_buffer_allocator->AllocatePersistentBuffer(
+            sizeof(GreedyMemoryPlanner), alignof(GreedyMemoryPlanner));
+    memory_planner = new (memory_planner_buffer) GreedyMemoryPlanner();
+  } else if (memory_planner_type == MemoryPlannerType::kLinear) {
+    memory_planner_buffer =
+        persistent_buffer_allocator->AllocatePersistentBuffer(
+            sizeof(LinearMemoryPlanner), alignof(LinearMemoryPlanner));
+    memory_planner = new (memory_planner_buffer) LinearMemoryPlanner();
+  }
 
   uint8_t* micro_allocator_buffer =
       persistent_buffer_allocator->AllocatePersistentBuffer(
diff --git a/tensorflow/lite/micro/micro_allocator.h b/tensorflow/lite/micro/micro_allocator.h
index 3532577..0231722 100644
--- a/tensorflow/lite/micro/micro_allocator.h
+++ b/tensorflow/lite/micro/micro_allocator.h
@@ -66,6 +66,13 @@
 
 }  // namespace internal
 
+// Enum used to keep track of which MemoryPlanner is being used for
+// MicroAllocater::Create();
+enum class MemoryPlannerType {
+  kGreedy,
+  kLinear,
+};
+
 struct NodeAndRegistration {
   TfLiteNode node;
   const TFLMRegistration* registration;
@@ -117,7 +124,9 @@
   // Note: Please use alignas(16) to make sure tensor_arena is 16
   // bytes aligned, otherwise some head room will be wasted.
   // TODO(b/157615197): Cleanup constructor + factory usage.
-  static MicroAllocator* Create(uint8_t* tensor_arena, size_t arena_size);
+  static MicroAllocator* Create(
+      uint8_t* tensor_arena, size_t arena_size,
+      MemoryPlannerType memory_planner_type = MemoryPlannerType::kGreedy);
 
   // Creates a MicroAllocator instance from a given tensor arena and a given
   // MemoryPlanner. This arena will be managed by the created instance. Note:
@@ -137,14 +146,20 @@
   // SingleArenaBufferAllocator instance and the MemoryPlanner. This allocator
   // instance will use the SingleArenaBufferAllocator instance to manage
   // allocations internally.
-  static MicroAllocator* Create(uint8_t* persistent_tensor_arena,
-                                size_t persistent_arena_size,
-                                uint8_t* non_persistent_tensor_arena,
-                                size_t non_persistent_arena_size);
+  static MicroAllocator* Create(
+      uint8_t* persistent_tensor_arena, size_t persistent_arena_size,
+      uint8_t* non_persistent_tensor_arena, size_t non_persistent_arena_size,
+      MemoryPlannerType memory_planner_type = MemoryPlannerType::kGreedy);
 
   // Returns the fixed amount of memory overhead of MicroAllocator.
   static size_t GetDefaultTailUsage(bool is_memory_planner_given);
 
+  // Returns True if the MicroAllocator uses a LinearMemoryPlanner(is compatible
+  // with the PerserveAllTensors flag / feature ) and False otherwise.
+  bool preserves_all_tensor() const {
+    return memory_planner_->preserves_all_tensors();
+  };
+
   // Allocates internal resources required for model inference for each subgraph
   // from the arena.
   //
@@ -307,7 +322,9 @@
   IPersistentBufferAllocator* persistent_buffer_allocator_;
 
   // Allocator used to allocate persistent builtin data.
-  TfLiteBridgeBuiltinDataAllocator* builtin_data_allocator_;
+  TfLiteBridgeBuiltinDataAllocator* builtin_data_allocator_ =
+      nullptr;  // Initialized as nullptr to prevent any possible issues related
+                // to accessing uninitialized memory.
 
   // Activation buffer memory planner.
   MicroMemoryPlanner* memory_planner_;
diff --git a/tensorflow/lite/micro/micro_common.h b/tensorflow/lite/micro/micro_common.h
index dc0bc08..9ab427f 100644
--- a/tensorflow/lite/micro/micro_common.h
+++ b/tensorflow/lite/micro/micro_common.h
@@ -30,4 +30,9 @@
   const char* custom_name;
 };
 
+struct TFLMInferenceRegistration {
+  TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node);
+  void (*reset)(TfLiteContext* context, void* buffer);
+};
+
 #endif  // THIRD_PARTY_TFLITE_MICRO_TENSORFLOW_LITE_MICRO_MICRO_COMMON_H_
diff --git a/tensorflow/lite/micro/micro_context.cc b/tensorflow/lite/micro/micro_context.cc
index 352d2c0..680dee8 100644
--- a/tensorflow/lite/micro/micro_context.cc
+++ b/tensorflow/lite/micro/micro_context.cc
@@ -1,4 +1,4 @@
-/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -17,48 +17,16 @@
 
 #include <cstdarg>
 #include <cstddef>
-#include <cstdint>
 
 #include "tensorflow/lite/kernels/internal/compatibility.h"
+#include "tensorflow/lite/micro/micro_common.h"
 #include "tensorflow/lite/micro/micro_log.h"
+#include "tensorflow/lite/micro/micro_utils.h"
 
 namespace tflite {
-MicroContext::MicroContext(MicroAllocator* allocator, const Model* model,
-                           MicroGraph* graph)
-    : allocator_(*allocator),
-      graph_(*graph),
-      model_(model),
-      state_(InterpreterState::kInit) {}
+namespace {
 
-MicroContext::~MicroContext() {}
-
-void* MicroContext::AllocatePersistentBuffer(size_t bytes) {
-  TFLITE_DCHECK(state_ == InterpreterState::kPrepare ||
-                state_ == InterpreterState::kInit);
-  return allocator_.AllocatePersistentBuffer(bytes);
-}
-
-TfLiteStatus MicroContext::RequestScratchBufferInArena(size_t bytes,
-                                                       int* buffer_idx) {
-  TFLITE_DCHECK(state_ == InterpreterState::kPrepare);
-  return allocator_.RequestScratchBufferInArena(
-      bytes, graph_.GetCurrentSubgraphIndex(), buffer_idx);
-}
-
-void* MicroContext::GetScratchBuffer(int buffer_idx) {
-  TFLITE_DCHECK(state_ == InterpreterState::kInvoke);
-  ScratchBufferHandle* handle = scratch_buffer_handles_ + buffer_idx;
-  return handle->data;
-}
-
-TfLiteTensor* MicroContext::AllocateTempTfLiteTensor(int tensor_idx) {
-  return allocator_.AllocateTempTfLiteTensor(model_, graph_.GetAllocations(),
-                                             tensor_idx,
-                                             graph_.GetCurrentSubgraphIndex());
-}
-
-int MicroContext::GetTensorIndex(int index, int max_size,
-                                 const int* tensor_indices) {
+int GetTensorIndex(int index, int max_size, const int* tensor_indices) {
   if (index >= 0 && index < max_size) {
     const int tensor_index = tensor_indices[index];
     if (tensor_index != kTfLiteOptionalTensor) {
@@ -68,6 +36,105 @@
   return -1;
 }
 
+#ifdef USE_TFLM_COMPRESSION
+
+struct DecompressionState {
+  DecompressionState() = delete;
+
+  DecompressionState(const uint8_t* compressed_indices,
+                     const size_t count_indices,
+                     const CompressionTensorData& comp_data,
+                     const size_t num_channels)
+      : compressed_indices_(compressed_indices),
+        count_indices_(count_indices),
+        comp_data_(comp_data),
+        num_channels_(num_channels) {}
+
+  template <typename T>
+  T* DecompressToBuffer(void* buffer);
+
+  size_t GetNextTableIndex();
+  void UpdateBufferAndChannelIndex();
+
+ private:
+  const uint8_t* compressed_indices_;
+  const size_t count_indices_;
+  const CompressionTensorData& comp_data_;
+  const size_t num_channels_;
+  const size_t compressed_bit_width_ =
+      comp_data_.data.lut_data->compressed_bit_width;
+  size_t channel_ = 0;
+  size_t index_in_channel_ = 0;
+  const size_t elements_per_channel_ =
+      comp_data_.data.lut_data->use_alternate_axis
+          ? 1
+          : count_indices_ / num_channels_;
+  size_t buffer_index_ = 0;
+  size_t current_offset_ = 0;
+  size_t current_bits_remaining_ = 8;
+  uint8_t current_byte_ = compressed_indices_[0];
+};
+
+template <typename T>
+T* DecompressionState::DecompressToBuffer(void* buffer) {
+  while (buffer_index_ < count_indices_) {
+    const size_t table_index = GetNextTableIndex();
+    static_cast<T*>(buffer)[buffer_index_] =
+        static_cast<const T*>(comp_data_.data.lut_data->value_table)
+            [table_index +
+             (channel_ * comp_data_.data.lut_data->value_table_channel_stride)];
+    UpdateBufferAndChannelIndex();
+  }
+
+  return static_cast<T*>(buffer);
+}
+
+size_t DecompressionState::GetNextTableIndex() {
+  TFLITE_DCHECK(compressed_bit_width_ <= LookupTableData::kMaxBitWidth);
+  TFLITE_DCHECK(compressed_bit_width_ > 0);
+
+  size_t table_index_bits_to_fill = compressed_bit_width_;
+  size_t table_index = 0;
+
+  while (table_index_bits_to_fill > 0) {
+    if (current_bits_remaining_ == 0) {
+      current_offset_++;
+      current_byte_ = compressed_indices_[current_offset_];
+      current_bits_remaining_ = 8;
+    }
+
+    const uint8_t mask_bit_count =
+        std::min(table_index_bits_to_fill,
+                 std::min(compressed_bit_width_, current_bits_remaining_));
+    const uint8_t current_byte_mask = (1 << mask_bit_count) - 1;
+    table_index <<= mask_bit_count;
+    table_index |=
+        (current_byte_ >> (current_bits_remaining_ - mask_bit_count)) &
+        current_byte_mask;
+
+    table_index_bits_to_fill -= mask_bit_count;
+    current_bits_remaining_ -= mask_bit_count;
+  }
+
+  return table_index;
+}
+
+void DecompressionState::UpdateBufferAndChannelIndex() {
+  buffer_index_++;
+  index_in_channel_++;
+  if (index_in_channel_ == elements_per_channel_) {
+    index_in_channel_ = 0;
+    channel_++;
+    if (channel_ == num_channels_) {
+      channel_ = 0;
+    }
+  }
+}
+
+#endif  // USE_TFLM_COMPRESSION
+
+}  // namespace
+
 TfLiteTensor* MicroContext::AllocateTempInputTensor(const TfLiteNode* node,
                                                     int index) {
   const int tensor_index =
@@ -98,46 +165,6 @@
   return AllocateTempTfLiteTensor(tensor_index);
 }
 
-void MicroContext::DeallocateTempTfLiteTensor(TfLiteTensor* tensor) {
-  return allocator_.DeallocateTempTfLiteTensor(tensor);
-}
-
-uint8_t* MicroContext::AllocateTempBuffer(size_t size, size_t alignment) {
-  TFLITE_DCHECK(state_ == InterpreterState::kPrepare);
-  return allocator_.AllocateTempBuffer(size, alignment);
-}
-
-void MicroContext::DeallocateTempBuffer(uint8_t* buffer) {
-  TFLITE_DCHECK(state_ == InterpreterState::kPrepare);
-  allocator_.DeallocateTempBuffer(buffer);
-}
-
-TfLiteEvalTensor* MicroContext::GetEvalTensor(int tensor_idx) {
-  return &graph_.GetAllocations()[graph_.GetCurrentSubgraphIndex()]
-              .tensors[tensor_idx];
-}
-
-void MicroContext::SetScratchBufferHandles(
-    ScratchBufferHandle* scratch_buffer_handles) {
-  scratch_buffer_handles_ = scratch_buffer_handles;
-}
-
-TfLiteStatus MicroContext::set_external_context(
-    void* external_context_payload) {
-  TFLITE_DCHECK(state_ == InterpreterState::kPrepare ||
-                state_ == InterpreterState::kInvoke);
-  if (external_context_payload == nullptr ||
-      external_context_payload_ != nullptr) {
-    MicroPrintf(
-        "Attempting to set external context to %x but it was %x already",
-        external_context_payload, external_context_payload_);
-    return kTfLiteError;
-  }
-
-  external_context_payload_ = external_context_payload;
-  return kTfLiteOk;
-}
-
 void MicroContextReportOpError(struct TfLiteContext* context,
                                const char* format, ...) {
   va_list args;
@@ -146,12 +173,56 @@
   va_end(args);
 }
 
-void MicroContext::SetInterpreterState(MicroContext::InterpreterState state) {
-  state_ = state;
+#ifdef USE_TFLM_COMPRESSION
+
+void* MicroContext::DecompressTensorToScratchBuffer(
+    const TfLiteEvalTensor& tensor,
+    const CompressionTensorData& compression_data, int scratch_buffer_handle) {
+  TFLITE_DCHECK(compression_data.scheme == CompressionScheme::kBinQuant);
+  TFLITE_DCHECK(scratch_buffer_handle != -1);
+  void* scratch_buffer = GetScratchBuffer(scratch_buffer_handle);
+  TFLITE_DCHECK(scratch_buffer != nullptr);
+  size_t count = ElementCount(*tensor.dims);
+  size_t num_channels = 1;
+
+  if (compression_data.data.lut_data->is_per_channel_quantized) {
+    const size_t channel_axis =
+        compression_data.data.lut_data->use_alternate_axis
+            ? tensor.dims->size - 1
+            : 0;
+    num_channels = tensor.dims->data[channel_axis];
+  }
+
+  DecompressionState ds(static_cast<uint8_t*>(tensor.data.data), count,
+                        compression_data, num_channels);
+
+  switch (tensor.type) {
+    case kTfLiteBool: {
+      return ds.DecompressToBuffer<bool>(scratch_buffer);
+    } break;
+    case kTfLiteInt8: {
+      return ds.DecompressToBuffer<int8_t>(scratch_buffer);
+    } break;
+    case kTfLiteInt16: {
+      return ds.DecompressToBuffer<int16_t>(scratch_buffer);
+    } break;
+    case kTfLiteInt32: {
+      return ds.DecompressToBuffer<int32_t>(scratch_buffer);
+    } break;
+    case kTfLiteInt64: {
+      return ds.DecompressToBuffer<int64_t>(scratch_buffer);
+    } break;
+    case kTfLiteFloat32: {
+      return ds.DecompressToBuffer<float>(scratch_buffer);
+    } break;
+    default: {
+      MicroPrintf("Unsupported decompression tensor type %d", tensor.type);
+    } break;
+  }
+
+  return nullptr;
 }
 
-MicroContext::InterpreterState MicroContext::GetInterpreterState() const {
-  return state_;
-}
+#endif  // USE_TFLM_COMPRESSION
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/micro_context.h b/tensorflow/lite/micro/micro_context.h
index 73df9e6..33cad89 100644
--- a/tensorflow/lite/micro/micro_context.h
+++ b/tensorflow/lite/micro/micro_context.h
@@ -1,4 +1,4 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -17,9 +17,14 @@
 #define TENSORFLOW_LITE_MICRO_MICRO_CONTEXT_H_
 
 #include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/micro/micro_allocator.h"
 #include "tensorflow/lite/micro/micro_graph.h"
 
+#ifdef USE_TFLM_COMPRESSION
+
+#include "tensorflow/lite/micro/compression.h"
+
+#endif  // USE_TFLM_COMPRESSION
+
 namespace tflite {
 // TODO(b/149795762): kTfLiteAbort cannot be part of the tflite TfLiteStatus.
 const TfLiteStatus kTfLiteAbort = static_cast<TfLiteStatus>(15);
@@ -32,117 +37,95 @@
 // micro_context-><TFLM kernel API>
 class MicroContext {
  public:
-  // Enum that allows MicroContext to keep track of the stages different memory
-  // planning APIs are available to kernels.
-  enum class InterpreterState {
-    kInit,
-    kPrepare,
-    kMemoryPlanning,
-    kInvoke,
-  };
-
-  // Does not take any ownership, and all pointers must refer to valid objects
-  // that outlive the one constructed.
-  explicit MicroContext(MicroAllocator* allocator, const Model* model,
-                        MicroGraph* graph);
-  virtual ~MicroContext();
+  virtual ~MicroContext() = default;
 
   // Allocate persistent buffer which has the same life time as the interpreter.
   // Returns nullptr on failure.
   // The memory is allocated from the tail.
   // This method is only available in Init or Prepare stage.
-  // Virtual so that it can be faked for kernel tests.
-  virtual void* AllocatePersistentBuffer(size_t bytes);
+  virtual void* AllocatePersistentBuffer(size_t bytes) = 0;
 
   // Request a scratch buffer in the arena through static memory planning.
   // This method is only available in Prepare stage and the buffer is allocated
   // by the interpreter between Prepare and Eval stage. In Eval stage,
   // GetScratchBuffer API can be used to fetch the address.
-  // Virtual so that it can be faked for kernel tests.
   virtual TfLiteStatus RequestScratchBufferInArena(size_t bytes,
-                                                   int* buffer_idx);
+                                                   int* buffer_idx) = 0;
 
   // Get the scratch buffer pointer.
   // This method is only available in Eval stage.
-  // Virtual so that it can be faked for kernel tests.
-  virtual void* GetScratchBuffer(int buffer_idx);
+  virtual void* GetScratchBuffer(int buffer_idx) = 0;
 
   // Returns a temporary TfLiteTensor struct for a given index.
-  // Virtual so that it can be faked for kernel tests.
-  virtual TfLiteTensor* AllocateTempTfLiteTensor(int tensor_idx);
+  virtual TfLiteTensor* AllocateTempTfLiteTensor(int tensor_idx) = 0;
 
   // Returns a temporary TfLiteTensor struct for the specified input tensor of a
   // given mode. This is the recommended API over the deprecated
   // GetInput/GetInputSafe to get a temp input tensor. The returned tensor shall
   // be freed via calling DeallocateTempTfLiteTensor.
-  virtual TfLiteTensor* AllocateTempInputTensor(const TfLiteNode* node,
-                                                int index);
+  TfLiteTensor* AllocateTempInputTensor(const TfLiteNode* node, int index);
 
   // Returns a temporary TfLiteTensor struct for the specified output tensor of
   // a given mode. This is the recommended API over the deprecated
   // GetOutput/GetOutputSafe to get a temp output tensor. The returned tensor
   // shall be freed via calling DeallocateTempTfLiteTensor.
-  virtual TfLiteTensor* AllocateTempOutputTensor(const TfLiteNode* node,
-                                                 int index);
+  TfLiteTensor* AllocateTempOutputTensor(const TfLiteNode* node, int index);
 
   // Returns a temporary TfLiteTensor struct for the specified intermediate
   // tensor of a given mode. This is the recommended API over the deprecated
   // GetIntermediates/GetIntermediatesSafe to get a temp intermediate tensor.
   // The returned tensor shall be freed via calling DeallocateTempTfLiteTensor.
-  virtual TfLiteTensor* AllocateTempIntermediateTensor(const TfLiteNode* node,
-                                                       int index);
+  TfLiteTensor* AllocateTempIntermediateTensor(const TfLiteNode* node,
+                                               int index);
 
   // Deallocates a temp TfLiteTensor.
-  // Virtual so that it can be faked for kernel tests.
-  virtual void DeallocateTempTfLiteTensor(TfLiteTensor* tensor);
+  virtual void DeallocateTempTfLiteTensor(TfLiteTensor* tensor) = 0;
 
   // Returns a pointer to a temporary buffer (from the arena).
   // This API is only valid from the kernel's Prepare function and
   // the buffer's lifetime is also that of the Prepare function.
-  // Virtual so that it can be faked for kernel tests.
-  virtual uint8_t* AllocateTempBuffer(size_t size, size_t alignment);
+  virtual uint8_t* AllocateTempBuffer(size_t size, size_t alignment) = 0;
 
   // Signals that the temporary buffer is no longer needed.
-  // Virtual so that it can be faked for kernel tests.
-  virtual void DeallocateTempBuffer(uint8_t* buffer);
+  virtual void DeallocateTempBuffer(uint8_t* buffer) = 0;
 
   // Returns a TfLiteEvalTensor struct for a given index.
-  // Virtual so that it can be faked for kernel tests.
-  virtual TfLiteEvalTensor* GetEvalTensor(int tensor_idx);
-
-  // Sets the State of MemoryPlanning MicroContext
-  void SetInterpreterState(MicroContext::InterpreterState state);
-
-  // Sets the State of MemoryPlanning MicroContext
-  MicroContext::InterpreterState GetInterpreterState() const;
+  virtual TfLiteEvalTensor* GetEvalTensor(int tensor_idx) = 0;
 
   // Does not take ownership of the pointer and the pointer must refer to valid
   // an object that outlive this class instance.
   // This can only be called once to set one external context.
-  TfLiteStatus set_external_context(void* external_context_payload);
+  virtual TfLiteStatus set_external_context(void* external_context_payload) = 0;
 
-  void* external_context() { return external_context_payload_; }
+  virtual void* external_context() = 0;
 
-  MicroGraph& graph() { return graph_; }
+  virtual MicroGraph& graph() = 0;
 
-  // Sets the pointer to a list of ScratchBufferHandle instances.
-  // Not API between TFLM and kernels. Primarily used by the framework for
-  // housekeeping in MicroContext.
-  void SetScratchBufferHandles(ScratchBufferHandle* scratch_buffer_handles);
+#ifdef USE_TFLM_COMPRESSION
+
+  // Available during Prepare & Eval. Returns false if tensor is not
+  // compressed.
+  virtual bool IsTensorCompressed(const TfLiteNode* node, int tensor_idx) = 0;
+
+  // Only available during Prepare. The kernel is responsible for storing the
+  // scratch buffer handle.
+  virtual int AllocateDecompressionScratchBuffer(const TfLiteNode* node,
+                                                 int tensor_idx) = 0;
+
+  // Available during Prepare & Eval. Returns nullptr if tensor is not
+  // compressed.
+  virtual const CompressionTensorData* GetTensorCompressionData(
+      const TfLiteNode* node, int tensor_idx) = 0;
+
+  // Only available during Eval. Returns nullptr on failure, otherwise returns a
+  // pointer to the scratch buffer.
+  virtual void* DecompressTensorToScratchBuffer(
+      const TfLiteEvalTensor& tensor,
+      const CompressionTensorData& compression_data, int scratch_buffer_handle);
+
+#endif  // USE_TFLM_COMPRESSION
 
  private:
-  // Return the tensor index as tensor_indices[index]. tensor_indices is of
-  // max_size. Return -1 if index is not in the valid range of tensor_indices.
-  int GetTensorIndex(int index, int max_size, const int* tensor_indices);
-
-  MicroAllocator& allocator_;
-  MicroGraph& graph_;
-  const Model* model_;
-  InterpreterState state_;
-
-  ScratchBufferHandle* scratch_buffer_handles_ = nullptr;
-  void* external_context_payload_ = nullptr;
-
   TF_LITE_REMOVE_VIRTUAL_DELETE
 };
 
diff --git a/tensorflow/lite/micro/micro_graph.h b/tensorflow/lite/micro/micro_graph.h
index ca8c40e..79b3649 100644
--- a/tensorflow/lite/micro/micro_graph.h
+++ b/tensorflow/lite/micro/micro_graph.h
@@ -16,90 +16,44 @@
 #ifndef TENSORFLOW_LITE_MICRO_MICRO_GRAPH_H_
 #define TENSORFLOW_LITE_MICRO_MICRO_GRAPH_H_
 
-#include "tensorflow/lite/micro/micro_allocator.h"
+#include "tensorflow/lite/micro/compatibility.h"
 #include "tensorflow/lite/micro/micro_common.h"
 #include "tensorflow/lite/micro/micro_resource_variable.h"
-#include "tensorflow/lite/schema/schema_generated.h"
 
 namespace tflite {
 
-// Abstracts the details of interacting with the tflite::Model.
+// Abstracts the details of interacting with the graph from the kernels
 //
-// Provides methods to access, initialize, prepare, invoke and free any
-// subgraph in the tflite::Graph.
+// Provides methods to invoke any subgraph in the tflite::Graph.
 class MicroGraph {
  public:
-  // The lifetime of the context, model, allocator and resource_variables must
-  // be at least as long as that of the graph object, since the this class may
-  // need to access them at any time. If resource_variables is a nullptr,
-  // GetResourceVariables will return a nullptr.
-  MicroGraph(TfLiteContext* context, const Model* model,
-             MicroAllocator* allocator,
-             MicroResourceVariables* resource_variables);
-  virtual ~MicroGraph();
-
-  // Sets up builtin data and calls TFLMRegistration->Init for every
-  // operator in every subgraph in the model.
-  virtual TfLiteStatus InitSubgraphs();
-
-  // Calls TFLMRegistration->Prepare for every operator in every subgraph
-  // in the model.
-  virtual TfLiteStatus PrepareSubgraphs();
-
-  // Calls TFLMRegistration->Reset for every operator in every subgraph in
-  // the model.
-  virtual TfLiteStatus ResetSubgraphs();
-
-  // Calls TFLMRegistration->Free for every operator in every subgraph in
-  // the model.
-  virtual TfLiteStatus FreeSubgraphs();
+  virtual ~MicroGraph() = default;
 
   // Calls TFLMRegistration->Invoke for every operator in a single subgraph
   // in the model.
-  virtual TfLiteStatus InvokeSubgraph(int subgraph_idx);
-
-  // Zeros out all variable tensors in all subgraphs in the model.
-  virtual TfLiteStatus ResetVariableTensors();
+  virtual TfLiteStatus InvokeSubgraph(int subgraph_idx) = 0;
 
   // Number of tensor inputs to a specified subgraph in the model.
-  virtual size_t NumSubgraphInputs(int subgraph_idx);
+  virtual size_t NumSubgraphInputs(int subgraph_idx) = 0;
 
   // Get the specified input tensor of a specified subgraph in the model.
-  virtual TfLiteEvalTensor* GetSubgraphInput(int subgraph_idx, int input_idx);
+  virtual TfLiteEvalTensor* GetSubgraphInput(int subgraph_idx,
+                                             int input_idx) = 0;
 
   // Number of tensor outputs from a specified subgraph in the model.
-  virtual size_t NumSubgraphOutputs(int subgraph_idx);
+  virtual size_t NumSubgraphOutputs(int subgraph_idx) = 0;
 
   // Get the specified output tensor of a specified subgraph in the model.
-  virtual TfLiteEvalTensor* GetSubgraphOutput(int subgraph_idx, int output_idx);
+  virtual TfLiteEvalTensor* GetSubgraphOutput(int subgraph_idx,
+                                              int output_idx) = 0;
 
   // Number of subgraphs in the model.
-  virtual int NumSubgraphs();
-
-  // Hook to pass in subgraph allocations tracked within the interpreter,
-  // allowing MicroGraph to init / prepare / invoke subgraphs in the model.
-  void SetSubgraphAllocations(SubgraphAllocations* subgraph_allocations);
-
-  // Get the current subgraph index. Within an on operator, this is guaranteed
-  // to be the subgraph of that operator.
-  int GetCurrentSubgraphIndex() { return current_subgraph_index_; }
-
-  // Gets the list of alloctions for each subgraph. This is the source of truth
-  // for all per-subgraph allocation data.
-  SubgraphAllocations* GetAllocations() { return subgraph_allocations_; }
+  virtual int NumSubgraphs() = 0;
 
   // Get the resource variables for this TFLM graph.
-  MicroResourceVariables* GetResourceVariables() { return resource_variables_; }
+  virtual MicroResourceVariables* GetResourceVariables() = 0;
 
  private:
-  TfLiteContext* context_;
-  const Model* model_;
-  MicroAllocator* allocator_;
-  SubgraphAllocations* subgraph_allocations_ = nullptr;
-  int current_subgraph_index_;
-  MicroResourceVariables* resource_variables_;
-  const flatbuffers::Vector<flatbuffers::Offset<SubGraph>>* subgraphs_;
-
   TF_LITE_REMOVE_VIRTUAL_DELETE
 };
 
diff --git a/tensorflow/lite/micro/micro_interpreter.cc b/tensorflow/lite/micro/micro_interpreter.cc
index c6917b4..7f4565e 100644
--- a/tensorflow/lite/micro/micro_interpreter.cc
+++ b/tensorflow/lite/micro/micro_interpreter.cc
@@ -24,7 +24,7 @@
 #include "tensorflow/lite/micro/flatbuffer_utils.h"
 #include "tensorflow/lite/micro/memory_helpers.h"
 #include "tensorflow/lite/micro/micro_allocator.h"
-#include "tensorflow/lite/micro/micro_context.h"
+#include "tensorflow/lite/micro/micro_interpreter_context.h"
 #include "tensorflow/lite/micro/micro_log.h"
 #include "tensorflow/lite/micro/micro_op_resolver.h"
 #include "tensorflow/lite/micro/micro_profiler_interface.h"
@@ -33,17 +33,28 @@
 #include "tensorflow/lite/schema/schema_utils.h"
 
 namespace tflite {
+namespace {
+MemoryPlannerType FlagToMemoryPlannerType(bool preserve_all_tensors) {
+  if (preserve_all_tensors) {
+    return MemoryPlannerType::kLinear;
+  } else {
+    return MemoryPlannerType::kGreedy;
+  }
+}
+}  // namespace
 
 MicroInterpreter::MicroInterpreter(const Model* model,
                                    const MicroOpResolver& op_resolver,
                                    uint8_t* tensor_arena,
                                    size_t tensor_arena_size,
                                    MicroResourceVariables* resource_variables,
-                                   MicroProfilerInterface* profiler)
+                                   MicroProfilerInterface* profiler,
+                                   bool preserve_all_tensors)
     : model_(model),
       op_resolver_(op_resolver),
-      allocator_(*MicroAllocator::Create(tensor_arena, tensor_arena_size)),
-
+      allocator_(*MicroAllocator::Create(
+          tensor_arena, tensor_arena_size,
+          FlagToMemoryPlannerType(preserve_all_tensors))),
       graph_(&context_, model, &allocator_, resource_variables),
       tensors_allocated_(false),
       initialization_status_(kTfLiteError),
@@ -77,7 +88,8 @@
 }
 
 void MicroInterpreter::Init(MicroProfilerInterface* profiler) {
-  micro_context_.SetInterpreterState(MicroContext::InterpreterState::kInit);
+  micro_context_.SetInterpreterState(
+      MicroInterpreterContext::InterpreterState::kInit);
   context_.impl_ = static_cast<void*>(&micro_context_);
   context_.ReportError = MicroContextReportOpError;
   context_.GetTensor = MicroContextGetTensor;
@@ -198,15 +210,17 @@
 
   TF_LITE_ENSURE_STATUS(PrepareNodeAndRegistrationDataFromFlatbuffer());
 
-  micro_context_.SetInterpreterState(MicroContext::InterpreterState::kInit);
+  micro_context_.SetInterpreterState(
+      MicroInterpreterContext::InterpreterState::kInit);
   TF_LITE_ENSURE_STATUS(graph_.InitSubgraphs());
 
-  micro_context_.SetInterpreterState(MicroContext::InterpreterState::kPrepare);
+  micro_context_.SetInterpreterState(
+      MicroInterpreterContext::InterpreterState::kPrepare);
 
   TF_LITE_ENSURE_STATUS(graph_.PrepareSubgraphs());
 
   micro_context_.SetInterpreterState(
-      MicroContext::InterpreterState::kMemoryPlanning);
+      MicroInterpreterContext::InterpreterState::kMemoryPlanning);
 
   TF_LITE_ENSURE_OK(&context_, allocator_.FinishModelAllocation(
                                    model_, graph_.GetAllocations(),
@@ -261,7 +275,8 @@
   TF_LITE_ENSURE_STATUS(Reset());
 
   tensors_allocated_ = true;
-  micro_context_.SetInterpreterState(MicroContext::InterpreterState::kInvoke);
+  micro_context_.SetInterpreterState(
+      MicroInterpreterContext::InterpreterState::kInvoke);
   return kTfLiteOk;
 }
 
@@ -305,6 +320,15 @@
   return graph_.ResetVariableTensors();
 }
 
+TfLiteEvalTensor* MicroInterpreter::GetTensor(int tensor_index,
+                                              int subgraph_index) {
+  if (!allocator_.preserves_all_tensor()) {
+    MicroPrintf("GetTensor requires all tensors to be preserved");
+    return nullptr;
+  }
+  return &graph_.GetAllocations()[subgraph_index].tensors[tensor_index];
+}
+
 TfLiteStatus MicroInterpreter::SetMicroExternalContext(
     void* external_context_payload) {
   return micro_context_.set_external_context(external_context_payload);
diff --git a/tensorflow/lite/micro/micro_interpreter.h b/tensorflow/lite/micro/micro_interpreter.h
index a77b0e0..1c41996 100644
--- a/tensorflow/lite/micro/micro_interpreter.h
+++ b/tensorflow/lite/micro/micro_interpreter.h
@@ -24,8 +24,8 @@
 #include "tensorflow/lite/core/api/error_reporter.h"
 #include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
 #include "tensorflow/lite/micro/micro_allocator.h"
-#include "tensorflow/lite/micro/micro_context.h"
-#include "tensorflow/lite/micro/micro_graph.h"
+#include "tensorflow/lite/micro/micro_interpreter_context.h"
+#include "tensorflow/lite/micro/micro_interpreter_graph.h"
 #include "tensorflow/lite/micro/micro_op_resolver.h"
 #include "tensorflow/lite/micro/micro_profiler_interface.h"
 #include "tensorflow/lite/portable_type_to_tflitetype.h"
@@ -50,7 +50,8 @@
   MicroInterpreter(const Model* model, const MicroOpResolver& op_resolver,
                    uint8_t* tensor_arena, size_t tensor_arena_size,
                    MicroResourceVariables* resource_variables = nullptr,
-                   MicroProfilerInterface* profiler = nullptr);
+                   MicroProfilerInterface* profiler = nullptr,
+                   bool preserve_all_tensors = false);
 
   // Create an interpreter instance using an existing MicroAllocator instance.
   // This constructor should be used when creating an allocator that needs to
@@ -115,6 +116,9 @@
     return nullptr;
   }
 
+  // Returns a pointer to the tensor for the corresponding tensor_index
+  TfLiteEvalTensor* GetTensor(int tensor_index, int subgraph_index = 0);
+
   // Reset the state to be what you would expect when the interpreter is first
   // created. i.e. after Init and Prepare is called for the very first time.
   TfLiteStatus Reset();
@@ -135,6 +139,13 @@
   // arena_used_bytes() + 16.
   size_t arena_used_bytes() const { return allocator_.used_bytes(); }
 
+  // Returns true if all tensors are being preserved
+  // TODO(b/297106074) : revisit making C++ example or test for
+  // preserve_all_tensors
+  bool preserve_all_tensors() const {
+    return allocator_.preserves_all_tensor();
+  }
+
  protected:
   const MicroAllocator& allocator() const { return allocator_; }
   const TfLiteContext& context() const { return context_; }
@@ -151,7 +162,7 @@
   const MicroOpResolver& op_resolver_;
   TfLiteContext context_ = {};
   MicroAllocator& allocator_;
-  MicroGraph graph_;
+  MicroInterpreterGraph graph_;
   bool tensors_allocated_;
 
   TfLiteStatus initialization_status_;
@@ -163,7 +174,7 @@
   TfLiteTensor** input_tensors_;
   TfLiteTensor** output_tensors_;
 
-  MicroContext micro_context_;
+  MicroInterpreterContext micro_context_;
 };
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/micro_interpreter_context.cc b/tensorflow/lite/micro/micro_interpreter_context.cc
new file mode 100644
index 0000000..0ba461f
--- /dev/null
+++ b/tensorflow/lite/micro/micro_interpreter_context.cc
@@ -0,0 +1,208 @@
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/lite/micro/micro_interpreter_context.h"
+
+#include <cstdint>
+
+#include "tensorflow/lite/kernels/internal/compatibility.h"
+#include "tensorflow/lite/micro/micro_utils.h"
+
+namespace tflite {
+
+namespace {
+
+#ifdef USE_TFLM_COMPRESSION
+
+int GetInputTensorIndex(const TfLiteNode* node, const int index) {
+  if (index >= 0 && index < node->inputs->size) {
+    const int tensor_index = node->inputs->data[index];
+    if (tensor_index != kTfLiteOptionalTensor) {
+      return tensor_index;
+    }
+  }
+  return -1;
+}
+
+#endif  // USE_TFLM_COMPRESSION
+
+}  // namespace
+
+MicroInterpreterContext::MicroInterpreterContext(MicroAllocator* allocator,
+                                                 const Model* model,
+                                                 MicroInterpreterGraph* graph)
+    : allocator_(*allocator),
+      graph_(*graph),
+      model_(model),
+      state_(InterpreterState::kInit) {}
+
+MicroInterpreterContext::~MicroInterpreterContext() {}
+
+void* MicroInterpreterContext::AllocatePersistentBuffer(size_t bytes) {
+  TFLITE_DCHECK(state_ == InterpreterState::kPrepare ||
+                state_ == InterpreterState::kInit);
+  return allocator_.AllocatePersistentBuffer(bytes);
+}
+
+TfLiteStatus MicroInterpreterContext::RequestScratchBufferInArena(
+    size_t bytes, int* buffer_idx) {
+  TFLITE_DCHECK(state_ == InterpreterState::kPrepare);
+  return allocator_.RequestScratchBufferInArena(
+      bytes, graph_.GetCurrentSubgraphIndex(), buffer_idx);
+}
+
+void* MicroInterpreterContext::GetScratchBuffer(int buffer_idx) {
+  TFLITE_DCHECK(state_ == InterpreterState::kInvoke);
+  ScratchBufferHandle* handle = scratch_buffer_handles_ + buffer_idx;
+  return handle->data;
+}
+
+TfLiteTensor* MicroInterpreterContext::AllocateTempTfLiteTensor(
+    int tensor_idx) {
+  return allocator_.AllocateTempTfLiteTensor(model_, graph_.GetAllocations(),
+                                             tensor_idx,
+                                             graph_.GetCurrentSubgraphIndex());
+}
+
+void MicroInterpreterContext::DeallocateTempTfLiteTensor(TfLiteTensor* tensor) {
+  return allocator_.DeallocateTempTfLiteTensor(tensor);
+}
+
+uint8_t* MicroInterpreterContext::AllocateTempBuffer(size_t size,
+                                                     size_t alignment) {
+  TFLITE_DCHECK(state_ == InterpreterState::kPrepare);
+  return allocator_.AllocateTempBuffer(size, alignment);
+}
+
+void MicroInterpreterContext::DeallocateTempBuffer(uint8_t* buffer) {
+  TFLITE_DCHECK(state_ == InterpreterState::kPrepare);
+  allocator_.DeallocateTempBuffer(buffer);
+}
+
+TfLiteEvalTensor* MicroInterpreterContext::GetEvalTensor(int tensor_idx) {
+  return &graph_.GetAllocations()[graph_.GetCurrentSubgraphIndex()]
+              .tensors[tensor_idx];
+}
+
+void MicroInterpreterContext::SetScratchBufferHandles(
+    ScratchBufferHandle* scratch_buffer_handles) {
+  scratch_buffer_handles_ = scratch_buffer_handles;
+}
+
+TfLiteStatus MicroInterpreterContext::set_external_context(
+    void* external_context_payload) {
+  TFLITE_DCHECK(state_ == InterpreterState::kPrepare ||
+                state_ == InterpreterState::kInvoke);
+  if (external_context_payload == nullptr ||
+      external_context_payload_ != nullptr) {
+    MicroPrintf(
+        "Attempting to set external context to %x but it was %x already",
+        external_context_payload, external_context_payload_);
+    return kTfLiteError;
+  }
+
+  external_context_payload_ = external_context_payload;
+  return kTfLiteOk;
+}
+
+void MicroInterpreterContext::SetInterpreterState(InterpreterState state) {
+  state_ = state;
+}
+
+MicroInterpreterContext::InterpreterState
+MicroInterpreterContext::GetInterpreterState() const {
+  return state_;
+}
+
+#ifdef USE_TFLM_COMPRESSION
+
+// Available during Prepare & Eval. Returns false if tensor is not
+// compressed.
+bool MicroInterpreterContext::IsTensorCompressed(const TfLiteNode* node,
+                                                 int tensor_idx) {
+  TFLITE_DCHECK(state_ == InterpreterState::kPrepare ||
+                state_ == InterpreterState::kInvoke);
+
+  const SubgraphAllocations* allocations =
+      &graph_.GetAllocations()[graph_.GetCurrentSubgraphIndex()];
+  if (allocations->compressed.tensors == nullptr) {
+    return false;
+  }
+  int index = GetInputTensorIndex(node, tensor_idx);
+  if (index == -1) {
+    return false;
+  }
+  return allocations->compressed.tensors[index] != nullptr;
+}
+
+// Only available during Prepare. The kernel is responsible for storing the
+// scratch buffer handle.
+int MicroInterpreterContext::AllocateDecompressionScratchBuffer(
+    const TfLiteNode* node, int tensor_idx) {
+  TFLITE_DCHECK(state_ == InterpreterState::kPrepare);
+
+  const SubgraphAllocations* allocations =
+      &graph_.GetAllocations()[graph_.GetCurrentSubgraphIndex()];
+  if (allocations->compressed.tensors == nullptr) {
+    return -1;
+  }
+  int index = GetInputTensorIndex(node, tensor_idx);
+  if (index == -1 || allocations->compressed.tensors[index] == nullptr) {
+    return -1;
+  }
+  const TfLiteEvalTensor* tensor = &allocations->tensors[index];
+  const size_t byte_count = EvalTensorBytes(tensor);
+  int scratch_index = -1;
+  TfLiteStatus result = RequestScratchBufferInArena(byte_count, &scratch_index);
+  if (result != kTfLiteOk) {
+    return -1;
+  }
+
+  return scratch_index;
+}
+
+// Available during Prepare & Eval. Returns nullptr if tensor is not
+// compressed.
+const CompressionTensorData* MicroInterpreterContext::GetTensorCompressionData(
+    const TfLiteNode* node, int tensor_idx) {
+  TFLITE_DCHECK(state_ == InterpreterState::kPrepare ||
+                state_ == InterpreterState::kInvoke);
+
+  const SubgraphAllocations* allocations =
+      &graph_.GetAllocations()[graph_.GetCurrentSubgraphIndex()];
+  if (allocations->compressed.tensors == nullptr) {
+    return nullptr;
+  }
+  int index = GetInputTensorIndex(node, tensor_idx);
+  if (index == -1) {
+    return nullptr;
+  }
+  return allocations->compressed.tensors[index];
+}
+
+// Only available during Eval. Returns nullptr on failure, otherwise returns a
+// pointer to the scratch buffer.
+void* MicroInterpreterContext::DecompressTensorToScratchBuffer(
+    const TfLiteEvalTensor& tensor,
+    const CompressionTensorData& compression_data, int scratch_buffer_handle) {
+  TFLITE_DCHECK(state_ == InterpreterState::kInvoke);
+
+  return MicroContext::DecompressTensorToScratchBuffer(tensor, compression_data,
+                                                       scratch_buffer_handle);
+}
+
+#endif  // USE_TFLM_COMPRESSION
+
+}  // namespace tflite
diff --git a/tensorflow/lite/micro/micro_interpreter_context.h b/tensorflow/lite/micro/micro_interpreter_context.h
new file mode 100644
index 0000000..7b336aa
--- /dev/null
+++ b/tensorflow/lite/micro/micro_interpreter_context.h
@@ -0,0 +1,148 @@
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_LITE_MICRO_MICRO_INTERPRETER_CONTEXT_H_
+#define TENSORFLOW_LITE_MICRO_MICRO_INTERPRETER_CONTEXT_H_
+
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/micro/micro_allocator.h"
+#include "tensorflow/lite/micro/micro_context.h"
+#include "tensorflow/lite/micro/micro_interpreter_graph.h"
+#include "tensorflow/lite/micro/micro_log.h"
+
+namespace tflite {
+
+// A full implementation of the MicroContext, to be used by the
+// MicroInterpreter. Kernels should not depend on this directly. Instead they
+// should only depend on the MicroContext.
+class MicroInterpreterContext : public MicroContext {
+ public:
+  // Enum that allows MicroContext to keep track of the stages different memory
+  // planning APIs are available to kernels.
+  enum class InterpreterState {
+    kInit,
+    kPrepare,
+    kMemoryPlanning,
+    kInvoke,
+  };
+
+  // Does not take any ownership, and all pointers must refer to valid objects
+  // that outlive the one constructed.
+  MicroInterpreterContext(MicroAllocator* allocator, const Model* model,
+                          MicroInterpreterGraph* graph);
+  virtual ~MicroInterpreterContext();
+
+  // Allocate persistent buffer which has the same life time as the interpreter.
+  // Returns nullptr on failure.
+  // The memory is allocated from the tail.
+  // This method is only available in Init or Prepare stage.
+  // Virtual so that it can be faked for kernel tests.
+  virtual void* AllocatePersistentBuffer(size_t bytes) override;
+
+  // Request a scratch buffer in the arena through static memory planning.
+  // This method is only available in Prepare stage and the buffer is allocated
+  // by the interpreter between Prepare and Eval stage. In Eval stage,
+  // GetScratchBuffer API can be used to fetch the address.
+  // Virtual so that it can be faked for kernel tests.
+  virtual TfLiteStatus RequestScratchBufferInArena(size_t bytes,
+                                                   int* buffer_idx) override;
+
+  // Get the scratch buffer pointer.
+  // This method is only available in Eval stage.
+  // Virtual so that it can be faked for kernel tests.
+  virtual void* GetScratchBuffer(int buffer_idx) override;
+
+  // Returns a temporary TfLiteTensor struct for a given index.
+  // Virtual so that it can be faked for kernel tests.
+  virtual TfLiteTensor* AllocateTempTfLiteTensor(int tensor_idx) override;
+
+  // Deallocates a temp TfLiteTensor.
+  // Virtual so that it can be faked for kernel tests.
+  virtual void DeallocateTempTfLiteTensor(TfLiteTensor* tensor) override;
+
+  // Returns a pointer to a temporary buffer (from the arena).
+  // This API is only valid from the kernel's Prepare function and
+  // the buffer's lifetime is also that of the Prepare function.
+  // Virtual so that it can be faked for kernel tests.
+  virtual uint8_t* AllocateTempBuffer(size_t size, size_t alignment) override;
+
+  // Signals that the temporary buffer is no longer needed.
+  // Virtual so that it can be faked for kernel tests.
+  virtual void DeallocateTempBuffer(uint8_t* buffer) override;
+
+  // Returns a TfLiteEvalTensor struct for a given index.
+  // Virtual so that it can be faked for kernel tests.
+  virtual TfLiteEvalTensor* GetEvalTensor(int tensor_idx) override;
+
+  // Sets the State of MemoryPlanning MicroInterpreterContext
+  void SetInterpreterState(InterpreterState state);
+
+  // Gets the State of MemoryPlanning MicroInterpreterContext
+  InterpreterState GetInterpreterState() const;
+
+  // Does not take ownership of the pointer and the pointer must refer to a
+  // valid object that outlives this class instance.
+  // This can only be called once to set one external context.
+  TfLiteStatus set_external_context(void* external_context_payload) override;
+
+  void* external_context() override { return external_context_payload_; }
+
+  MicroGraph& graph() override { return graph_; }
+
+  // Sets the pointer to a list of ScratchBufferHandle instances.
+  // Not API between TFLM and kernels. Primarily used by the framework for
+  // housekeeping in MicroInterpreterContext.
+  void SetScratchBufferHandles(ScratchBufferHandle* scratch_buffer_handles);
+
+#ifdef USE_TFLM_COMPRESSION
+
+  // Available during Prepare & Eval. Returns false if tensor is not
+  // compressed.
+  bool IsTensorCompressed(const TfLiteNode* node, int tensor_idx) override;
+
+  // Only available during Prepare. The kernel is responsible for storing the
+  // scratch buffer handle.
+  int AllocateDecompressionScratchBuffer(const TfLiteNode* node,
+                                         int tensor_idx) override;
+
+  // Available during Prepare & Eval. Returns nullptr if tensor is not
+  // compressed.
+  const CompressionTensorData* GetTensorCompressionData(
+      const TfLiteNode* node, int tensor_idx) override;
+
+  // Only available during Eval. Returns nullptr on failure, otherwise returns a
+  // pointer to the scratch buffer.
+  void* DecompressTensorToScratchBuffer(
+      const TfLiteEvalTensor& tensor,
+      const CompressionTensorData& compression_data,
+      int scratch_buffer_handle) override;
+
+#endif  // USE_TFLM_COMPRESSION
+
+ private:
+  MicroAllocator& allocator_;
+  MicroInterpreterGraph& graph_;
+  const Model* model_;
+  InterpreterState state_;
+
+  ScratchBufferHandle* scratch_buffer_handles_ = nullptr;
+  void* external_context_payload_ = nullptr;
+
+  TF_LITE_REMOVE_VIRTUAL_DELETE
+};
+
+}  // namespace tflite
+
+#endif  // TENSORFLOW_LITE_MICRO_MICRO_INTERPRETER_CONTEXT_H_
diff --git a/tensorflow/lite/micro/micro_context_test.cc b/tensorflow/lite/micro/micro_interpreter_context_test.cc
similarity index 78%
rename from tensorflow/lite/micro/micro_context_test.cc
rename to tensorflow/lite/micro/micro_interpreter_context_test.cc
index e01d387..3af123f 100644
--- a/tensorflow/lite/micro/micro_context_test.cc
+++ b/tensorflow/lite/micro/micro_interpreter_context_test.cc
@@ -12,12 +12,13 @@
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#include "tensorflow/lite/micro/micro_context.h"
+#include "tensorflow/lite/micro/micro_interpreter_context.h"
 
 #include <cstdint>
 
 #include "tensorflow/lite/micro/micro_allocator.h"
 #include "tensorflow/lite/micro/micro_arena_constants.h"
+#include "tensorflow/lite/micro/micro_interpreter_graph.h"
 #include "tensorflow/lite/micro/test_helpers.h"
 #include "tensorflow/lite/micro/testing/micro_test.h"
 
@@ -26,23 +27,20 @@
 namespace tflite {
 namespace {
 
-tflite::MicroContext CreateMicroContext() {
+tflite::MicroInterpreterContext CreateMicroInterpreterContext() {
   // Some targets do not support dynamic memory (i.e., no malloc or new), thus,
   // the test need to place non-transient memories in static variables. This is
   // safe because tests are guaranteed to run serially.
-  constexpr size_t kMicroGraphPlacementBufferSize = 1024;
-  alignas(4) static uint8_t
-      micro_graph_placement_buffer[kMicroGraphPlacementBufferSize];
   constexpr size_t kArenaSize = 1024;
   static uint8_t tensor_arena[kArenaSize];
 
   const tflite::Model* model = tflite::testing::GetSimpleMockModel();
   MicroAllocator* micro_allocator =
       MicroAllocator::Create(tensor_arena, kArenaSize);
-  MicroGraph* micro_graph = new (micro_graph_placement_buffer)
-      MicroGraph(nullptr, nullptr, nullptr, nullptr);
+  static MicroInterpreterGraph micro_graph(nullptr, nullptr, nullptr, nullptr);
 
-  tflite::MicroContext micro_context(micro_allocator, model, micro_graph);
+  tflite::MicroInterpreterContext micro_context(micro_allocator, model,
+                                                &micro_graph);
   return micro_context;
 }
 
@@ -58,9 +56,10 @@
 
 // Ensures that a regular set and get pair works ok.
 TF_LITE_MICRO_TEST(TestSetGetExternalContextSuccess) {
-  tflite::MicroContext micro_context = tflite::CreateMicroContext();
+  tflite::MicroInterpreterContext micro_context =
+      tflite::CreateMicroInterpreterContext();
   micro_context.SetInterpreterState(
-      tflite::MicroContext::InterpreterState::kInvoke);
+      tflite::MicroInterpreterContext::InterpreterState::kInvoke);
 
   tflite::TestExternalContextPayloadData payload;
   TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk,
@@ -75,7 +74,8 @@
 }
 
 TF_LITE_MICRO_TEST(TestGetExternalContextWithoutSetShouldReturnNull) {
-  tflite::MicroContext micro_context = tflite::CreateMicroContext();
+  tflite::MicroInterpreterContext micro_context =
+      tflite::CreateMicroInterpreterContext();
 
   tflite::TestExternalContextPayloadData* returned_external_context =
       reinterpret_cast<tflite::TestExternalContextPayloadData*>(
@@ -86,9 +86,10 @@
 }
 
 TF_LITE_MICRO_TEST(TestSetExternalContextCanOnlyBeCalledOnce) {
-  tflite::MicroContext micro_context = tflite::CreateMicroContext();
+  tflite::MicroInterpreterContext micro_context =
+      tflite::CreateMicroInterpreterContext();
   micro_context.SetInterpreterState(
-      tflite::MicroContext::InterpreterState::kPrepare);
+      tflite::MicroInterpreterContext::InterpreterState::kPrepare);
   tflite::TestExternalContextPayloadData payload;
 
   TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk,
@@ -100,15 +101,17 @@
 }
 
 TF_LITE_MICRO_TEST(TestSetExternalContextToNullShouldFail) {
-  tflite::MicroContext micro_context = tflite::CreateMicroContext();
+  tflite::MicroInterpreterContext micro_context =
+      tflite::CreateMicroInterpreterContext();
   micro_context.SetInterpreterState(
-      tflite::MicroContext::InterpreterState::kPrepare);
+      tflite::MicroInterpreterContext::InterpreterState::kPrepare);
   TF_LITE_MICRO_EXPECT_EQ(kTfLiteError,
                           micro_context.set_external_context(nullptr));
 }
 
 TF_LITE_MICRO_TEST(TestGetTempInputTensor) {
-  tflite::MicroContext micro_context = tflite::CreateMicroContext();
+  tflite::MicroInterpreterContext micro_context =
+      tflite::CreateMicroInterpreterContext();
 
   TfLiteNode node;
   int input_data[] = {2, 0, 1};
@@ -127,7 +130,8 @@
 }
 
 TF_LITE_MICRO_TEST(TestGetTempOutputTensor) {
-  tflite::MicroContext micro_context = tflite::CreateMicroContext();
+  tflite::MicroInterpreterContext micro_context =
+      tflite::CreateMicroInterpreterContext();
 
   TfLiteNode node;
   int output_data[] = {1, 0};
@@ -143,16 +147,18 @@
 }
 
 TF_LITE_MICRO_TEST(TestAllocateTempBuffer) {
-  tflite::MicroContext micro_context = tflite::CreateMicroContext();
+  tflite::MicroInterpreterContext micro_context =
+      tflite::CreateMicroInterpreterContext();
   micro_context.SetInterpreterState(
-      tflite::MicroContext::InterpreterState::kPrepare);
+      tflite::MicroInterpreterContext::InterpreterState::kPrepare);
   uint8_t* buffer1 =
       micro_context.AllocateTempBuffer(10, tflite::MicroArenaBufferAlignment());
   TF_LITE_MICRO_EXPECT(buffer1 != nullptr);
 }
 
 TF_LITE_MICRO_TEST(TestGetTempIntermediateTensor) {
-  tflite::MicroContext micro_context = tflite::CreateMicroContext();
+  tflite::MicroInterpreterContext micro_context =
+      tflite::CreateMicroInterpreterContext();
 
   TfLiteNode node;
   int intermediate_data[] = {1, 0};
diff --git a/tensorflow/lite/micro/micro_graph.cc b/tensorflow/lite/micro/micro_interpreter_graph.cc
similarity index 60%
rename from tensorflow/lite/micro/micro_graph.cc
rename to tensorflow/lite/micro/micro_interpreter_graph.cc
index 35c6c1f..7f096ae 100644
--- a/tensorflow/lite/micro/micro_graph.cc
+++ b/tensorflow/lite/micro/micro_interpreter_graph.cc
@@ -13,7 +13,7 @@
 limitations under the License.
 ==============================================================================*/
 
-#include "tensorflow/lite/micro/micro_graph.h"
+#include "tensorflow/lite/micro/micro_interpreter_graph.h"
 
 #include "flatbuffers/flatbuffers.h"  // from @flatbuffers
 #include "tensorflow/lite/c/common.h"
@@ -37,34 +37,39 @@
 
 }  // namespace
 
-MicroGraph::MicroGraph(TfLiteContext* context, const Model* model,
-                       MicroAllocator* allocator,
-                       MicroResourceVariables* resource_variables)
+MicroInterpreterGraph::MicroInterpreterGraph(
+    TfLiteContext* context, const Model* model, MicroAllocator* allocator,
+    MicroResourceVariables* resource_variables)
     : context_(context),
       model_(model),
       allocator_(allocator),
       current_subgraph_index_(0),
+      current_operator_index_(0),
       resource_variables_(resource_variables) {
   if (model != nullptr) {
     subgraphs_ = model->subgraphs();
   }
 }
 
-MicroGraph::~MicroGraph() {}
+MicroInterpreterGraph::~MicroInterpreterGraph() {}
 
-TfLiteStatus MicroGraph::InitSubgraphs() {
+TfLiteStatus MicroInterpreterGraph::InitSubgraphs() {
   int previous_subgraph_idx = current_subgraph_index_;
+  uint32_t previous_operator_idx = current_operator_index_;
 
   for (size_t subgraph_idx = 0; subgraph_idx < subgraphs_->size();
        subgraph_idx++) {
     current_subgraph_index_ = subgraph_idx;
     uint32_t operators_size = NumSubgraphOperators(model_, subgraph_idx);
-    for (size_t i = 0; i < operators_size; ++i) {
-      TfLiteNode* node =
-          &(subgraph_allocations_[subgraph_idx].node_and_registrations[i].node);
-      const TFLMRegistration* registration = subgraph_allocations_[subgraph_idx]
-                                                 .node_and_registrations[i]
-                                                 .registration;
+    for (current_operator_index_ = 0; current_operator_index_ < operators_size;
+         ++current_operator_index_) {
+      TfLiteNode* node = &(subgraph_allocations_[subgraph_idx]
+                               .node_and_registrations[current_operator_index_]
+                               .node);
+      const TFLMRegistration* registration =
+          subgraph_allocations_[subgraph_idx]
+              .node_and_registrations[current_operator_index_]
+              .registration;
       size_t init_data_size;
       const char* init_data;
       if (registration->builtin_code == BuiltinOperator_CUSTOM) {
@@ -81,52 +86,62 @@
     }
   }
   current_subgraph_index_ = previous_subgraph_idx;
+  current_operator_index_ = previous_operator_idx;
 
   return kTfLiteOk;
 }
 
-TfLiteStatus MicroGraph::PrepareSubgraphs() {
+TfLiteStatus MicroInterpreterGraph::PrepareSubgraphs() {
   int previous_subgraph_idx = current_subgraph_index_;
-
+  uint32_t previous_operator_idx = current_operator_index_;
   for (size_t subgraph_idx = 0; subgraph_idx < subgraphs_->size();
        subgraph_idx++) {
     current_subgraph_index_ = subgraph_idx;
     uint32_t operators_size = NumSubgraphOperators(model_, subgraph_idx);
-    for (size_t i = 0; i < operators_size; ++i) {
-      TfLiteNode* node =
-          &(subgraph_allocations_[subgraph_idx].node_and_registrations[i].node);
-      const TFLMRegistration* registration = subgraph_allocations_[subgraph_idx]
-                                                 .node_and_registrations[i]
-                                                 .registration;
+    for (current_operator_index_ = 0; current_operator_index_ < operators_size;
+         ++current_operator_index_) {
+      TfLiteNode* node = &(subgraph_allocations_[subgraph_idx]
+                               .node_and_registrations[current_operator_index_]
+                               .node);
+      const TFLMRegistration* registration =
+          subgraph_allocations_[subgraph_idx]
+              .node_and_registrations[current_operator_index_]
+              .registration;
       if (registration->prepare != nullptr) {
         TfLiteStatus prepare_status = registration->prepare(context_, node);
         if (prepare_status != kTfLiteOk) {
           MicroPrintf("Node %s (number %df) failed to prepare with status %d",
-                      OpNameFromRegistration(registration), i, prepare_status);
+                      OpNameFromRegistration(registration),
+                      current_operator_index_, prepare_status);
           return kTfLiteError;
         }
       }
-      allocator_->FinishPrepareNodeAllocations(/*node_id=*/i);
+      allocator_->FinishPrepareNodeAllocations(
+          /*node_id=*/current_operator_index_);
     }
   }
   current_subgraph_index_ = previous_subgraph_idx;
-
+  current_operator_index_ = previous_operator_idx;
   return kTfLiteOk;
 }
 
-TfLiteStatus MicroGraph::ResetSubgraphs() {
+TfLiteStatus MicroInterpreterGraph::ResetSubgraphs() {
   int previous_subgraph_idx = current_subgraph_index_;
+  uint32_t previous_operator_idx = current_operator_index_;
 
   for (size_t subgraph_idx = 0; subgraph_idx < subgraphs_->size();
        subgraph_idx++) {
     current_subgraph_index_ = subgraph_idx;
     uint32_t operators_size = NumSubgraphOperators(model_, subgraph_idx);
-    for (size_t i = 0; i < operators_size; ++i) {
-      TfLiteNode* node =
-          &(subgraph_allocations_[subgraph_idx].node_and_registrations[i].node);
-      const TFLMRegistration* registration = subgraph_allocations_[subgraph_idx]
-                                                 .node_and_registrations[i]
-                                                 .registration;
+    for (current_operator_index_ = 0; current_operator_index_ < operators_size;
+         ++current_operator_index_) {
+      TfLiteNode* node = &(subgraph_allocations_[subgraph_idx]
+                               .node_and_registrations[current_operator_index_]
+                               .node);
+      const TFLMRegistration* registration =
+          subgraph_allocations_[subgraph_idx]
+              .node_and_registrations[current_operator_index_]
+              .registration;
       // registration is allocated outside the interpreter, so double check to
       // make sure it's not nullptr;
       if (registration != nullptr && registration->reset != nullptr) {
@@ -135,23 +150,28 @@
     }
   }
   current_subgraph_index_ = previous_subgraph_idx;
+  current_operator_index_ = previous_operator_idx;
 
   return kTfLiteOk;
 }
 
-TfLiteStatus MicroGraph::FreeSubgraphs() {
+TfLiteStatus MicroInterpreterGraph::FreeSubgraphs() {
   int previous_subgraph_idx = current_subgraph_index_;
+  uint32_t previous_operator_idx = current_operator_index_;
 
   for (size_t subgraph_idx = 0; subgraph_idx < subgraphs_->size();
        subgraph_idx++) {
     current_subgraph_index_ = subgraph_idx;
     uint32_t operators_size = NumSubgraphOperators(model_, subgraph_idx);
-    for (size_t i = 0; i < operators_size; ++i) {
-      TfLiteNode* node =
-          &(subgraph_allocations_[subgraph_idx].node_and_registrations[i].node);
-      const TFLMRegistration* registration = subgraph_allocations_[subgraph_idx]
-                                                 .node_and_registrations[i]
-                                                 .registration;
+    for (current_operator_index_ = 0; current_operator_index_ < operators_size;
+         ++current_operator_index_) {
+      TfLiteNode* node = &(subgraph_allocations_[subgraph_idx]
+                               .node_and_registrations[current_operator_index_]
+                               .node);
+      const TFLMRegistration* registration =
+          subgraph_allocations_[subgraph_idx]
+              .node_and_registrations[current_operator_index_]
+              .registration;
       // registration is allocated outside the interpreter, so double check to
       // make sure it's not nullptr;
       if (registration != nullptr && registration->free != nullptr) {
@@ -160,12 +180,14 @@
     }
   }
   current_subgraph_index_ = previous_subgraph_idx;
+  current_operator_index_ = previous_operator_idx;
 
   return kTfLiteOk;
 }
 
-TfLiteStatus MicroGraph::InvokeSubgraph(int subgraph_idx) {
+TfLiteStatus MicroInterpreterGraph::InvokeSubgraph(int subgraph_idx) {
   int previous_subgraph_idx = current_subgraph_index_;
+  uint32_t previous_operator_idx = current_operator_index_;
   current_subgraph_index_ = subgraph_idx;
 
   if (static_cast<size_t>(subgraph_idx) >= subgraphs_->size()) {
@@ -174,12 +196,15 @@
     return kTfLiteError;
   }
   uint32_t operators_size = NumSubgraphOperators(model_, subgraph_idx);
-  for (size_t i = 0; i < operators_size; ++i) {
-    TfLiteNode* node =
-        &(subgraph_allocations_[subgraph_idx].node_and_registrations[i].node);
-    const TFLMRegistration* registration = subgraph_allocations_[subgraph_idx]
-                                               .node_and_registrations[i]
-                                               .registration;
+  for (current_operator_index_ = 0; current_operator_index_ < operators_size;
+       ++current_operator_index_) {
+    TfLiteNode* node = &(subgraph_allocations_[subgraph_idx]
+                             .node_and_registrations[current_operator_index_]
+                             .node);
+    const TFLMRegistration* registration =
+        subgraph_allocations_[subgraph_idx]
+            .node_and_registrations[current_operator_index_]
+            .registration;
 
 // This ifdef is needed (even though ScopedMicroProfiler itself is a no-op with
 // -DTF_LITE_STRIP_ERROR_STRINGS) because the function OpNameFromRegistration is
@@ -201,17 +226,19 @@
 
     if (invoke_status == kTfLiteError) {
       MicroPrintf("Node %s (number %d) failed to invoke with status %d",
-                  OpNameFromRegistration(registration), i, invoke_status);
+                  OpNameFromRegistration(registration), current_operator_index_,
+                  invoke_status);
       return kTfLiteError;
     } else if (invoke_status != kTfLiteOk) {
       return invoke_status;
     }
   }
   current_subgraph_index_ = previous_subgraph_idx;
+  current_operator_index_ = previous_operator_idx;
   return kTfLiteOk;
 }
 
-TfLiteStatus MicroGraph::ResetVariableTensors() {
+TfLiteStatus MicroInterpreterGraph::ResetVariableTensors() {
   for (size_t subgraph_idx = 0; subgraph_idx < subgraphs_->size();
        subgraph_idx++) {
     const SubGraph* subgraph = (*subgraphs_)[subgraph_idx];
@@ -238,30 +265,34 @@
   return kTfLiteOk;
 }
 
-int MicroGraph::NumSubgraphs() { return model_->subgraphs()->size(); }
+int MicroInterpreterGraph::NumSubgraphs() {
+  return model_->subgraphs()->size();
+}
 
-void MicroGraph::SetSubgraphAllocations(
+void MicroInterpreterGraph::SetSubgraphAllocations(
     SubgraphAllocations* subgraph_allocations) {
   subgraph_allocations_ = subgraph_allocations;
 }
 
-size_t MicroGraph::NumSubgraphInputs(int subgraph_idx) {
+size_t MicroInterpreterGraph::NumSubgraphInputs(int subgraph_idx) {
   return model_->subgraphs()->Get(subgraph_idx)->inputs()->size();
 }
 
-TfLiteEvalTensor* MicroGraph::GetSubgraphInput(int subgraph_idx,
-                                               int input_idx) {
+TfLiteEvalTensor* MicroInterpreterGraph::GetSubgraphInput(int subgraph_idx,
+                                                          int input_idx) {
   int tensor_idx =
       model_->subgraphs()->Get(subgraph_idx)->inputs()->Get(input_idx);
   return &subgraph_allocations_[subgraph_idx].tensors[tensor_idx];
 }
 
-size_t MicroGraph::NumSubgraphOutputs(int subgraph_idx) {
-  return model_->subgraphs()->Get(subgraph_idx)->outputs()->size();
+size_t MicroInterpreterGraph::NumSubgraphOutputs(int subgraph_idx) {
+  return model_->subgraphs()->Get(subgraph_idx)->outputs() == nullptr
+             ? 0
+             : model_->subgraphs()->Get(subgraph_idx)->outputs()->size();
 }
 
-TfLiteEvalTensor* MicroGraph::GetSubgraphOutput(int subgraph_idx,
-                                                int output_idx) {
+TfLiteEvalTensor* MicroInterpreterGraph::GetSubgraphOutput(int subgraph_idx,
+                                                           int output_idx) {
   int tensor_idx =
       model_->subgraphs()->Get(subgraph_idx)->outputs()->Get(output_idx);
   return &subgraph_allocations_[subgraph_idx].tensors[tensor_idx];
diff --git a/tensorflow/lite/micro/micro_interpreter_graph.h b/tensorflow/lite/micro/micro_interpreter_graph.h
new file mode 100644
index 0000000..5fc3b4c
--- /dev/null
+++ b/tensorflow/lite/micro/micro_interpreter_graph.h
@@ -0,0 +1,118 @@
+/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_LITE_MICRO_MICRO_INTERPRETER_GRAPH_H_
+#define TENSORFLOW_LITE_MICRO_MICRO_INTERPRETER_GRAPH_H_
+
+#include "tensorflow/lite/micro/micro_allocator.h"
+#include "tensorflow/lite/micro/micro_common.h"
+#include "tensorflow/lite/micro/micro_graph.h"
+#include "tensorflow/lite/micro/micro_resource_variable.h"
+#include "tensorflow/lite/schema/schema_generated.h"
+
+namespace tflite {
+
+// Abstracts the details of interacting with the tflite::Model.
+//
+// Provides methods to access, initialize, prepare, invoke and free any
+// subgraph in the tflite::Graph.
+class MicroInterpreterGraph : public MicroGraph {
+ public:
+  // The lifetime of the context, model, allocator and resource_variables must
+  // be at least as long as that of the graph object, since the this class may
+  // need to access them at any time. If resource_variables is a nullptr,
+  // GetResourceVariables will return a nullptr.
+  MicroInterpreterGraph(TfLiteContext* context, const Model* model,
+                        MicroAllocator* allocator,
+                        MicroResourceVariables* resource_variables);
+  virtual ~MicroInterpreterGraph();
+
+  // Sets up builtin data and calls TFLMRegistration->Init for every
+  // operator in every subgraph in the model.
+  virtual TfLiteStatus InitSubgraphs();
+
+  // Calls TFLMRegistration->Prepare for every operator in every subgraph
+  // in the model.
+  virtual TfLiteStatus PrepareSubgraphs();
+
+  // Calls TFLMRegistration->Reset for every operator in every subgraph in
+  // the model.
+  virtual TfLiteStatus ResetSubgraphs();
+
+  // Calls TFLMRegistration->Free for every operator in every subgraph in
+  // the model.
+  virtual TfLiteStatus FreeSubgraphs();
+
+  // Calls TFLMRegistration->Invoke for every operator in a single subgraph
+  // in the model.
+  virtual TfLiteStatus InvokeSubgraph(int subgraph_idx);
+
+  // Zeros out all variable tensors in all subgraphs in the model.
+  virtual TfLiteStatus ResetVariableTensors();
+
+  // Number of tensor inputs to a specified subgraph in the model.
+  virtual size_t NumSubgraphInputs(int subgraph_idx);
+
+  // Get the specified input tensor of a specified subgraph in the model.
+  virtual TfLiteEvalTensor* GetSubgraphInput(int subgraph_idx, int input_idx);
+
+  // Number of tensor outputs from a specified subgraph in the model.
+  virtual size_t NumSubgraphOutputs(int subgraph_idx);
+
+  // Get the specified output tensor of a specified subgraph in the model.
+  virtual TfLiteEvalTensor* GetSubgraphOutput(int subgraph_idx, int output_idx);
+
+  // Number of subgraphs in the model.
+  virtual int NumSubgraphs();
+
+  // Hook to pass in subgraph allocations tracked within the interpreter,
+  // allowing MicroInterpreterGraph to init / prepare / invoke subgraphs in the
+  // model.
+  void SetSubgraphAllocations(SubgraphAllocations* subgraph_allocations);
+
+  // Get the current subgraph index. Within an operator, this is guaranteed
+  // to be the subgraph of that operator.
+  int GetCurrentSubgraphIndex() { return current_subgraph_index_; }
+
+  // Get the current operator index inside a subgraph.
+  // The pair GetCurrentSubgraphIndex and GetCurrentOperatorIndex creates a
+  // unique identifier of the operator inside the subgraph.
+  int GetCurrentOperatorIndex() { return current_operator_index_; }
+
+  // Gets the list of allocations for each subgraph. This is the source of truth
+  // for all per-subgraph allocation data.
+  SubgraphAllocations* GetAllocations() { return subgraph_allocations_; }
+
+  // Get the resource variables for this TFLM graph.
+  MicroResourceVariables* GetResourceVariables() { return resource_variables_; }
+
+ private:
+  TfLiteContext* context_;
+  const Model* model_;
+  MicroAllocator* allocator_;
+  SubgraphAllocations* subgraph_allocations_ = nullptr;
+  int current_subgraph_index_;
+  uint32_t current_operator_index_;
+  MicroResourceVariables* resource_variables_;
+  const flatbuffers::Vector<flatbuffers::Offset<SubGraph>>* subgraphs_ =
+      nullptr;  // Initialized as nullptr to prevent any possible issues
+                // related to accessing uninitialized memory.
+
+  TF_LITE_REMOVE_VIRTUAL_DELETE
+};
+
+}  // namespace tflite
+
+#endif  // TENSORFLOW_LITE_MICRO_MICRO_INTERPRETER_GRAPH_H_
diff --git a/tensorflow/lite/micro/micro_interpreter_test.cc b/tensorflow/lite/micro/micro_interpreter_test.cc
index 0ba31c4..e44de6b 100644
--- a/tensorflow/lite/micro/micro_interpreter_test.cc
+++ b/tensorflow/lite/micro/micro_interpreter_test.cc
@@ -548,4 +548,23 @@
   TF_LITE_MICRO_EXPECT_EQ(interpreter2.Invoke(), kTfLiteOk);
 }
 
+TF_LITE_MICRO_TEST(TestGetTensorFailsNoLinearMemoryPlanner) {
+  const tflite::Model* model = tflite::testing::GetModelWith256x256Tensor();
+  TF_LITE_MICRO_EXPECT(model != nullptr);
+
+  tflite::testing::TestingOpResolver op_resolver;
+  TF_LITE_MICRO_EXPECT_EQ(tflite::testing::GetTestingOpResolver(op_resolver),
+                          kTfLiteOk);
+  tflite::MicroInterpreter interpreter(model, op_resolver, tflite::arena_buffer,
+                                       tflite::buffer_arena_size);
+  TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
+
+  TF_LITE_MICRO_EXPECT_EQ(interpreter.Invoke(), kTfLiteOk);
+
+  // GetTensor should return nullptr when a linear memory planner isn't used
+  // to initialize it. The preserve_all_tensors() getter should also return false.
+  TF_LITE_MICRO_EXPECT_EQ(interpreter.preserve_all_tensors(), false);
+  TF_LITE_MICRO_EXPECT(interpreter.GetTensor(0) == nullptr);
+}
+
 TF_LITE_MICRO_TESTS_END
diff --git a/tensorflow/lite/micro/micro_log.cc b/tensorflow/lite/micro/micro_log.cc
index 45f7051..a08a07d 100644
--- a/tensorflow/lite/micro/micro_log.cc
+++ b/tensorflow/lite/micro/micro_log.cc
@@ -46,4 +46,17 @@
   VMicroPrintf(format, args);
   va_end(args);
 }
+
+int MicroSnprintf(char* buffer, size_t buf_size, const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  int result = MicroVsnprintf(buffer, buf_size, format, args);
+  va_end(args);
+  return result;
+}
+
+int MicroVsnprintf(char* buffer, size_t buf_size, const char* format,
+                   va_list vlist) {
+  return DebugVsnprintf(buffer, buf_size, format, vlist);
+}
 #endif  // !defined(TF_LITE_STRIP_ERROR_STRINGS)
diff --git a/tensorflow/lite/micro/micro_log.h b/tensorflow/lite/micro/micro_log.h
index 669e9e6..af3c24a 100644
--- a/tensorflow/lite/micro/micro_log.h
+++ b/tensorflow/lite/micro/micro_log.h
@@ -17,15 +17,21 @@
 
 #if !defined(TF_LITE_STRIP_ERROR_STRINGS)
 #include <cstdarg>
+#include <cstddef>
 // These functions can be used independent of the MicroErrorReporter to get
 // printf-like functionalitys and are common to all target platforms.
 void MicroPrintf(const char* format, ...);
 void VMicroPrintf(const char* format, va_list args);
+int MicroSnprintf(char* buffer, size_t buf_size, const char* format, ...);
+int MicroVsnprintf(char* buffer, size_t buf_size, const char* format,
+                   va_list vlist);
 #else
 // We use a #define to ensure that the strings are completely stripped, to
 // prevent an unnecessary increase in the binary size.
 #define MicroPrintf(...) tflite::Unused(__VA_ARGS__)
 #define VMicroPrintf(...) tflite::Unused(__VA_ARGS__)
+#define MicroSnprintf(...) tflite::Unused<int>(__VA_ARGS__)
+#define MicroVsnprintf(...) tflite::Unused<int>(__VA_ARGS__)
 #endif
 
 namespace tflite {
@@ -37,6 +43,12 @@
   (void)(sizeof...(args));
 }
 
+template <typename T, typename... Args>
+T Unused(Args&&... args) {
+  (void)(sizeof...(args));
+  return static_cast<T>(0);
+}
+
 }  // namespace tflite
 
 #endif  // TENSORFLOW_LITE_MICRO_MICRO_LOG_H_
diff --git a/tensorflow/lite/micro/micro_log_test.cc b/tensorflow/lite/micro/micro_log_test.cc
index 97ac8be..7027e01 100644
--- a/tensorflow/lite/micro/micro_log_test.cc
+++ b/tensorflow/lite/micro/micro_log_test.cc
@@ -1,4 +1,4 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -15,18 +15,42 @@
 
 #include "tensorflow/lite/micro/micro_log.h"
 
-#include "tensorflow/lite/micro/system_setup.h"
+#include <cstddef>
+#include <cstring>
 
-namespace tflite {
-inline void InitializeTest() { InitializeTarget(); }
-}  // namespace tflite
+#include "tensorflow/lite/micro/testing/micro_test.h"
 
-int main(int argc, char** argv) {
-  tflite::InitializeTest();
-#ifndef TF_LITE_STRIP_ERROR_STRINGS
-  MicroPrintf("Number: %d", 42);
-  MicroPrintf("Badly-formed format string %");
-  MicroPrintf("Another % badly-formed %% format string");
-  MicroPrintf("~~~%s~~~", "ALL TESTS PASSED");
+namespace {
+
+#if !defined(TF_LITE_STRIP_ERROR_STRINGS)
+constexpr int kMaxBufferSize = 128;
+const char* kFormat = "%2d%6.2f%#5x%5s";
+const char* kExpect = "42 42.42 0x42 \"42\"";
 #endif  // !defined(TF_LITE_STRIP_ERROR_STRINGS)
+
+}  // namespace
+
+TF_LITE_MICRO_TESTS_BEGIN
+
+#if !defined(TF_LITE_STRIP_ERROR_STRINGS)
+
+TF_LITE_MICRO_TEST(MicroPrintfTest) {
+  MicroPrintf("Integer 42: %d", 42);
+  MicroPrintf("Float 42.42: %2.2f", 42.42);
+  MicroPrintf("String \"Hello World!\": %s", "\"Hello World!\"");
+  MicroPrintf("Badly-formed format string %");
+  MicroPrintf("Another %# badly-formed %% format string");
 }
+
+TF_LITE_MICRO_TEST(MicroSnprintf) {
+  char buffer[kMaxBufferSize];
+  buffer[0] = '\0';
+  size_t result =
+      MicroSnprintf(buffer, kMaxBufferSize, kFormat, 42, 42.42, 0x42, "\"42\"");
+  TF_LITE_MICRO_EXPECT_EQ(result, strlen(buffer));
+  TF_LITE_MICRO_EXPECT_STRING_EQ(kExpect, buffer);
+}
+
+#endif  // !defined(TF_LITE_STRIP_ERROR_STRINGS)
+
+TF_LITE_MICRO_TESTS_END
diff --git a/tensorflow/lite/micro/micro_mutable_op_resolver.h b/tensorflow/lite/micro/micro_mutable_op_resolver.h
index fe4f5fb..f5f6e38 100644
--- a/tensorflow/lite/micro/micro_mutable_op_resolver.h
+++ b/tensorflow/lite/micro/micro_mutable_op_resolver.h
@@ -29,9 +29,11 @@
 #include "tensorflow/lite/micro/kernels/ethosu.h"
 #include "tensorflow/lite/micro/kernels/fully_connected.h"
 #include "tensorflow/lite/micro/kernels/micro_ops.h"
+#include "tensorflow/lite/micro/kernels/mul.h"
 #include "tensorflow/lite/micro/kernels/pooling.h"
 #include "tensorflow/lite/micro/kernels/reduce.h"
 #include "tensorflow/lite/micro/kernels/softmax.h"
+#include "tensorflow/lite/micro/kernels/transpose_conv.h"
 #include "tensorflow/lite/micro/micro_log.h"
 #include "tensorflow/lite/micro/micro_op_resolver.h"
 #include "tensorflow/lite/schema/schema_generated.h"
@@ -143,6 +145,11 @@
     return AddBuiltin(BuiltinOperator_AVERAGE_POOL_2D, registration, ParsePool);
   }
 
+  TfLiteStatus AddBatchMatMul() {
+    return AddBuiltin(BuiltinOperator_BATCH_MATMUL,
+                      tflite::Register_BATCH_MATMUL(), ParseBatchMatMul);
+  }
+
   TfLiteStatus AddBatchToSpaceNd() {
     return AddBuiltin(BuiltinOperator_BATCH_TO_SPACE_ND,
                       Register_BATCH_TO_SPACE_ND(), ParseBatchToSpaceNd);
@@ -462,6 +469,11 @@
     return AddBuiltin(BuiltinOperator_PADV2, Register_PADV2(), ParsePadV2);
   }
 
+  TfLiteStatus AddPCAN() {
+    // TODO(b/286250473): change back name to "PCAN" and remove namespace
+    return AddCustom("SignalPCAN", tflite::tflm_signal::Register_PCAN());
+  }
+
   TfLiteStatus AddPrelu() {
     return AddBuiltin(BuiltinOperator_PRELU, tflite::Register_PRELU(),
                       ParsePrelu);
@@ -606,9 +618,10 @@
     return AddBuiltin(BuiltinOperator_TANH, Register_TANH(), ParseTanh);
   }
 
-  TfLiteStatus AddTransposeConv() {
-    return AddBuiltin(BuiltinOperator_TRANSPOSE_CONV,
-                      tflite::Register_TRANSPOSE_CONV(), ParseTransposeConv);
+  TfLiteStatus AddTransposeConv(
+      const TFLMRegistration& registration = Register_TRANSPOSE_CONV()) {
+    return AddBuiltin(BuiltinOperator_TRANSPOSE_CONV, registration,
+                      ParseTransposeConv);
   }
 
   TfLiteStatus AddTranspose() {
diff --git a/tensorflow/lite/micro/micro_profiler.cc b/tensorflow/lite/micro/micro_profiler.cc
index e7743aa..ebead51 100644
--- a/tensorflow/lite/micro/micro_profiler.cc
+++ b/tensorflow/lite/micro/micro_profiler.cc
@@ -99,7 +99,7 @@
     }
     MicroPrintf("%s, %d", each_tag_entry.tag, each_tag_entry.ticks);
   }
-  MicroPrintf("total number of ticks, %d", total_ticks);
+  MicroPrintf("\"total number of ticks\", %d", total_ticks);
 #endif
 }
 
diff --git a/tensorflow/lite/micro/micro_profiler.h b/tensorflow/lite/micro/micro_profiler.h
index 1c39ea1..b52ebcb 100644
--- a/tensorflow/lite/micro/micro_profiler.h
+++ b/tensorflow/lite/micro/micro_profiler.h
@@ -40,7 +40,7 @@
   // only once per event_handle.
   //
   // If EndEvent is called more than once for the same event_handle, the last
-  // call will be used as the end of event marker.If EndEvent is called 0 times
+  // call will be used as the end of event marker. If EndEvent is called 0 times
   // for a particular event_handle, the duration of that event will be 0 ticks.
   virtual void EndEvent(uint32_t event_handle) override;
 
@@ -66,9 +66,9 @@
   void LogTicksPerTagCsv();
 
  private:
-  // Maximum number of events that this class can keep track of. If we call
-  // AddEvent more than kMaxEvents number of times, then the oldest event's
-  // profiling information will be overwritten.
+  // Maximum number of events that this class can keep track of. The
+  // MicroProfiler will abort if AddEvent is called more than kMaxEvents number
+  // of times. Increase this number if you need more events.
   static constexpr int kMaxEvents = 4096;
 
   const char* tags_[kMaxEvents];
@@ -87,7 +87,7 @@
 
   int FindExistingOrNextPosition(const char* tag_name);
 
-  TF_LITE_REMOVE_VIRTUAL_DELETE;
+  TF_LITE_REMOVE_VIRTUAL_DELETE
 };
 
 #if defined(TF_LITE_STRIP_ERROR_STRINGS)
diff --git a/tensorflow/lite/micro/micro_time.h b/tensorflow/lite/micro/micro_time.h
index 7a8ab45..5e8ad11 100644
--- a/tensorflow/lite/micro/micro_time.h
+++ b/tensorflow/lite/micro/micro_time.h
@@ -27,8 +27,11 @@
 uint32_t GetCurrentTimeTicks();
 
 inline uint32_t TicksToMs(int32_t ticks) {
+  uint32_t _ticks_per_second = ticks_per_second();
+  _ticks_per_second =
+      _ticks_per_second > 0 ? _ticks_per_second : 1;  // zero divide prevention
   return static_cast<uint32_t>(1000.0f * static_cast<float>(ticks) /
-                               static_cast<float>(ticks_per_second()));
+                               static_cast<float>(_ticks_per_second));
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/mock_micro_graph.cc b/tensorflow/lite/micro/mock_micro_graph.cc
index 438a406..9c652fb 100644
--- a/tensorflow/lite/micro/mock_micro_graph.cc
+++ b/tensorflow/lite/micro/mock_micro_graph.cc
@@ -20,11 +20,7 @@
 namespace tflite {
 
 MockMicroGraph::MockMicroGraph(SingleArenaBufferAllocator* allocator)
-    : MicroGraph(nullptr, nullptr, nullptr, nullptr),
-      allocator_(allocator),
-      init_count_(0),
-      prepare_count_(0),
-      free_count_(0) {
+    : allocator_(allocator), init_count_(0), prepare_count_(0), free_count_(0) {
   memset(invoke_counts_, 0, sizeof(invoke_counts_));
   mock_tensor_ =
       reinterpret_cast<TfLiteEvalTensor*>(allocator_->AllocatePersistentBuffer(
@@ -45,8 +41,6 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus MockMicroGraph::ResetVariableTensors() { return kTfLiteOk; }
-
 size_t MockMicroGraph::NumSubgraphInputs(int subgraph_idx) { return 1; }
 
 TfLiteEvalTensor* MockMicroGraph::GetSubgraphInput(int subgraph_idx,
@@ -63,4 +57,8 @@
 
 int MockMicroGraph::NumSubgraphs() { return kMaxSubgraphs; }
 
+MicroResourceVariables* MockMicroGraph::GetResourceVariables() {
+  return nullptr;
+}
+
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/mock_micro_graph.h b/tensorflow/lite/micro/mock_micro_graph.h
index 3ae7d7c..745e8f0 100644
--- a/tensorflow/lite/micro/mock_micro_graph.h
+++ b/tensorflow/lite/micro/mock_micro_graph.h
@@ -30,13 +30,13 @@
  public:
   explicit MockMicroGraph(SingleArenaBufferAllocator* allocator);
   TfLiteStatus InvokeSubgraph(int subgraph_idx) override;
-  TfLiteStatus ResetVariableTensors() override;
   size_t NumSubgraphInputs(int subgraph_idx) override;
   TfLiteEvalTensor* GetSubgraphInput(int subgraph_idx, int tensor_idx) override;
   size_t NumSubgraphOutputs(int subgraph_idx) override;
   TfLiteEvalTensor* GetSubgraphOutput(int subgraph_idx,
                                       int tensor_idx) override;
   int NumSubgraphs() override;
+  MicroResourceVariables* GetResourceVariables() override;
   int get_init_count() const { return init_count_; }
   int get_prepare_count() const { return prepare_count_; }
   int get_free_count() const { return free_count_; }
diff --git a/tensorflow/lite/micro/models/person_detect_vela.tflite b/tensorflow/lite/micro/models/person_detect_vela.tflite
new file mode 100644
index 0000000..95cf476
--- /dev/null
+++ b/tensorflow/lite/micro/models/person_detect_vela.tflite
Binary files differ
diff --git a/tensorflow/lite/micro/python/interpreter/src/BUILD b/tensorflow/lite/micro/python/interpreter/src/BUILD
index 601e3db..f8be0ed 100644
--- a/tensorflow/lite/micro/python/interpreter/src/BUILD
+++ b/tensorflow/lite/micro/python/interpreter/src/BUILD
@@ -17,24 +17,6 @@
     packages = tflm_python_op_resolver_friends(),
 )
 
-# tflm_runtime is deprecated, please use //python/tflite_micro:runtime instead.
-# TODO(b/286456378): remove once all usage is changed to the runtime target.
-py_library(
-    name = "tflm_runtime",
-    srcs = ["tflm_runtime.py"],
-    visibility = ["//visibility:public"],
-    deps = ["//python/tflite_micro:runtime"],
-)
-
-# runtime is deprecated, please use //python/tflite_micro:runtime instead.
-# TODO(b/286456378): remove once all usage is changed to the runtime target.
-py_library(
-    name = "runtime",
-    srcs = ["runtime.py"],
-    visibility = ["//visibility:public"],
-    deps = ["//python/tflite_micro:runtime"],
-)
-
 # TODO(b/286456378): remove once all internal usage is fixed.
 cc_library(
     name = "python_ops_resolver",
@@ -43,10 +25,7 @@
         "python_ops_resolver.h",
     ],
     copts = micro_copts(),
-    visibility = [
-        ":op_resolver_friends",
-        "//tensorflow/lite/micro/integration_tests:__subpackages__",
-    ],
+    visibility = [":op_resolver_friends"],
     deps = [
         "//python/tflite_micro:python_ops_resolver",
     ],
diff --git a/tensorflow/lite/micro/python/tflite_size/src/BUILD b/tensorflow/lite/micro/python/tflite_size/src/BUILD
index 66d9b50..b8f53a2 100644
--- a/tensorflow/lite/micro/python/tflite_size/src/BUILD
+++ b/tensorflow/lite/micro/python/tflite_size/src/BUILD
@@ -1,3 +1,4 @@
+load("@rules_python//python:defs.bzl", "py_binary", "py_library")
 load("@pybind11_bazel//:build_defs.bzl", "pybind_extension", "pybind_library")
 
 package(
diff --git a/tensorflow/lite/micro/python/tflite_size/tests/BUILD b/tensorflow/lite/micro/python/tflite_size/tests/BUILD
index 1b4c5b2..076a6ab 100644
--- a/tensorflow/lite/micro/python/tflite_size/tests/BUILD
+++ b/tensorflow/lite/micro/python/tflite_size/tests/BUILD
@@ -1,3 +1,4 @@
+load("@rules_python//python:defs.bzl", "py_test")
 load("@tflm_pip_deps//:requirements.bzl", "requirement")
 
 licenses(["notice"])
@@ -25,7 +26,7 @@
         "noubsan",
     ],
     deps = [
-        requirement("tensorflow-cpu"),
+        requirement("tensorflow"),
         "//tensorflow/lite/micro/python/tflite_size/src:flatbuffer_size",
     ],
 )
diff --git a/tensorflow/lite/micro/recording_micro_allocator.cc b/tensorflow/lite/micro/recording_micro_allocator.cc
index f41dba6..ee76196 100644
--- a/tensorflow/lite/micro/recording_micro_allocator.cc
+++ b/tensorflow/lite/micro/recording_micro_allocator.cc
@@ -78,9 +78,14 @@
       return recorded_node_and_registration_array_data_;
     case RecordedAllocationType::kOpData:
       return recorded_op_data_;
+    // The MicroPrintf call below was never reached outside the switch,
+    // because every case returns. Since MicroPrintf is intended to run
+    // when no matching case is found, a default case was added to
+    // handle an invalid allocation type.
+    default:
+      MicroPrintf("Invalid allocation type supplied: %d", allocation_type);
+      return RecordedAllocation();
   }
-  MicroPrintf("Invalid allocation type supplied: %d", allocation_type);
-  return RecordedAllocation();
 }
 
 const RecordingSingleArenaBufferAllocator*
diff --git a/tensorflow/lite/micro/riscv32_generic/debug_log.cc b/tensorflow/lite/micro/riscv32_generic/debug_log.cc
new file mode 100644
index 0000000..1d38fff
--- /dev/null
+++ b/tensorflow/lite/micro/riscv32_generic/debug_log.cc
@@ -0,0 +1,40 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/lite/micro/debug_log.h"
+
+#include <cstdio>
+
+#ifndef TF_LITE_STRIP_ERROR_STRINGS
+#include "eyalroz_printf/src/printf/printf.h"
+#endif
+
+extern "C" void DebugLog(const char* format, va_list args) {
+#ifndef TF_LITE_STRIP_ERROR_STRINGS
+  constexpr int kMaxLogLen = 256;
+  char log_buffer[kMaxLogLen];
+
+  vsnprintf_(log_buffer, kMaxLogLen, format, args);
+  std::fputs(log_buffer, stdout);
+#endif
+}
+
+#ifndef TF_LITE_STRIP_ERROR_STRINGS
+// Only called from MicroVsnprintf (micro_log.h)
+extern "C" int DebugVsnprintf(char* buffer, size_t buf_size, const char* format,
+                              va_list vlist) {
+  return vsnprintf_(buffer, buf_size, format, vlist);
+}
+#endif
diff --git a/tensorflow/lite/micro/span.h b/tensorflow/lite/micro/span.h
new file mode 100644
index 0000000..9399f1d
--- /dev/null
+++ b/tensorflow/lite/micro/span.h
@@ -0,0 +1,69 @@
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef TENSORFLOW_LITE_MICRO_SPAN_H_
+#define TENSORFLOW_LITE_MICRO_SPAN_H_
+
+#include <array>
+#include <cstddef>
+
+namespace tflite {
+
+// A poor man's std::span; we should consider using the Pigweed span instead.
+template <typename T>
+class Span {
+ public:
+  constexpr Span(T* data, size_t size) noexcept : data_(data), size_(size) {}
+
+  template <size_t N>
+  constexpr Span(T (&data)[N]) noexcept : data_(data), size_(N) {}
+
+  template <size_t N>
+  constexpr Span(std::array<T, N>& array) noexcept
+      : data_(array.data()), size_(N) {}
+
+  constexpr T& operator[](size_t idx) const noexcept { return *(data_ + idx); }
+
+  constexpr T* data() const noexcept { return data_; }
+  constexpr size_t size() const noexcept { return size_; }
+
+ private:
+  T* data_;
+  size_t size_;
+};
+
+template <typename A, typename B>
+bool operator==(const Span<A>& a, const Span<B>& b) {
+  if (a.size() != b.size()) {
+    return false;
+  }
+
+  for (size_t i = 0; i < a.size(); ++i) {
+    if (a[i] != b[i]) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+template <typename A, typename B>
+bool operator!=(const Span<A>& a, const Span<B>& b) {
+  return !(a == b);
+}
+
+}  // end namespace tflite
+
+#endif  // TENSORFLOW_LITE_MICRO_SPAN_H_
diff --git a/tensorflow/lite/micro/span_test.cc b/tensorflow/lite/micro/span_test.cc
new file mode 100644
index 0000000..ef906c6
--- /dev/null
+++ b/tensorflow/lite/micro/span_test.cc
@@ -0,0 +1,59 @@
+// Copyright 2024 The TensorFlow Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "tensorflow/lite/micro/span.h"
+
+#include <array>
+
+#include "tensorflow/lite/micro/testing/micro_test.h"
+
+TF_LITE_MICRO_TESTS_BEGIN
+
+TF_LITE_MICRO_TEST(TestArrayInitialization) {
+  int a[]{1, 2, 3};
+  tflite::Span<int> s{a};
+  TF_LITE_MICRO_EXPECT(s.data() == a);
+  TF_LITE_MICRO_EXPECT(s.size() == sizeof(a) / sizeof(int));
+}
+
+TF_LITE_MICRO_TEST(TestStdArrayInitialization) {
+  std::array<char, 20> a;
+  tflite::Span<char> s{a};
+  TF_LITE_MICRO_EXPECT(s.data() == a.data());
+  TF_LITE_MICRO_EXPECT(s.size() == a.size());
+}
+
+TF_LITE_MICRO_TEST(TestEquality) {
+  constexpr int a[]{1, 2, 3};
+  constexpr int b[]{1, 2, 3};
+  constexpr int c[]{3, 2, 1};
+  tflite::Span<const int> s_a{a};
+  tflite::Span<const int> s_b{b};
+  tflite::Span<const int> s_c{c};
+  TF_LITE_MICRO_EXPECT_TRUE(s_a == s_b);
+  TF_LITE_MICRO_EXPECT_FALSE(s_a == s_c);
+}
+
+TF_LITE_MICRO_TEST(TestInequality) {
+  constexpr int a[]{1, 2, 3};
+  constexpr int b[]{1, 2, 3};
+  constexpr int c[]{3, 2, 1};
+  tflite::Span<const int> s_a{a};
+  tflite::Span<const int> s_b{b};
+  tflite::Span<const int> s_c{c};
+  TF_LITE_MICRO_EXPECT_FALSE(s_a != s_b);
+  TF_LITE_MICRO_EXPECT_TRUE(s_a != s_c);
+}
+
+TF_LITE_MICRO_TESTS_END
diff --git a/tensorflow/lite/micro/static_vector.h b/tensorflow/lite/micro/static_vector.h
new file mode 100644
index 0000000..8b9e063
--- /dev/null
+++ b/tensorflow/lite/micro/static_vector.h
@@ -0,0 +1,83 @@
+// Copyright 2024 The TensorFlow Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef TENSORFLOW_LITE_MICRO_STATIC_VECTOR_H_
+#define TENSORFLOW_LITE_MICRO_STATIC_VECTOR_H_
+
+#include <array>
+#include <cassert>
+#include <cstddef>
+
+#include "tensorflow/lite/kernels/op_macros.h"  // for TF_LITE_ASSERT
+
+namespace tflite {
+
+template <typename T, std::size_t MaxSize>
+class StaticVector {
+  // A statically-allocated vector. Add to the interface as needed.
+
+ private:
+  std::array<T, MaxSize> array_;
+  std::size_t size_{0};
+
+ public:
+  using iterator = typename decltype(array_)::iterator;
+  using const_iterator = typename decltype(array_)::const_iterator;
+  using pointer = typename decltype(array_)::pointer;
+  using reference = typename decltype(array_)::reference;
+  using const_reference = typename decltype(array_)::const_reference;
+
+  StaticVector() {}
+
+  StaticVector(std::initializer_list<T> values) {
+    for (const T& v : values) {
+      push_back(v);
+    }
+  }
+
+  static constexpr std::size_t max_size() { return MaxSize; }
+  std::size_t size() const { return size_; }
+  bool full() const { return size() == max_size(); }
+  iterator begin() { return array_.begin(); }
+  const_iterator begin() const { return array_.begin(); }
+  iterator end() { return begin() + size(); }
+  const_iterator end() const { return begin() + size(); }
+  pointer data() { return array_.data(); }
+  reference operator[](int i) { return array_[i]; }
+  const_reference operator[](int i) const { return array_[i]; }
+  void clear() { size_ = 0; }
+
+  template <std::size_t N>
+  bool operator==(const StaticVector<T, N>& other) const {
+    return std::equal(begin(), end(), other.begin(), other.end());
+  }
+
+  template <std::size_t N>
+  bool operator!=(const StaticVector<T, N>& other) const {
+    return !(*this == other);
+  }
+
+  void push_back(const T& t) {
+    TF_LITE_ASSERT(!full());
+    *end() = t;
+    ++size_;
+  }
+};
+
+template <typename T, typename... U>
+StaticVector(T, U...) -> StaticVector<T, 1 + sizeof...(U)>;
+
+}  // end namespace tflite
+
+#endif  // TENSORFLOW_LITE_MICRO_STATIC_VECTOR_H_
diff --git a/tensorflow/lite/micro/static_vector_test.cc b/tensorflow/lite/micro/static_vector_test.cc
new file mode 100644
index 0000000..6d601bc
--- /dev/null
+++ b/tensorflow/lite/micro/static_vector_test.cc
@@ -0,0 +1,82 @@
+// Copyright 2024 The TensorFlow Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "tensorflow/lite/micro/static_vector.h"
+
+#include "tensorflow/lite/micro/testing/micro_test.h"
+
+using tflite::StaticVector;
+
+TF_LITE_MICRO_TESTS_BEGIN
+
+TF_LITE_MICRO_TEST(StaticVectorPushBack) {
+  StaticVector<int, 4> a;
+  TF_LITE_MICRO_EXPECT(a.max_size() == 4);
+  TF_LITE_MICRO_EXPECT(a.size() == 0);
+
+  a.push_back(1);
+  TF_LITE_MICRO_EXPECT(a.size() == 1);
+  TF_LITE_MICRO_EXPECT(a[0] == 1);
+
+  a.push_back(2);
+  TF_LITE_MICRO_EXPECT(a.size() == 2);
+  TF_LITE_MICRO_EXPECT(a[1] == 2);
+
+  a.push_back(3);
+  TF_LITE_MICRO_EXPECT(a.size() == 3);
+  TF_LITE_MICRO_EXPECT(a[2] == 3);
+}
+
+TF_LITE_MICRO_TEST(StaticVectorInitializationPartial) {
+  const StaticVector<int, 4> a{1, 2, 3};
+  TF_LITE_MICRO_EXPECT(a.max_size() == 4);
+  TF_LITE_MICRO_EXPECT(a.size() == 3);
+  TF_LITE_MICRO_EXPECT(a[0] == 1);
+  TF_LITE_MICRO_EXPECT(a[1] == 2);
+  TF_LITE_MICRO_EXPECT(a[2] == 3);
+}
+
+TF_LITE_MICRO_TEST(StaticVectorInitializationFull) {
+  const StaticVector b{1, 2, 3};
+  TF_LITE_MICRO_EXPECT(b.max_size() == 3);
+  TF_LITE_MICRO_EXPECT(b.size() == 3);
+}
+
+TF_LITE_MICRO_TEST(StaticVectorEquality) {
+  const StaticVector a{1, 2, 3};
+  const StaticVector b{1, 2, 3};
+  TF_LITE_MICRO_EXPECT(a == b);
+  TF_LITE_MICRO_EXPECT(!(a != b));
+}
+
+TF_LITE_MICRO_TEST(StaticVectorInequality) {
+  const StaticVector a{1, 2, 3};
+  const StaticVector b{3, 2, 1};
+  TF_LITE_MICRO_EXPECT(a != b);
+  TF_LITE_MICRO_EXPECT(!(a == b));
+}
+
+TF_LITE_MICRO_TEST(StaticVectorSizeInequality) {
+  const StaticVector a{1, 2};
+  const StaticVector b{1, 2, 3};
+  TF_LITE_MICRO_EXPECT(a != b);
+}
+
+TF_LITE_MICRO_TEST(StaticVectorPartialSizeInequality) {
+  const StaticVector<int, 3> a{1, 2};
+  const StaticVector<int, 3> b{1, 2, 3};
+  TF_LITE_MICRO_EXPECT(a != b);
+}
+
+TF_LITE_MICRO_TESTS_END
diff --git a/tensorflow/lite/micro/test_helpers.cc b/tensorflow/lite/micro/test_helpers.cc
index 15d2382..3f0f5ec 100644
--- a/tensorflow/lite/micro/test_helpers.cc
+++ b/tensorflow/lite/micro/test_helpers.cc
@@ -1876,8 +1876,8 @@
 
 // Create a TfLiteIntArray from an array of ints.  The first element in the
 // supplied array must be the size of the array expressed as an int.
-TfLiteIntArray* IntArrayFromInts(int* int_array) {
-  return reinterpret_cast<TfLiteIntArray*>(int_array);
+TfLiteIntArray* IntArrayFromInts(const int* int_array) {
+  return reinterpret_cast<TfLiteIntArray*>(const_cast<int*>(int_array));
 }
 
 // Create a TfLiteFloatArray from an array of floats.  The first element in the
diff --git a/tensorflow/lite/micro/test_helpers.h b/tensorflow/lite/micro/test_helpers.h
index 578282e..6315b9f 100644
--- a/tensorflow/lite/micro/test_helpers.h
+++ b/tensorflow/lite/micro/test_helpers.h
@@ -1,4 +1,4 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -17,6 +17,7 @@
 #define TENSORFLOW_LITE_MICRO_TEST_HELPERS_H_
 
 #include <algorithm>
+#include <cmath>
 #include <cstdint>
 #include <limits>
 #include <type_traits>
@@ -195,7 +196,7 @@
 
 // Create a TfLiteIntArray from an array of ints.  The first element in the
 // supplied array must be the size of the array expressed as an int.
-TfLiteIntArray* IntArrayFromInts(int* int_array);
+TfLiteIntArray* IntArrayFromInts(const int* int_array);
 
 // Create a TfLiteFloatArray from an array of floats.  The first element in the
 // supplied array must be the size of the array expressed as a float.
@@ -325,7 +326,7 @@
 template <typename T>
 inline int ZeroPointFromMinMax(const float min, const float max) {
   return static_cast<int>(std::numeric_limits<T>::min()) +
-         static_cast<int>(-min / ScaleFromMinMax<T>(min, max) + 0.5f);
+         static_cast<int>(roundf(-min / ScaleFromMinMax<T>(min, max)));
 }
 
 }  // namespace testing
diff --git a/tensorflow/lite/micro/testing/BUILD b/tensorflow/lite/micro/testing/BUILD
index 58914bc..7a24699 100644
--- a/tensorflow/lite/micro/testing/BUILD
+++ b/tensorflow/lite/micro/testing/BUILD
@@ -1,3 +1,4 @@
+load("@rules_python//python:defs.bzl", "py_binary", "py_library")
 load("@tflm_pip_deps//:requirements.bzl", "requirement")
 load(
     "//tensorflow:extra_rules.bzl",
@@ -32,7 +33,6 @@
     ],
     visibility = [
         ":kernel_test_friends",
-        ":microfrontend",
         ":tflite_micro",
     ],
     deps = [
@@ -77,7 +77,7 @@
     ],
     deps = [
         requirement("numpy"),
-        requirement("tensorflow-cpu"),
+        requirement("tensorflow"),
     ],
 )
 
@@ -93,6 +93,6 @@
     deps = [
         "@absl_py//absl:app",
         requirement("numpy"),
-        requirement("tensorflow-cpu"),
+        requirement("tensorflow"),
     ],
 )
diff --git a/tensorflow/lite/micro/testing/generate_test_models.py b/tensorflow/lite/micro/testing/generate_test_models.py
index 25902d0..2593653 100644
--- a/tensorflow/lite/micro/testing/generate_test_models.py
+++ b/tensorflow/lite/micro/testing/generate_test_models.py
@@ -71,6 +71,10 @@
   converter.inference_input_type = tf.int8
   converter.inference_output_type = tf.int8
   converter.representative_dataset = representative_dataset_gen
+  # TODO(b/324385802): Disable per channel quantization in FC layers (currently
+  # default behaviour) since it's not yet supported in TFLM.
+  converter._experimental_disable_per_channel_quantization_for_dense_layers = (  # pylint: disable=protected-access
+      True)
 
   tflite_model = converter.convert()
   if write_to_file:
diff --git a/tensorflow/lite/micro/testing/micro_test.h b/tensorflow/lite/micro/testing/micro_test.h
index 2e119e1..1e17531 100644
--- a/tensorflow/lite/micro/testing/micro_test.h
+++ b/tensorflow/lite/micro/testing/micro_test.h
@@ -1,4 +1,4 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -259,8 +259,16 @@
         MicroPrintf("FAIL: %s did not match %s", string1, string2, __FILE__, \
                     __LINE__);                                               \
         micro_test::did_test_fail = true;                                    \
+        break;                                                               \
       }                                                                      \
     }                                                                        \
   } while (false)
 
+#define TF_LITE_MICRO_CHECK_FAIL()   \
+  do {                               \
+    if (micro_test::did_test_fail) { \
+      return kTfLiteError;           \
+    }                                \
+  } while (false)
+
 #endif  // TENSORFLOW_LITE_MICRO_TESTING_MICRO_TEST_H_
diff --git a/tensorflow/lite/micro/testing/test_with_arm_corstone_300.sh b/tensorflow/lite/micro/testing/test_with_arm_corstone_300.sh
index 9b39ee4..27635ba 100755
--- a/tensorflow/lite/micro/testing/test_with_arm_corstone_300.sh
+++ b/tensorflow/lite/micro/testing/test_with_arm_corstone_300.sh
@@ -1,5 +1,5 @@
 #!/bin/bash -e
-# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -36,7 +36,8 @@
 FVP+="-C mps3_board.telnetterminal0.start_telnet=0 "
 FVP+='-C mps3_board.uart0.out_file="-" '
 FVP+='-C mps3_board.uart0.unbuffered_output=1 '
-FVP+='-C mps3_board.uart0.shutdown_on_eot=1'
+FVP+='-C mps3_board.uart0.shutdown_on_eot=1 '
+FVP+='--stat'
 ${FVP} ${BINARY_TO_TEST} | tee ${MICRO_LOG_FILENAME}
 
 if [[ ${2} != "non_test_binary" ]]
diff --git a/tensorflow/lite/micro/tflite_bridge/BUILD b/tensorflow/lite/micro/tflite_bridge/BUILD
index 518015a..ea5efdf 100644
--- a/tensorflow/lite/micro/tflite_bridge/BUILD
+++ b/tensorflow/lite/micro/tflite_bridge/BUILD
@@ -38,9 +38,6 @@
         "micro_error_reporter.h",
     ],
     copts = micro_copts(),
-    visibility = [
-        "//tensorflow/lite/micro/tflite_bridge:__pkg__",
-    ],
     deps = [
         "//tensorflow/lite/core/api:error_reporter",
         "//tensorflow/lite/micro:micro_compatibility",
diff --git a/tensorflow/lite/micro/tools/BUILD b/tensorflow/lite/micro/tools/BUILD
index 60e3c3d..e3c6f0c 100644
--- a/tensorflow/lite/micro/tools/BUILD
+++ b/tensorflow/lite/micro/tools/BUILD
@@ -1,3 +1,5 @@
+load("@rules_python//python:defs.bzl", "py_binary", "py_library", "py_test")
+load("@flatbuffers//:build_defs.bzl", "flatbuffer_cc_library", "flatbuffer_py_library")
 load("@tflm_pip_deps//:requirements.bzl", "requirement")
 load("@pybind11_bazel//:build_defs.bzl", "pybind_extension")
 load("//tensorflow:extra_rules.bzl", "tflm_application_friends")
@@ -21,8 +23,8 @@
     name = "generate_cc_arrays_lib",
     srcs = ["generate_cc_arrays.py"],
     deps = [
-        requirement("numpy"),
         requirement("pillow"),
+        requirement("numpy"),
     ],
 )
 
@@ -39,8 +41,8 @@
     name = "generate_cc_arrays",
     srcs = ["generate_cc_arrays.py"],
     deps = [
-        requirement("numpy"),
         requirement("pillow"),
+        requirement("numpy"),
     ],
 )
 
@@ -70,9 +72,9 @@
     ],
     deps = [
         ":requantize_flatbuffer",
-        "//python/tflite_micro:runtime",
         requirement("numpy"),
-        requirement("tensorflow-cpu"),
+        requirement("tensorflow"),
+        "//python/tflite_micro:runtime",
     ],
 )
 
@@ -131,11 +133,33 @@
     ],
     deps = [
         ":model_transforms_utils",
-        "//tensorflow/lite/micro/python/interpreter/src:runtime",
-        "//tensorflow/lite/tools:flatbuffer_utils",
         "@absl_py//absl/logging",
         requirement("numpy"),
-        requirement("tensorflow-cpu"),
+        requirement("tensorflow"),
+        "//python/tflite_micro:runtime",
+        "//tensorflow/lite/tools:flatbuffer_utils",
+    ],
+)
+
+cc_binary(
+    name = "layer_by_layer_output_tool",
+    srcs = ["layer_by_layer.cc"],
+    deps = [
+        ":layer_by_layer_schema",
+        "//tensorflow/lite/c:c_api_types",
+        "//tensorflow/lite/c:common",
+        "//tensorflow/lite/kernels:op_macros",
+        "//tensorflow/lite/micro:micro_allocator",
+        "//tensorflow/lite/micro:micro_context",
+        "//tensorflow/lite/micro:micro_framework",
+        "//tensorflow/lite/micro:micro_log",
+        "//tensorflow/lite/micro:micro_resource_variable",
+        "//tensorflow/lite/micro:micro_utils",
+        "//tensorflow/lite/micro:op_resolvers",
+        "//tensorflow/lite/micro/kernels:kernel_util",
+        "//tensorflow/lite/micro/tools/benchmarking:op_resolver",
+        "//tensorflow/lite/schema:schema_fbs",
+        "@flatbuffers",
     ],
 )
 
@@ -167,8 +191,34 @@
     ],
     deps = [
         ":tflm_model_transforms_lib",
-        "//tensorflow/lite/micro/examples/recipes:resource_variables_lib",
         "@absl_py//absl/testing:parameterized",
-        requirement("tensorflow-cpu"),
+        requirement("tensorflow"),
+        "//tensorflow/lite/micro/examples/recipes:resource_variables_lib",
     ],
 )
+
+py_binary(
+    name = "layer_by_layer_debugger",
+    srcs = ["layer_by_layer_debugger.py"],
+    python_version = "PY3",
+    srcs_version = "PY3",
+    deps = [
+        ":layer_by_layer_schema_py",
+        ":model_transforms_utils",
+        "@absl_py//absl:app",
+        "@absl_py//absl/flags",
+        requirement("tensorflow"),
+        "//python/tflite_micro:runtime",
+        "//tensorflow/lite/tools:flatbuffer_utils",
+    ],
+)
+
+flatbuffer_cc_library(
+    name = "layer_by_layer_schema",
+    srcs = ["layer_by_layer_schema.fbs"],
+)
+
+flatbuffer_py_library(
+    name = "layer_by_layer_schema_py",
+    srcs = ["layer_by_layer_schema.fbs"],
+)
diff --git a/tensorflow/lite/micro/tools/Makefile.inc b/tensorflow/lite/micro/tools/Makefile.inc
new file mode 100644
index 0000000..adbe73a
--- /dev/null
+++ b/tensorflow/lite/micro/tools/Makefile.inc
@@ -0,0 +1,12 @@
+MICROLITE_TOOL_ROOT_DIR := $(TENSORFLOW_ROOT)tensorflow/lite/micro/tools/
+
+LAYER_BY_LAYER_OUPUT_SRCS := \
+$(MICROLITE_TOOL_ROOT_DIR)/layer_by_layer.cc \
+
+LAYER_BY_LAYER_OUPUT_HDRS := \
+$(MICROLITE_TOOL_ROOT_DIR)benchmarking/op_resolver.h \
+
+ifneq ($(TARGET), bluepill cortex_m_corstone_300 riscv32_generic hexagon)
+    $(eval $(call microlite_test,layer_by_layer_output_tool,\
+    $(LAYER_BY_LAYER_OUPUT_SRCS),$(LAYER_BY_LAYER_OUPUT_HDRS),))
+endif
diff --git a/tensorflow/lite/micro/tools/benchmarking/BUILD b/tensorflow/lite/micro/tools/benchmarking/BUILD
index 9759546..6691ac3 100644
--- a/tensorflow/lite/micro/tools/benchmarking/BUILD
+++ b/tensorflow/lite/micro/tools/benchmarking/BUILD
@@ -1,6 +1,7 @@
 cc_library(
     name = "op_resolver",
     hdrs = ["op_resolver.h"],
+    visibility = ["//tensorflow/lite/micro/tools:__subpackages__"],
     deps = ["//tensorflow/lite/micro:op_resolvers"],
 )
 
@@ -9,31 +10,24 @@
     srcs = ["metrics.cc"],
     hdrs = ["metrics.h"],
     deps = [
-        ":log_utils",
+        "//tensorflow/lite/kernels/internal:compatibility",
+        "//tensorflow/lite/micro:micro_log",
         "//tensorflow/lite/micro:micro_profiler",
         "//tensorflow/lite/micro:recording_allocators",
-        "//tensorflow/lite/micro/arena_allocator:recording_simple_memory_allocator",
-    ],
-)
-
-cc_library(
-    name = "log_utils",
-    srcs = ["log_utils.cc"],
-    hdrs = ["log_utils.h"],
-    deps = [
-        "//tensorflow/lite/micro:micro_log",
     ],
 )
 
 cc_library(
     name = "generic_benchmark_lib",
     srcs = ["generic_model_benchmark.cc"],
+    hdrs = ["show_meta_data.h"],
+    defines = ["GENERIC_BENCHMARK_NO_META_DATA"],
     deps = [
-        ":log_utils",
         ":metrics",
         ":op_resolver",
         "//tensorflow/lite/c:c_api_types",
         "//tensorflow/lite/c:common",
+        "//tensorflow/lite/micro:micro_context",
         "//tensorflow/lite/micro:micro_log",
         "//tensorflow/lite/micro:micro_profiler",
         "//tensorflow/lite/micro:op_resolvers",
diff --git a/tensorflow/lite/micro/tools/benchmarking/Makefile.inc b/tensorflow/lite/micro/tools/benchmarking/Makefile.inc
index 178d88b..396e701 100644
--- a/tensorflow/lite/micro/tools/benchmarking/Makefile.inc
+++ b/tensorflow/lite/micro/tools/benchmarking/Makefile.inc
@@ -1,18 +1,64 @@
 MICROLITE_BENCHMARK_ROOT_DIR := $(TENSORFLOW_ROOT)tensorflow/lite/micro/tools/benchmarking
 
+ifneq ($(GENERIC_BENCHMARK_MODEL_PATH),)
+    GENERIC_BENCHMARK_MODEL_DIR := $(dir $(GENERIC_BENCHMARK_MODEL_PATH))
+    GENERIC_BENCHMARK_MODEL_NAME := $(notdir $(basename $(GENERIC_BENCHMARK_MODEL_PATH)))
+    CXXFLAGS += -DGENERIC_BENCHMARK_USING_BUILTIN_MODEL
+    CXXFLAGS += -DGENERIC_BENCHMARK_MODEL_HEADER_PATH=\"$(GENERIC_BENCHMARK_MODEL_DIR)$(GENERIC_BENCHMARK_MODEL_NAME)_model_data.h\"
+    CXXFLAGS += -DGENERIC_BENCHMARK_MODEL_NAME=$(GENERIC_BENCHMARK_MODEL_NAME)
+ifneq ($(GENERIC_BENCHMARK_ARENA_SIZE),)
+    CXXFLAGS += -DGENERIC_BENCHMARK_TENSOR_ARENA_SIZE=$(GENERIC_BENCHMARK_ARENA_SIZE)
+endif
+
+    # model path includes $(TENSORFLOW_ROOT) as part of the make invocation
+    GENERIC_BENCHMARK_GENERATOR_INPUTS := $(GENERIC_BENCHMARK_MODEL_PATH)
+
+    GENERIC_BENCHMARK_GENERATED_SRCS := \
+    $(GENERATED_SRCS_DIR)$(GENERIC_BENCHMARK_MODEL_DIR)$(GENERIC_BENCHMARK_MODEL_NAME)_model_data.cc
+
+    GENERIC_BENCHMARK_GENERATED_HDRS := \
+    $(GENERATED_SRCS_DIR)$(GENERIC_BENCHMARK_MODEL_DIR)$(GENERIC_BENCHMARK_MODEL_NAME)_model_data.h
+endif
+
 GENERIC_BENCHMARK_SRCS := \
 $(MICROLITE_BENCHMARK_ROOT_DIR)/generic_model_benchmark.cc \
-$(MICROLITE_BENCHMARK_ROOT_DIR)/log_utils.cc \
-$(MICROLITE_BENCHMARK_ROOT_DIR)/metrics.cc
+$(MICROLITE_BENCHMARK_ROOT_DIR)/metrics.cc \
+$(GENERATED_SRCS_DIR)$(MICROLITE_BENCHMARK_ROOT_DIR)/show_meta_data.cc
 
 GENERIC_BENCHMARK_HDRS := \
 $(MICROLITE_BENCHMARK_ROOT_DIR)/op_resolver.h \
-$(MICROLITE_BENCHMARK_ROOT_DIR)/log_utils.h \
-$(MICROLITE_BENCHMARK_ROOT_DIR)/metrics.h
+$(MICROLITE_BENCHMARK_ROOT_DIR)/metrics.h \
+$(MICROLITE_BENCHMARK_ROOT_DIR)/show_meta_data.h
+
+# always rebuild these to catch MODEL_PATH and ARENA_SIZE changes on command line
+.PHONY: $(GENERATED_SRCS_DIR)$(MICROLITE_BENCHMARK_ROOT_DIR)/show_meta_data.cc
+.PHONY: $(MICROLITE_BENCHMARK_ROOT_DIR)/generic_model_benchmark.cc
+
+$(GENERATED_SRCS_DIR)$(MICROLITE_BENCHMARK_ROOT_DIR)/show_meta_data.cc:
+	CC="$(CC)" \
+	CXX="$(CXX)" \
+	CC_FLAGS="$(CCFLAGS)" \
+	CXX_FLAGS="$(CXXFLAGS)" \
+	KERNEL_OPTIMIZATION="$(KERNEL_OPTIMIZATION_LEVEL)" \
+	CORE_OPTIMIZATION="$(CORE_OPTIMIZATION_LEVEL)" \
+	THIRD_PARTY_KERNEL_OPTIMIZATION="$(THIRD_PARTY_KERNEL_OPTIMIZATION_LEVEL)" \
+	TARGET=$(TARGET) \
+	TARGET_ARCH=$(TARGET_ARCH) \
+	TENSORFLOW_ROOT="$(TENSORFLOW_ROOT)" \
+	OPTIMIZED_KERNEL=$(OPTIMIZED_KERNEL_DIR) \
+	BUILD_TYPE=$(BUILD_TYPE) \
+	XTENSA_CORE=$(XTENSA_CORE) \
+	XTENSA_BASE=$(XTENSA_BASE) \
+	XTENSA_TOOLS_VERSION=$(XTENSA_TOOLS_VERSION) \
+	TEMPLATE_FILE="$(MICROLITE_BENCHMARK_ROOT_DIR)/show_meta_data.cc.template" \
+	GENERATED_FILE="$(GENERATED_SRCS_DIR)$(MICROLITE_BENCHMARK_ROOT_DIR)/show_meta_data.cc" \
+	MODEL_FILE="$(GENERIC_BENCHMARK_MODEL_PATH)" \
+	$(MICROLITE_BENCHMARK_ROOT_DIR)/collect_meta_data.sh
 
 ifneq ($(TARGET),bluepill)
-ifneq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), hifi5 hifimini))
+ifneq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), hifimini))
     $(eval $(call microlite_test,tflm_benchmark,\
-    $(GENERIC_BENCHMARK_SRCS),$(GENERIC_BENCHMARK_HDRS),))
+    $(GENERIC_BENCHMARK_SRCS),$(GENERIC_BENCHMARK_HDRS),\
+    $(GENERIC_BENCHMARK_GENERATOR_INPUTS)))
 endif
-endif
\ No newline at end of file
+endif
diff --git a/tensorflow/lite/micro/tools/benchmarking/README.md b/tensorflow/lite/micro/tools/benchmarking/README.md
new file mode 100644
index 0000000..c203820
--- /dev/null
+++ b/tensorflow/lite/micro/tools/benchmarking/README.md
@@ -0,0 +1,414 @@
+# Generic Benchmarking Tool build/run instructions
+This tool can be used to benchmark any TFLite format model.  The tool can be
+compiled in one of two ways:
+1. Such that it takes command line arguments, allowing the path to the model
+file to be specified as a program argument
+2. With a model compiled into the tool, allowing use in any simulator or on
+any hardware platform
+
+Building the tool with the model compiled in uses two additional Makefile
+variables:
+* `GENERIC_BENCHMARK_MODEL_PATH`: the path to the TFLite format model file.  This
+can be a relative or absolute path.  This variable is required.
+* `GENERIC_BENCHMARK_ARENA_SIZE`: the size of the TFLM interpreter arena, in bytes.
+This variable is optional.
+
+## Tested, working targets
+* x86
+* cortex_m_qemu (no timing data)
+* Xtensa (p6, hifi3)
+* cortex_m_corstone_300
+
+## Tested, non-working targets
+* none currently
+
+## Build and run for x86
+Build for command line arguments:
+```
+make -f tensorflow/lite/micro/tools/make/Makefile tflm_benchmark -j$(nproc)
+```
+Run with command line arguments:
+```
+gen/linux_x86_64_default/bin/tflm_benchmark tensorflow/lite/micro/models/person_detect.tflite
+```
+
+Build and run with model compiled into tool:
+```
+make -f tensorflow/lite/micro/tools/make/Makefile BUILD_TYPE=default run_tflm_benchmark -j$(nproc) GENERIC_BENCHMARK_MODEL_PATH=tensorflow/lite/micro/models/person_detect.tflite GENERIC_BENCHMARK_ARENA_SIZE=`expr 150 \* 1024`
+```
+
+## Build and run for Xtensa
+Build and run with model compiled into tool:
+```
+make -f tensorflow/lite/micro/tools/make/Makefile TARGET=xtensa TARGET_ARCH=vision_p6 OPTIMIZED_KERNEL_DIR=xtensa XTENSA_CORE=P6_200528 BUILD_TYPE=default run_tflm_benchmark -j$(nproc) GENERIC_BENCHMARK_MODEL_PATH=/tmp/keyword_scrambled.tflite GENERIC_BENCHMARK_ARENA_SIZE=`expr 50 \* 1024`
+```
+
+## Build and run for Cortex-M using Corstone 300 simulator
+Build and run with model compiled into tool:
+```
+make -f tensorflow/lite/micro/tools/make/Makefile   TARGET=cortex_m_corstone_300 TARGET_ARCH=cortex-m4   OPTIMIZED_KERNEL_DIR=cmsis_nn   BUILD_TYPE=default run_tflm_benchmark -j$(nproc) GENERIC_BENCHMARK_MODEL_PATH=tensorflow/lite/micro/models/person_detect.tflite GENERIC_BENCHMARK_ARENA_SIZE=`expr 150 \* 1024`
+```
+
+## Build and run using Bazel
+
+This is only for the x86 command line argument build, and does not contain meta-data:
+```
+bazel build tensorflow/lite/micro/tools/benchmarking:tflm_benchmark
+bazel-bin/tensorflow/lite/micro/tools/benchmarking/tflm_benchmark tensorflow/lite/micro/models/person_detect.tflite
+```
+
+## Example output with meta-data and built-in model layer information
+
+This sample output is for Cortex-M using Corstone 300:
+```
+Configured arena size = 153600
+
+--------------------
+Compiled on:
+
+Fri May 17 03:36:59 PM PDT 2024
+--------------------
+Git SHA: a4390a1d73edf5a8d3affa1da60e1eba88e0cb13
+
+Git status:
+
+On branch main
+Your branch is up to date with 'origin/main'.
+--------------------
+C compiler: tensorflow/lite/micro/tools/make/downloads/gcc_embedded/bin/arm-none-eabi-gcc
+Version:
+
+arm-none-eabi-gcc (Arm GNU Toolchain 13.2.rel1 (Build arm-13.7)) 13.2.1 20231009
+Copyright (C) 2023 Free Software Foundation, Inc.
+This is free software; see the source for copying conditions.  There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+Flags:
+
+-Wimplicit-function-declaration -std=c11 -Werror -fno-unwind-tables -ffunction-sections 
+-fdata-sections -fmessage-length=0 -DTF_LITE_STATIC_MEMORY -DTF_LITE_DISABLE_X86_NEON 
+-DCMSIS_NN -DKERNELS_OPTIMIZED_FOR_SPEED -mcpu=cortex-m4+nofp -mfpu=auto 
+-DTF_LITE_MCU_DEBUG_LOG -mthumb -mfloat-abi=soft -funsigned-char -mlittle-endian 
+-fomit-frame-pointer -MD -DARMCM4
+
+C++ compiler: tensorflow/lite/micro/tools/make/downloads/gcc_embedded/bin/arm-none-eabi-g++
+Version:
+
+arm-none-eabi-g++ (Arm GNU Toolchain 13.2.rel1 (Build arm-13.7)) 13.2.1 20231009
+Copyright (C) 2023 Free Software Foundation, Inc.
+This is free software; see the source for copying conditions.  There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+Flags:
+
+-std=c++11 -fno-rtti -fno-exceptions -fno-threadsafe-statics -Wnon-virtual-dtor -Werror 
+-fno-unwind-tables -ffunction-sections -fdata-sections -fmessage-length=0 
+-DTF_LITE_STATIC_MEMORY -DTF_LITE_DISABLE_X86_NEON -Wsign-compare -Wdouble-promotion 
+-Wunused-variable -Wunused-function -Wswitch -Wvla -Wall -Wextra 
+-Wmissing-field-initializers -Wstrict-aliasing -Wno-unused-parameter -DCMSIS_NN 
+-DKERNELS_OPTIMIZED_FOR_SPEED -mcpu=cortex-m4+nofp -mfpu=auto -DTF_LITE_MCU_DEBUG_LOG 
+-mthumb -mfloat-abi=soft -funsigned-char -mlittle-endian -fomit-frame-pointer -MD 
+-DARMCM4 -DCMSIS_DEVICE_ARM_CORTEX_M_XX_HEADER_FILE="ARMCM4.h" 
+-DGENERIC_BENCHMARK_USING_BUILTIN_MODEL 
+-DGENERIC_BENCHMARK_MODEL_HEADER_PATH="tensorflow/lite/micro/models/person_detect_model_da
+ta.h" -DGENERIC_BENCHMARK_MODEL_NAME=person_detect 
+-DGENERIC_BENCHMARK_TENSOR_ARENA_SIZE=153600
+
+Optimization: kernel= -O2  core= -Os  third-party-kernel= -O2
+--------------------
+Target information:
+
+TARGET=cortex_m_corstone_300
+TARGET_ARCH=cortex-m4
+OPTIMIZATION=cmsis_nn
+BUILD_TYPE=default
+--------------------
+NN library download URLs:
+
+http://github.com/ARM-software/CMSIS-NN/archive/01dee38e6d6bfbbf202f0cd425bbea1731747d51.z
+ip
+
+NN library MD5 checksums:
+
+f20be93ededf42bb704c19f699a24313
+--------------------
+Model SHA1:
+
+bcafcaa99d2eaf089f0ca25d66f56a2177e93f76
+
+Model analysis:
+
+=== tensorflow/lite/micro/models/person_detect.tflite ===
+Your TFLite model has '1' subgraph(s). In the subgraph description below,
+T# represents the Tensor numbers. For example, in Subgraph#0, the DEPTHWISE_CONV_2D op 
+takes
+tensor #88 and tensor #0 and tensor #33 as input and produces tensor #34 as output.
+Subgraph#0(T#88) -> [T#87]
+  Op#0 DEPTHWISE_CONV_2D(T#88, T#0, T#33[3774, -107, -84394, -13908, 20697, ...]) -> 
+[T#34]
+  Op#1 DEPTHWISE_CONV_2D(T#34, T#9, T#52[31132, 28, 273, -2692, 7409, ...]) -> [T#51]
+  Op#2 CONV_2D(T#51, T#10, T#53[10064, 1130, -13056, -30284, -23349, ...]) -> [T#54]
+  Op#3 DEPTHWISE_CONV_2D(T#54, T#11, T#56[306, -158, 19181, -364, 6237, ...]) -> [T#55]
+  Op#4 CONV_2D(T#55, T#12, T#57[-7649, 12287, -4433, 5851, -188, ...]) -> [T#58]
+  Op#5 DEPTHWISE_CONV_2D(T#58, T#13, T#60[7297, -498, 263, -1975, 2260, ...]) -> [T#59]
+  Op#6 CONV_2D(T#59, T#14, T#61[-4742, -4160, 6985, 8647, 29773, ...]) -> [T#62]
+  Op#7 DEPTHWISE_CONV_2D(T#62, T#15, T#64[28588, 363, 27592, 22294, -4344, ...]) -> [T#63]
+  Op#8 CONV_2D(T#63, T#16, T#65[12683, 36581, 6206, 1236, 15834, ...]) -> [T#66]
+  Op#9 DEPTHWISE_CONV_2D(T#66, T#17, T#68[-6353, 9090, -30, -1019, -496, ...]) -> [T#67]
+  Op#10 CONV_2D(T#67, T#18, T#69[3895, -6563, -8843, -2066, -1372, ...]) -> [T#70]
+  Op#11 DEPTHWISE_CONV_2D(T#70, T#19, T#72[20437, -365, -2518, 20827, -904, ...]) -> 
+[T#71]
+  Op#12 CONV_2D(T#71, T#20, T#73[-10120, 9768, 3524, 3796, 6896, ...]) -> [T#74]
+  Op#13 DEPTHWISE_CONV_2D(T#74, T#21, T#76[-3969, -1910, -2425, -114, 4456, ...]) -> 
+[T#75]
+  Op#14 CONV_2D(T#75, T#22, T#77[-13202, 13929, -4357, 19492, 1971, ...]) -> [T#78]
+  Op#15 DEPTHWISE_CONV_2D(T#78, T#23, T#80[-6169, -10, -2788, 14420, -7457, ...]) -> 
+[T#79]
+  Op#16 CONV_2D(T#79, T#24, T#81[155, -3073, 291, -902, -9942, ...]) -> [T#82]
+  Op#17 DEPTHWISE_CONV_2D(T#82, T#25, T#84[-2063, 10755, -12037, -6417, 2147, ...]) -> 
+[T#83]
+  Op#18 CONV_2D(T#83, T#26, T#85[-1872, -7549, 13994, 3191, -614, ...]) -> [T#86]
+  Op#19 DEPTHWISE_CONV_2D(T#86, T#1, T#36[-6485, 294, 686, -6011, -5196, ...]) -> [T#35]
+  Op#20 CONV_2D(T#35, T#2, T#37[7116, 8066, 11755, 11674, 9983, ...]) -> [T#38]
+  Op#21 DEPTHWISE_CONV_2D(T#38, T#3, T#40[7735, 5235, 4334, -6485, 9397, ...]) -> [T#39]
+  Op#22 CONV_2D(T#39, T#4, T#41[2947, 10152, -7865, -554, -13760, ...]) -> [T#42]
+  Op#23 DEPTHWISE_CONV_2D(T#42, T#5, T#44[-4755, 7899, -488, -2954, 2990, ...]) -> [T#43]
+  Op#24 CONV_2D(T#43, T#6, T#45[-6269, -22458, 13332, -16368, 4435, ...]) -> [T#46]
+  Op#25 DEPTHWISE_CONV_2D(T#46, T#7, T#48[333, -4743, -310, -2471, 4804, ...]) -> [T#47]
+  Op#26 CONV_2D(T#47, T#8, T#49[6677, -3593, 3754, 26316, -4761, ...]) -> [T#50]
+  Op#27 AVERAGE_POOL_2D(T#50) -> [T#27]
+  Op#28 CONV_2D(T#27, T#30, T#29[16267, -17079]) -> [T#28]
+  Op#29 RESHAPE(T#28, T#32[1, 2]) -> [T#31]
+  Op#30 SOFTMAX(T#31) -> [T#87]
+Tensors of Subgraph#0
+  T#0(MobilenetV1/Conv2d_0/weights/read) shape:[1, 3, 3, 8], type:INT8 RO 72 bytes, 
+buffer: 68, data:[., y, ., g, ., ...]
+  T#1(MobilenetV1/Conv2d_10_depthwise/depthwise_weights/read) shape:[1, 3, 3, 128], 
+type:INT8 RO 1152 bytes, buffer: 72, data:[W, ., d, ., ., ...]
+  T#2(MobilenetV1/Conv2d_10_pointwise/weights/read) shape:[128, 1, 1, 128], type:INT8 RO 
+16384 bytes, buffer: 14, data:[., ., 
+, ., ., ...]
+  T#3(MobilenetV1/Conv2d_11_depthwise/depthwise_weights/read) shape:[1, 3, 3, 128], 
+type:INT8 RO 1152 bytes, buffer: 13, data:[., `, ., :, ., ...]
+  T#4(MobilenetV1/Conv2d_11_pointwise/weights/read) shape:[128, 1, 1, 128], type:INT8 RO 
+16384 bytes, buffer: 12, data:[., ., ., ., ., ...]
+  T#5(MobilenetV1/Conv2d_12_depthwise/depthwise_weights/read) shape:[1, 3, 3, 128], 
+type:INT8 RO 1152 bytes, buffer: 10, data:[z, ., ., ?, ., ...]
+  T#6(MobilenetV1/Conv2d_12_pointwise/weights/read) shape:[256, 1, 1, 128], type:INT8 RO 
+32768 bytes, buffer: 69, data:[/, ., ., ., #, ...]
+  T#7(MobilenetV1/Conv2d_13_depthwise/depthwise_weights/read) shape:[1, 3, 3, 256], 
+type:INT8 RO 2304 bytes, buffer: 7, data:[., ., w, ., ., ...]
+  T#8(MobilenetV1/Conv2d_13_pointwise/weights/read) shape:[256, 1, 1, 256], type:INT8 RO 
+65536 bytes, buffer: 5, data:[&, ., ., ., ., ...]
+  T#9(MobilenetV1/Conv2d_1_depthwise/depthwise_weights/read) shape:[1, 3, 3, 8], 
+type:INT8 RO 72 bytes, buffer: 60, data:[., ., ., ., ., ...]
+  T#10(MobilenetV1/Conv2d_1_pointwise/weights/read) shape:[16, 1, 1, 8], type:INT8 RO 128 
+bytes, buffer: 63, data:[., ., ., ., ., ...]
+  T#11(MobilenetV1/Conv2d_2_depthwise/depthwise_weights/read) shape:[1, 3, 3, 16], 
+type:INT8 RO 144 bytes, buffer: 58, data:[O, *, ., !, ., ...]
+  T#12(MobilenetV1/Conv2d_2_pointwise/weights/read) shape:[32, 1, 1, 16], type:INT8 RO 
+512 bytes, buffer: 61, data:[., 4, ., ., 8, ...]
+  T#13(MobilenetV1/Conv2d_3_depthwise/depthwise_weights/read) shape:[1, 3, 3, 32], 
+type:INT8 RO 288 bytes, buffer: 35, data:[., 1, ;, M, ., ...]
+  T#14(MobilenetV1/Conv2d_3_pointwise/weights/read) shape:[32, 1, 1, 32], type:INT8 RO 
+1024 bytes, buffer: 33, data:[., ., ., ., ., ...]
+  T#15(MobilenetV1/Conv2d_4_depthwise/depthwise_weights/read) shape:[1, 3, 3, 32], 
+type:INT8 RO 288 bytes, buffer: 32, data:[., ;, ., ., ., ...]
+  T#16(MobilenetV1/Conv2d_4_pointwise/weights/read) shape:[64, 1, 1, 32], type:INT8 RO 
+2048 bytes, buffer: 30, data:[., ., ., 5, ., ...]
+  T#17(MobilenetV1/Conv2d_5_depthwise/depthwise_weights/read) shape:[1, 3, 3, 64], 
+type:INT8 RO 576 bytes, buffer: 77, data:[G, ., ., ., ., ...]
+  T#18(MobilenetV1/Conv2d_5_pointwise/weights/read) shape:[64, 1, 1, 64], type:INT8 RO 
+4096 bytes, buffer: 28, data:[., 2, ., $, ., ...]
+  T#19(MobilenetV1/Conv2d_6_depthwise/depthwise_weights/read) shape:[1, 3, 3, 64], 
+type:INT8 RO 576 bytes, buffer: 27, data:[., 1, z, ., U, ...]
+  T#20(MobilenetV1/Conv2d_6_pointwise/weights/read) shape:[128, 1, 1, 64], type:INT8 RO 
+8192 bytes, buffer: 25, data:[5, ., ., ., V, ...]
+  T#21(MobilenetV1/Conv2d_7_depthwise/depthwise_weights/read) shape:[1, 3, 3, 128], 
+type:INT8 RO 1152 bytes, buffer: 23, data:[., ., ., ., ., ...]
+  T#22(MobilenetV1/Conv2d_7_pointwise/weights/read) shape:[128, 1, 1, 128], type:INT8 RO 
+16384 bytes, buffer: 21, data:[., ., ., ., ., ...]
+  T#23(MobilenetV1/Conv2d_8_depthwise/depthwise_weights/read) shape:[1, 3, 3, 128], 
+type:INT8 RO 1152 bytes, buffer: 71, data:[., ., ., ., Q, ...]
+  T#24(MobilenetV1/Conv2d_8_pointwise/weights/read) shape:[128, 1, 1, 128], type:INT8 RO 
+16384 bytes, buffer: 20, data:[@, ., 2, ., 8, ...]
+  T#25(MobilenetV1/Conv2d_9_depthwise/depthwise_weights/read) shape:[1, 3, 3, 128], 
+type:INT8 RO 1152 bytes, buffer: 80, data:[^, ., ~, ., ., ...]
+  T#26(MobilenetV1/Conv2d_9_pointwise/weights/read) shape:[128, 1, 1, 128], type:INT8 RO 
+16384 bytes, buffer: 16, data:[., .,  , ., %, ...]
+  T#27(MobilenetV1/Logits/AvgPool_1a/AvgPool) shape:[1, 1, 1, 256], type:INT8
+  T#28(MobilenetV1/Logits/Conv2d_1c_1x1/BiasAdd) shape:[1, 1, 1, 2], type:INT8
+  T#29(MobilenetV1/Logits/Conv2d_1c_1x1/Conv2D_bias) shape:[2], type:INT32 RO 8 bytes, 
+buffer: 2, data:[16267, -17079]
+  T#30(MobilenetV1/Logits/Conv2d_1c_1x1/weights/read) shape:[2, 1, 1, 256], type:INT8 RO 
+512 bytes, buffer: 3, data:[., %, ., ., ., ...]
+  T#31(MobilenetV1/Logits/SpatialSqueeze) shape:[1, 2], type:INT8
+  T#32(MobilenetV1/Logits/SpatialSqueeze_shape) shape:[2], type:INT32 RO 8 bytes, buffer: 
+1, data:[1, 2]
+  T#33(MobilenetV1/MobilenetV1/Conv2d_0/Conv2D_bias) shape:[8], type:INT32 RO 32 bytes, 
+buffer: 82, data:[3774, -107, -84394, -13908, 20697, ...]
+  T#34(MobilenetV1/MobilenetV1/Conv2d_0/Relu6) shape:[1, 48, 48, 8], type:INT8
+  T#35(MobilenetV1/MobilenetV1/Conv2d_10_depthwise/Relu6) shape:[1, 6, 6, 128], type:INT8
+  T#36(MobilenetV1/MobilenetV1/Conv2d_10_depthwise/depthwise_bias) shape:[128], 
+type:INT32 RO 512 bytes, buffer: 22, data:[-6485, 294, 686, -6011, -5196, ...]
+  T#37(MobilenetV1/MobilenetV1/Conv2d_10_pointwise/Conv2D_bias) shape:[128], type:INT32 
+RO 512 bytes, buffer: 70, data:[7116, 8066, 11755, 11674, 9983, ...]
+  T#38(MobilenetV1/MobilenetV1/Conv2d_10_pointwise/Relu6) shape:[1, 6, 6, 128], type:INT8
+  T#39(MobilenetV1/MobilenetV1/Conv2d_11_depthwise/Relu6) shape:[1, 6, 6, 128], type:INT8
+  T#40(MobilenetV1/MobilenetV1/Conv2d_11_depthwise/depthwise_bias) shape:[128], 
+type:INT32 RO 512 bytes, buffer: 19, data:[7735, 5235, 4334, -6485, 9397, ...]
+  T#41(MobilenetV1/MobilenetV1/Conv2d_11_pointwise/Conv2D_bias) shape:[128], type:INT32 
+RO 512 bytes, buffer: 11, data:[2947, 10152, -7865, -554, -13760, ...]
+  T#42(MobilenetV1/MobilenetV1/Conv2d_11_pointwise/Relu6) shape:[1, 6, 6, 128], type:INT8
+  T#43(MobilenetV1/MobilenetV1/Conv2d_12_depthwise/Relu6) shape:[1, 3, 3, 128], type:INT8
+  T#44(MobilenetV1/MobilenetV1/Conv2d_12_depthwise/depthwise_bias) shape:[128], 
+type:INT32 RO 512 bytes, buffer: 9, data:[-4755, 7899, -488, -2954, 2990, ...]
+  T#45(MobilenetV1/MobilenetV1/Conv2d_12_pointwise/Conv2D_bias) shape:[256], type:INT32 
+RO 1024 bytes, buffer: 8, data:[-6269, -22458, 13332, -16368, 4435, ...]
+  T#46(MobilenetV1/MobilenetV1/Conv2d_12_pointwise/Relu6) shape:[1, 3, 3, 256], type:INT8
+  T#47(MobilenetV1/MobilenetV1/Conv2d_13_depthwise/Relu6) shape:[1, 3, 3, 256], type:INT8
+  T#48(MobilenetV1/MobilenetV1/Conv2d_13_depthwise/depthwise_bias) shape:[256], 
+type:INT32 RO 1024 bytes, buffer: 6, data:[333, -4743, -310, -2471, 4804, ...]
+  T#49(MobilenetV1/MobilenetV1/Conv2d_13_pointwise/Conv2D_bias) shape:[256], type:INT32 
+RO 1024 bytes, buffer: 4, data:[6677, -3593, 3754, 26316, -4761, ...]
+  T#50(MobilenetV1/MobilenetV1/Conv2d_13_pointwise/Relu6) shape:[1, 3, 3, 256], type:INT8
+  T#51(MobilenetV1/MobilenetV1/Conv2d_1_depthwise/Relu6) shape:[1, 48, 48, 8], type:INT8
+  T#52(MobilenetV1/MobilenetV1/Conv2d_1_depthwise/depthwise_bias) shape:[8], type:INT32 
+RO 32 bytes, buffer: 56, data:[31132, 28, 273, -2692, 7409, ...]
+  T#53(MobilenetV1/MobilenetV1/Conv2d_1_pointwise/Conv2D_bias) shape:[16], type:INT32 RO 
+64 bytes, buffer: 36, data:[10064, 1130, -13056, -30284, -23349, ...]
+  T#54(MobilenetV1/MobilenetV1/Conv2d_1_pointwise/Relu6) shape:[1, 48, 48, 16], type:INT8
+  T#55(MobilenetV1/MobilenetV1/Conv2d_2_depthwise/Relu6) shape:[1, 24, 24, 16], type:INT8
+  T#56(MobilenetV1/MobilenetV1/Conv2d_2_depthwise/depthwise_bias) shape:[16], type:INT32 
+RO 64 bytes, buffer: 48, data:[306, -158, 19181, -364, 6237, ...]
+  T#57(MobilenetV1/MobilenetV1/Conv2d_2_pointwise/Conv2D_bias) shape:[32], type:INT32 RO 
+128 bytes, buffer: 62, data:[-7649, 12287, -4433, 5851, -188, ...]
+  T#58(MobilenetV1/MobilenetV1/Conv2d_2_pointwise/Relu6) shape:[1, 24, 24, 32], type:INT8
+  T#59(MobilenetV1/MobilenetV1/Conv2d_3_depthwise/Relu6) shape:[1, 24, 24, 32], type:INT8
+  T#60(MobilenetV1/MobilenetV1/Conv2d_3_depthwise/depthwise_bias) shape:[32], type:INT32 
+RO 128 bytes, buffer: 34, data:[7297, -498, 263, -1975, 2260, ...]
+  T#61(MobilenetV1/MobilenetV1/Conv2d_3_pointwise/Conv2D_bias) shape:[32], type:INT32 RO 
+128 bytes, buffer: 59, data:[-4742, -4160, 6985, 8647, 29773, ...]
+  T#62(MobilenetV1/MobilenetV1/Conv2d_3_pointwise/Relu6) shape:[1, 24, 24, 32], type:INT8
+  T#63(MobilenetV1/MobilenetV1/Conv2d_4_depthwise/Relu6) shape:[1, 12, 12, 32], type:INT8
+  T#64(MobilenetV1/MobilenetV1/Conv2d_4_depthwise/depthwise_bias) shape:[32], type:INT32 
+RO 128 bytes, buffer: 31, data:[28588, 363, 27592, 22294, -4344, ...]
+  T#65(MobilenetV1/MobilenetV1/Conv2d_4_pointwise/Conv2D_bias) shape:[64], type:INT32 RO 
+256 bytes, buffer: 76, data:[12683, 36581, 6206, 1236, 15834, ...]
+  T#66(MobilenetV1/MobilenetV1/Conv2d_4_pointwise/Relu6) shape:[1, 12, 12, 64], type:INT8
+  T#67(MobilenetV1/MobilenetV1/Conv2d_5_depthwise/Relu6) shape:[1, 12, 12, 64], type:INT8
+  T#68(MobilenetV1/MobilenetV1/Conv2d_5_depthwise/depthwise_bias) shape:[64], type:INT32 
+RO 256 bytes, buffer: 29, data:[-6353, 9090, -30, -1019, -496, ...]
+  T#69(MobilenetV1/MobilenetV1/Conv2d_5_pointwise/Conv2D_bias) shape:[64], type:INT32 RO 
+256 bytes, buffer: 84, data:[3895, -6563, -8843, -2066, -1372, ...]
+  T#70(MobilenetV1/MobilenetV1/Conv2d_5_pointwise/Relu6) shape:[1, 12, 12, 64], type:INT8
+  T#71(MobilenetV1/MobilenetV1/Conv2d_6_depthwise/Relu6) shape:[1, 6, 6, 64], type:INT8
+  T#72(MobilenetV1/MobilenetV1/Conv2d_6_depthwise/depthwise_bias) shape:[64], type:INT32 
+RO 256 bytes, buffer: 26, data:[20437, -365, -2518, 20827, -904, ...]
+  T#73(MobilenetV1/MobilenetV1/Conv2d_6_pointwise/Conv2D_bias) shape:[128], type:INT32 RO 
+512 bytes, buffer: 24, data:[-10120, 9768, 3524, 3796, 6896, ...]
+  T#74(MobilenetV1/MobilenetV1/Conv2d_6_pointwise/Relu6) shape:[1, 6, 6, 128], type:INT8
+  T#75(MobilenetV1/MobilenetV1/Conv2d_7_depthwise/Relu6) shape:[1, 6, 6, 128], type:INT8
+  T#76(MobilenetV1/MobilenetV1/Conv2d_7_depthwise/depthwise_bias) shape:[128], type:INT32 
+RO 512 bytes, buffer: 78, data:[-3969, -1910, -2425, -114, 4456, ...]
+  T#77(MobilenetV1/MobilenetV1/Conv2d_7_pointwise/Conv2D_bias) shape:[128], type:INT32 RO 
+512 bytes, buffer: 83, data:[-13202, 13929, -4357, 19492, 1971, ...]
+  T#78(MobilenetV1/MobilenetV1/Conv2d_7_pointwise/Relu6) shape:[1, 6, 6, 128], type:INT8
+  T#79(MobilenetV1/MobilenetV1/Conv2d_8_depthwise/Relu6) shape:[1, 6, 6, 128], type:INT8
+  T#80(MobilenetV1/MobilenetV1/Conv2d_8_depthwise/depthwise_bias) shape:[128], type:INT32 
+RO 512 bytes, buffer: 55, data:[-6169, -10, -2788, 14420, -7457, ...]
+  T#81(MobilenetV1/MobilenetV1/Conv2d_8_pointwise/Conv2D_bias) shape:[128], type:INT32 RO 
+512 bytes, buffer: 18, data:[155, -3073, 291, -902, -9942, ...]
+  T#82(MobilenetV1/MobilenetV1/Conv2d_8_pointwise/Relu6) shape:[1, 6, 6, 128], type:INT8
+  T#83(MobilenetV1/MobilenetV1/Conv2d_9_depthwise/Relu6) shape:[1, 6, 6, 128], type:INT8
+  T#84(MobilenetV1/MobilenetV1/Conv2d_9_depthwise/depthwise_bias) shape:[128], type:INT32 
+RO 512 bytes, buffer: 17, data:[-2063, 10755, -12037, -6417, 2147, ...]
+  T#85(MobilenetV1/MobilenetV1/Conv2d_9_pointwise/Conv2D_bias) shape:[128], type:INT32 RO 
+512 bytes, buffer: 15, data:[-1872, -7549, 13994, 3191, -614, ...]
+  T#86(MobilenetV1/MobilenetV1/Conv2d_9_pointwise/Relu6) shape:[1, 6, 6, 128], type:INT8
+  T#87(MobilenetV1/Predictions/Reshape_1) shape:[1, 2], type:INT8
+  T#88(input) shape:[1, 96, 96, 1], type:INT8
+---------------------------------------------------------------
+              Model size:     300568 bytes
+    Non-data buffer size:      81640 bytes (27.16 %)
+  Total data buffer size:     218928 bytes (72.84 %)
+    (Zero value buffers):          0 bytes (00.00 %)
+* Buffers of TFLite model are mostly used for constant tensors.
+  And zero value buffers are buffers filled with zeros.
+  Non-data buffers area are used to store operators, subgraphs and etc.
+  You can find more details from 
+https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/schema/schema.fbs
+--------------------
+TfliteGetModel took 4 ticks (0 ms).
+
+DEPTHWISE_CONV_2D took 224622 ticks (8 ms).
+DEPTHWISE_CONV_2D took 175917 ticks (7 ms).
+CONV_2D took 249560 ticks (9 ms).
+DEPTHWISE_CONV_2D took 84958 ticks (3 ms).
+CONV_2D took 145817 ticks (5 ms).
+DEPTHWISE_CONV_2D took 164915 ticks (6 ms).
+CONV_2D took 197283 ticks (7 ms).
+DEPTHWISE_CONV_2D took 41304 ticks (1 ms).
+CONV_2D took 99472 ticks (3 ms).
+DEPTHWISE_CONV_2D took 79969 ticks (3 ms).
+CONV_2D took 151505 ticks (6 ms).
+DEPTHWISE_CONV_2D took 20053 ticks (0 ms).
+CONV_2D took 78521 ticks (3 ms).
+DEPTHWISE_CONV_2D took 38127 ticks (1 ms).
+CONV_2D took 132862 ticks (5 ms).
+DEPTHWISE_CONV_2D took 38127 ticks (1 ms).
+CONV_2D took 132865 ticks (5 ms).
+DEPTHWISE_CONV_2D took 38127 ticks (1 ms).
+CONV_2D took 132859 ticks (5 ms).
+DEPTHWISE_CONV_2D took 38127 ticks (1 ms).
+CONV_2D took 132851 ticks (5 ms).
+DEPTHWISE_CONV_2D took 38127 ticks (1 ms).
+CONV_2D took 132853 ticks (5 ms).
+DEPTHWISE_CONV_2D took 9585 ticks (0 ms).
+CONV_2D took 78470 ticks (3 ms).
+DEPTHWISE_CONV_2D took 17473 ticks (0 ms).
+CONV_2D took 143615 ticks (5 ms).
+AVERAGE_POOL_2D took 2229 ticks (0 ms).
+CONV_2D took 386 ticks (0 ms).
+RESHAPE took 28 ticks (0 ms).
+SOFTMAX took 163 ticks (0 ms).
+
+"Unique Tag","Total ticks across all events with that tag."
+DEPTHWISE_CONV_2D, 1009431
+CONV_2D, 1808919
+AVERAGE_POOL_2D, 2229
+RESHAPE, 28
+SOFTMAX, 163
+"total number of ticks", 2820770
+
+[[ Table ]]: Arena
+        Arena   Bytes   % Arena
+        Total | 84436 |   100.00
+NonPersistent | 55296 |    65.49
+   Persistent | 29140 |    34.51
+
+[[ Table ]]: Allocations
+                  Allocation   Id    Used   Requested   Count   % Memory
+            Eval tensor data |  0 |  1068 |      1068 |    89 |      1.26
+      Persistent tensor data |  1 |    64 |        64 |     2 |      0.08
+Persistent quantization data |  2 |    40 |        40 |     4 |      0.05
+      Persistent buffer data |  3 | 25872 |     25704 |    90 |     30.64
+ Tensor variable buffer data |  4 |     0 |         0 |     0 |      0.00
+ Node and registration array |  5 |   992 |       992 |    31 |      1.17
+              Operation data |  6 |     0 |         0 |     0 |      0.00
+
+Application exit code: 0.
+
+Info: /OSCI/SystemC: Simulation stopped by user.
+[warning ][main@0][01 ns] Simulation stopped by user
+
+--- FVP_MPS3_Corstone_SSE_300 statistics: -------------------------------------
+Simulated time                          : 2.879993s
+User time                               : 2.027100s
+System time                             : 0.135914s
+Wall time                               : 2.663214s
+Performance index                       : 1.08
+cpu0                                    :  27.03 MIPS (    71999848 Inst)
+Memory highwater mark                   : 0x11919000 bytes ( 0.275 GB )
+-------------------------------------------------------------------------------
+```
diff --git a/tensorflow/lite/micro/tools/benchmarking/analyze_model.py b/tensorflow/lite/micro/tools/benchmarking/analyze_model.py
new file mode 100644
index 0000000..f2ff013
--- /dev/null
+++ b/tensorflow/lite/micro/tools/benchmarking/analyze_model.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python3
+# Copyright 2024 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+from absl import app
+from absl import flags
+
+import tensorflow as tf
+
+_MODEL_PATH = flags.DEFINE_string(
+    name='model_file',
+    default='',
+    help='path for the .tflite model file.',
+)
+
+
+def _main(_):
+  """outputs model analysis to stdout/stderr"""
+  tf.lite.experimental.Analyzer.analyze(model_path=_MODEL_PATH.value)
+
+
+if __name__ == '__main__':
+  app.run(_main)
diff --git a/tensorflow/lite/micro/tools/benchmarking/collect_meta_data.sh b/tensorflow/lite/micro/tools/benchmarking/collect_meta_data.sh
new file mode 100755
index 0000000..c60bdf3
--- /dev/null
+++ b/tensorflow/lite/micro/tools/benchmarking/collect_meta_data.sh
@@ -0,0 +1,177 @@
+#!/usr/bin/env bash
+# Copyright 2024 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+#
+# Collect generic benchmark meta data and insert resulting strings into
+# the file designated by TEMPLATE_FILE.
+#
+# Takes no arguments.
+#
+# Uses the following environment variables:
+# TEMPLATE_FILE - path to the template source file
+# GENERATED_FILE - path to the generated source file with substituted strings
+# TENSORFLOW_ROOT - path to the root of the source tree
+# MODEL_FILE - path to the .tflite model file
+# CC - path to C compiler
+# CXX - path to C++ compiler
+# CC_FLAGS - C compiler flags
+# CXX_FLAGS - C++ compiler flags
+# KERNEL_OPTIMIZATION - kernel optimization flags
+# CORE_OPTIMIZATION - core optimization flags
+# THIRD_PARTY_KERNEL_OPTIMIZATION - third party kernel optimization flags
+# TARGET - target platform (xtensa, cortex_m_corstone_300, etc.)
+# TARGET_ARCH - target architecture (hifi5, cortex-m0, etc.)
+# OPTIMIZED_KERNEL - optimized kernel (xtensa, cmsis_nn, etc.)
+# BUILD_TYPE - type of build (default, release, etc.)
+# XTENSA_CORE - Xtensa core specification
+# XTENSA_BASE - Xtensa base install directory
+# XTENSA_TOOLS_VERSION - Xtensa tooling version
+
+
+set -e
+
+source ${TENSORFLOW_ROOT}tensorflow/lite/micro/tools/ci_build/helper_functions.sh
+
+function substitute_strings() {
+  search="// %%%_$1_%%%"
+  lines=$(fold -w 90 -s <<< "$2")
+  SAVED_IFS=${IFS}
+  IFS=$'\n' lines_array=( ${lines} )
+  IFS=${SAVED_IFS}
+  replacement=()
+  for line in "${lines_array[@]}"; do
+    line=$(sed -e 's/"/\\"/g' <<< "${line}")
+    line=$(printf '"%s",\n    ' "${line}")
+    replacement+=( "${line}" )
+  done
+
+  tempfile=$(mktemp)
+
+  SEARCH_PATTERN="$search" REPLACEMENT_PATTERN="${replacement[@]}" awk '
+    BEGIN {
+        search = ENVIRON["SEARCH_PATTERN"]
+        replacement = ENVIRON["REPLACEMENT_PATTERN"]
+    }
+    s = index($0,search) {
+        $0 = substr($0,1,s-1) replacement substr($0,s+length(search))
+    }
+    { print }
+  ' "${GENERATED_FILE}" > ${tempfile}
+  mv ${tempfile} "${GENERATED_FILE}"
+}
+
+mkdir -p $(dirname ${GENERATED_FILE})
+cp -p ${TEMPLATE_FILE} ${GENERATED_FILE}
+
+# model analysis and SHA1
+if [[ ${MODEL_FILE} ]]; then
+  python3 -m pip install absl-py tensorflow
+  result=$(python3 \
+    "${TENSORFLOW_ROOT}tensorflow/lite/micro/tools/benchmarking/analyze_model.py" \
+    --model_file="${MODEL_FILE}" \
+    )
+  substitute_strings model_analysis_strings "${result}"
+
+  result=$(shasum -b "${MODEL_FILE}" | cut -f 1 -d ' ')
+  substitute_strings model_sha1_strings "${result}"
+fi
+
+# compile date
+result=$(date)
+substitute_strings compilation_date_strings "${result}"
+
+GIT_TENSORFLOW_ROOT="${TENSORFLOW_ROOT:-./}"
+set +e
+# Git repo commit information
+result=$(cd ${GIT_TENSORFLOW_ROOT} && git rev-parse --verify HEAD)
+if [[ $? != 0 ]]; then
+  result="<git commit information not available>"
+fi
+substitute_strings git_commit_strings "${result}"
+
+# Git repo status information
+result=$(cd ${GIT_TENSORFLOW_ROOT} && git status)
+if [[ $? != 0 ]]; then
+  result="<git status information not available>"
+fi
+substitute_strings git_status_strings "${result}"
+set -e
+
+# Compiler information
+result="${CC}"
+substitute_strings cc_name_strings "${result}"
+result=$("${CC}" --version)
+substitute_strings cc_version_strings "${result}"
+result="${CC_FLAGS}"
+substitute_strings cc_flags_strings "${result}"
+
+result="${CXX}"
+substitute_strings cxx_name_strings "${result}"
+result=$("${CXX}" --version)
+substitute_strings cxx_version_strings "${result}"
+result="${CXX_FLAGS}"
+substitute_strings cxx_flags_strings "${result}"
+
+result="kernel= ${KERNEL_OPTIMIZATION}"
+result+="  core= ${CORE_OPTIMIZATION}"
+result+="  third-party-kernel= ${THIRD_PARTY_KERNEL_OPTIMIZATION}"
+substitute_strings optimization_flag_strings "${result}"
+
+# Target information
+TARGET="${TARGET:-linux}"
+TARGET_ARCH="${TARGET_ARCH:-x86}"
+OPTIMIZED_KERNEL="${OPTIMIZED_KERNEL:-none}"
+BUILD_TYPE="${BUILD_TYPE:-default}"
+result=$(printf 'TARGET=%s\nTARGET_ARCH=%s\nOPTIMIZATION=%s\nBUILD_TYPE=%s\n' \
+  "${TARGET}" \
+  "${TARGET_ARCH}" \
+  "${OPTIMIZED_KERNEL}" \
+  "${BUILD_TYPE}" \
+)
+if [[ ${XTENSA_CORE} ]]; then
+  result+=$(printf '\nXTENSA_CORE=%s' "${XTENSA_CORE}")
+  result+=$(printf '\nXTENSA_BASE=%s' "${XTENSA_BASE}")
+  result+=$(printf '\nXTENSA_TOOLS_VERSION=%s' "${XTENSA_TOOLS_VERSION}")
+fi
+substitute_strings target_info_strings "${result}"
+
+download_scripts=()
+download_script_args=( "--no-downloads" )
+if [[ ${OPTIMIZED_KERNEL} == "cmsis_nn" ]]; then
+  download_scripts+=( "${TENSORFLOW_ROOT}tensorflow/lite/micro/tools/make/ext_libs/cmsis_nn_download.sh" )
+  download_script_args+=( "${TENSORFLOW_ROOT}" )
+elif [[ ${OPTIMIZED_KERNEL} == "xtensa" ]]; then
+  download_script_args+=( "${TARGET_ARCH}" "${TENSORFLOW_ROOT}" )
+  if [[ ${TARGET_ARCH} =~ ^(vision_p6)$ ]]; then
+    download_scripts+=( "${TENSORFLOW_ROOT}tensorflow/lite/micro/tools/make/ext_libs/xtensa_download.sh" )
+  elif [[ ${TARGET_ARCH} =~ ^(hifi3|hifi4|hifi5)$ ]]; then
+    download_scripts+=( "${TENSORFLOW_ROOT}tensorflow/lite/micro/tools/make/ext_libs/xtensa_download.sh" )
+    download_scripts+=( "${TENSORFLOW_ROOT}tensorflow/lite/micro/tools/make/ext_libs/xtensa_ndsp_download.sh" )
+  fi
+fi
+
+if [[ ${#download_scripts[@]} -gt 0 ]]; then
+  results_url=
+  results_md5=
+  for script in "${download_scripts[@]}"; do
+    results=$("${script}" "${download_script_args[@]}" 2>&1)
+    url=$(sed -rn 's/^LIBRARY_URL=(.*)$/\1/p' <<< "${results}")
+    results_url+=$(printf '\n%s' "${url}")
+    md5=$(sed -rn 's/^LIBRARY_MD5=(.*)$/\1/p' <<< "${results}")
+    results_md5+=$(printf '\n%s' "${md5}")
+  done
+  substitute_strings nn_library_url_strings "${results_url}"
+  substitute_strings nn_library_md5_strings "${results_md5}"
+fi
diff --git a/tensorflow/lite/micro/tools/benchmarking/generic_model_benchmark.cc b/tensorflow/lite/micro/tools/benchmarking/generic_model_benchmark.cc
index c1db8fa..f398963 100644
--- a/tensorflow/lite/micro/tools/benchmarking/generic_model_benchmark.cc
+++ b/tensorflow/lite/micro/tools/benchmarking/generic_model_benchmark.cc
@@ -1,4 +1,4 @@
-/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -17,11 +17,14 @@
 #include <sys/stat.h>
 #include <sys/types.h>
 
+#include <cstring>
 #include <memory>
 #include <random>
+#include <type_traits>
 
 #include "tensorflow/lite/c/c_api_types.h"
 #include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/micro/micro_context.h"
 #include "tensorflow/lite/micro/micro_log.h"
 #include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
 #include "tensorflow/lite/micro/micro_op_resolver.h"
@@ -29,11 +32,30 @@
 #include "tensorflow/lite/micro/recording_micro_allocator.h"
 #include "tensorflow/lite/micro/recording_micro_interpreter.h"
 #include "tensorflow/lite/micro/system_setup.h"
-#include "tensorflow/lite/micro/tools/benchmarking/log_utils.h"
 #include "tensorflow/lite/micro/tools/benchmarking/metrics.h"
 #include "tensorflow/lite/micro/tools/benchmarking/op_resolver.h"
+#include "tensorflow/lite/micro/tools/benchmarking/show_meta_data.h"
 #include "tensorflow/lite/schema/schema_generated.h"
 
+#if defined(GENERIC_BENCHMARK_USING_BUILTIN_MODEL)
+#if !defined(GENERIC_BENCHMARK_MODEL_HEADER_PATH)
+#error "GENERIC_BENCHMARK_MODEL_HEADER_PATH missing from CXXFLAGS"
+#endif  // !defined(GENERIC_BENCHMARK_MODEL_HEADER_PATH)
+#if !defined(GENERIC_BENCHMARK_MODEL_NAME)
+#error "GENERIC_BENCHMARK_MODEL_NAME missing from CXXFLAGS"
+#endif  // !defined(GENERIC_BENCHMARK_MODEL_NAME)
+
+#include GENERIC_BENCHMARK_MODEL_HEADER_PATH
+
+#define __MODEL_DATA(x) g_##x##_model_data
+#define _MODEL_DATA(x) __MODEL_DATA(x)
+#define MODEL_DATA _MODEL_DATA(GENERIC_BENCHMARK_MODEL_NAME)
+#define __MODEL_SIZE(x) g_##x##_model_data_size
+#define _MODEL_SIZE(x) __MODEL_SIZE(x)
+#define MODEL_SIZE _MODEL_SIZE(GENERIC_BENCHMARK_MODEL_NAME)
+
+#endif  // defined(GENERIC_BENCHMARK_USING_BUILTIN_MODEL)
+
 /*
  * Generic model benchmark.  Evaluates runtime performance of a provided model
  * with random inputs.
@@ -45,20 +67,20 @@
 
 using Profiler = ::tflite::MicroProfiler;
 
-using TflmOpResolver = tflite::MicroMutableOpResolver<96>;
-
-constexpr int kTfLiteAbort = -9;
-
 // Seed used for the random input. Input data shouldn't affect invocation timing
 // so randomness isn't really needed.
 constexpr uint32_t kRandomSeed = 0xFB;
 
-// Which format should be used to output debug information.
-constexpr PrettyPrintType kPrintType = PrettyPrintType::kTable;
+#if !defined(GENERIC_BENCHMARK_USING_BUILTIN_MODEL)
+constexpr size_t kTensorArenaSize = 3e6;
+constexpr size_t kModelSize = 2e6;
+#elif defined(GENERIC_BENCHMARK_TENSOR_ARENA_SIZE)
+constexpr size_t kTensorArenaSize = GENERIC_BENCHMARK_TENSOR_ARENA_SIZE;
+#else
+constexpr size_t kTensorArenaSize = 5e6 - MODEL_SIZE;
+#endif  // !defined(GENERIC_BENCHMARK_USING_BUILTIN_MODEL)
 
-constexpr size_t kTensorArenaSize = 1024 * 1024;
 constexpr int kNumResourceVariable = 100;
-constexpr size_t kModelSize = 511408;
 
 void SetRandomInput(const uint32_t random_seed,
                     tflite::MicroInterpreter& interpreter) {
@@ -76,8 +98,14 @@
   }
 }
 
+#if !defined(GENERIC_BENCHMARK_USING_BUILTIN_MODEL)
+
+struct FileCloser {
+  void operator()(FILE* file) { fclose(file); }
+};
+
 bool ReadFile(const char* file_name, void* buffer, size_t buffer_size) {
-  std::unique_ptr<FILE, decltype(&fclose)> file(fopen(file_name, "rb"), fclose);
+  std::unique_ptr<FILE, FileCloser> file(fopen(file_name, "rb"));
 
   const size_t bytes_read =
       fread(buffer, sizeof(char), buffer_size, file.get());
@@ -86,7 +114,11 @@
     return false;
   }
   if (!feof(file.get())) {
-    MicroPrintf("Model buffer is too small for the model.\n");
+    // Note that http://b/297592546 can mean that this error message is
+    // confusing.
+    MicroPrintf(
+        "Model buffer (%d bytes) is too small for the model (%d bytes).\n",
+        buffer_size, bytes_read);
     return false;
   }
   if (bytes_read == 0) {
@@ -96,17 +128,14 @@
 
   return true;
 }
+#endif  // !defined(GENERIC_BENCHMARK_USING_BUILTIN_MODEL)
 
-int Benchmark(const char* model_file_name) {
+int Benchmark(const uint8_t* model_data, tflite::PrettyPrintType print_type) {
   Profiler profiler;
   alignas(16) static uint8_t tensor_arena[kTensorArenaSize];
-  alignas(16) unsigned char model_file_content[kModelSize];
 
-  if (!ReadFile(model_file_name, model_file_content, kModelSize)) {
-    return -1;
-  }
   uint32_t event_handle = profiler.BeginEvent("TfliteGetModel");
-  const tflite::Model* model = tflite::GetModel(model_file_content);
+  const tflite::Model* model = tflite::GetModel(model_data);
   profiler.EndEvent(event_handle);
 
   TflmOpResolver op_resolver;
@@ -150,11 +179,50 @@
     }
   }
 
-  LogAllocatorEvents(*allocator, kPrintType);
+  LogAllocatorEvents(*allocator, print_type);
 
   return 0;
 }
 }  // namespace
 }  // namespace tflite
 
-int main(int argc, char** argv) { return tflite::Benchmark(argv[1]); }
+#if !defined(GENERIC_BENCHMARK_USING_BUILTIN_MODEL)
+void usage(const char* prog_name) {
+  MicroPrintf("usage: %s filename [--csv]", prog_name);
+}
+#endif  // !defined(GENERIC_BENCHMARK_USING_BUILTIN_MODEL)
+
+int main(int argc, char** argv) {
+  // Which format should be used to output debug information.
+  tflite::PrettyPrintType print_type = tflite::PrettyPrintType::kTable;
+  tflite::InitializeTarget();
+
+#if !defined(GENERIC_BENCHMARK_USING_BUILTIN_MODEL)
+  if (argc < 2 || argc > 3) {
+    usage(argv[0]);
+    return -1;
+  }
+  const char* model_filename = argv[1];
+
+  if (argc == 3) {
+    if (std::strcmp(argv[2], "--csv") == 0) {
+      print_type = tflite::PrettyPrintType::kCsv;
+    } else {
+      usage(argv[0]);
+      return -1;
+    }
+  }
+
+  alignas(16) static uint8_t model_data[tflite::kModelSize];
+
+  if (!tflite::ReadFile(model_filename, model_data, tflite::kModelSize)) {
+    return -1;
+  }
+#else
+  const uint8_t* model_data = MODEL_DATA;
+#endif  // !defined(GENERIC_BENCHMARK_USING_BUILTIN_MODEL)
+
+  MicroPrintf("\nConfigured arena size = %d\n", tflite::kTensorArenaSize);
+  tflite::GenericBenchmarkShowMetaData();
+  return tflite::Benchmark(model_data, print_type);
+}
diff --git a/tensorflow/lite/micro/tools/benchmarking/log_utils.cc b/tensorflow/lite/micro/tools/benchmarking/log_utils.cc
deleted file mode 100644
index 808e465..0000000
--- a/tensorflow/lite/micro/tools/benchmarking/log_utils.cc
+++ /dev/null
@@ -1,115 +0,0 @@
-/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/micro/tools/benchmarking/log_utils.h"
-
-#include <inttypes.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-namespace tflite {
-
-int GetLongestStringLength(const char strings[][kMaxStringLength],
-                           const int count) {
-  int max_length = 0;
-
-  for (int i = 0; i < count; ++i) {
-    int size = strlen(strings[i]);
-    if (size > max_length) {
-      max_length = size;
-    }
-  }
-
-  return max_length;
-}
-
-void FillColumnPadding(char* string, const int size, const int max_size,
-                       const int padding) {
-  FillString(string, max_size - size + padding, kMaxStringLength);
-}
-
-void FillString(char* string, const int size, const int buffer_size,
-                const char value) {
-  if (buffer_size <= (static_cast<int>(strlen(string)))) {
-    for (int i = 0; i < buffer_size; ++i) {
-      string[i] = (i < size) ? value : 0;
-    }
-  }
-}
-
-void MicroStrcat(char* output, const char* input, const int size) {
-  if (size < 0) {
-    strcat(output, input);  // NOLINT: strcat required due to no dynamic memory.
-  } else {
-    strncat(output, input, size);
-  }
-}
-
-void MicroStrcpy(char* output, const char* input) {
-  strcpy(output, input);  // NOLINT: strcpy required due to no dynamic memory.
-}
-
-void FormatIntegerDivide(char* output, const int64_t numerator,
-                         const int64_t denominator, const int decimal_places) {
-  int64_t multiplier = 1;
-  for (int i = 0; i < decimal_places; ++i) {
-    multiplier *= 10;
-  }
-
-  const int64_t total = numerator * multiplier / denominator;
-  const int whole = static_cast<int>(total / multiplier);
-  const int fractional = static_cast<int>(total % multiplier);
-  sprintf(output, "%d.%d", whole, fractional);  // NOLINT: sprintf is required.
-}
-
-void FormatAsPercentage(char* output, const int64_t numerator,
-                        const int64_t denominator, const int decimal_places) {
-  FormatIntegerDivide(output, numerator * 100, denominator, decimal_places);
-}
-
-void PrettyPrintTableHeader(PrettyPrintType type, const char* table_name) {
-  switch (type) {
-    case PrettyPrintType::kCsv:
-      MicroPrintf("[[ CSV ]]: %s", table_name);
-      break;
-    case PrettyPrintType::kTable:
-      MicroPrintf("[[ TABLE ]]: %s", table_name);
-  }
-}
-
-template <>
-void FormatNumber<int32_t>(char* output, int32_t value) {
-  sprintf(output, "%" PRId32, value);  // NOLINT: sprintf required.
-}
-
-template <>
-void FormatNumber<size_t>(char* output, size_t value) {
-  sprintf(output, "%zu", value);  // NOLINT: sprintf required.
-}
-
-template <>
-void FormatNumber<float>(char* output, float value) {
-  constexpr int64_t kDenominator = 1000;
-  FormatIntegerDivide(output, static_cast<int64_t>(value * kDenominator),
-                      kDenominator, 3);
-}
-
-template <>
-void FormatNumber<double>(char* output, double value) {
-  constexpr int64_t kDenominator = 1000;
-  FormatIntegerDivide(output, static_cast<int64_t>(value * kDenominator),
-                      kDenominator, 3);
-}
-}  // namespace tflite
diff --git a/tensorflow/lite/micro/tools/benchmarking/log_utils.h b/tensorflow/lite/micro/tools/benchmarking/log_utils.h
deleted file mode 100644
index ee73f48..0000000
--- a/tensorflow/lite/micro/tools/benchmarking/log_utils.h
+++ /dev/null
@@ -1,273 +0,0 @@
-/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef TFLM_BENCHMARK_INTERNAL_LOG_UTILS_H_
-#define TFLM_BENCHMARK_INTERNAL_LOG_UTILS_H_
-
-#include <algorithm>
-#include <cstdarg>
-#include <cstdint>
-#include <cstring>
-
-#include "tensorflow/lite/micro/micro_log.h"
-
-namespace tflite {
-
-// The maxmimum length of a string.
-static constexpr int kMaxStringLength = 32;
-
-// The maximum length of a table row, applies to the header as well.
-static constexpr int kMaxRowLength = 100;
-
-// The default padding between columns in a table.
-static constexpr int kDefaultColumnPadding = 4;
-
-// Defines how formatted data is printed to stdout.
-enum class PrettyPrintType {
-  // Prints as a CSV file.
-  kCsv,
-  // Prints as a formatted table.
-  kTable,
-};
-
-// Returns the length of the longest string in an array.
-// Args:
-// - strings: An array of strings.
-// - count: The number of strings in the array.
-int GetLongestStringLength(const char strings[][kMaxStringLength], int count);
-
-// Adds padding between two columns in a table.
-// ex) "hello" is being inserted into a column. The largest value in that column
-//     is 10, and there's a global padding of 4 spaces. Therefore, 9 spaces (10
-//     - 5 + 4) are added as padding.
-// Args:
-// - string: The input padding string.
-// - size: The size of the string that's being inserted into a column.
-// - max_size: The size of the largest string in the column.
-// - padding: The amount of padding to add to each column regardless of its
-//     size.
-void FillColumnPadding(char* string, int size, int max_size,
-                       int padding = kDefaultColumnPadding);
-
-// Fills a string with a specified value.
-// Args:
-// - string: The input string. This is filled in with the specified value.
-// - size: The size of the string after being filled in. This must be less than
-//     the allocated space for the string.
-// - buffer_size: The size of the string's buffer.
-// - value: The value to insert into the string. Defaults to a space.
-void FillString(char* string, int size, int buffer_size, char value = ' ');
-
-// Concatenates the input string onto the first.
-// Args:
-// - output: The destination string for where to append input.
-// - input: The input string to concatenate.
-// - size: The number of characters to concatenate from the first string. If
-//     negative, the whole input string will be concatenated.
-void MicroStrcat(char* output, const char* input, int size = -1);
-
-// Copies the input string into the output.
-void MicroStrcpy(char* output, const char* input);
-
-// Formats a division operation to have a specified number of decimal places.
-// Args:
-// - output: The output string to be formatted.
-// - numerator: The numerator in the division operation.
-// - denominator: The denominator in the division operation.
-// - decimal places: The number of decimal places to print to.
-void FormatIntegerDivide(char* output, int64_t numerator, int64_t denominator,
-                         int decimal_places);
-
-// Formats a division operation as a percentage.
-// Args:
-// - output: The output string to be formatted.
-// - numerator: The numerator in the division operation.
-// - denominator: The denominator in the division operation.
-// - decimal places: The number of decimal places to print to.
-void FormatAsPercentage(char* output, int64_t numerator, int64_t denominator,
-                        int decimal_places);
-
-void PrettyPrintTableHeader(PrettyPrintType type, const char* table_name);
-
-// Formats a number as a string.
-// Args:
-// - output: The location of where to write the formatted number.
-// - value: The value to write to a string.
-template <typename T>
-void FormatNumber(char* output, T value);
-
-// Pretty prints a table to stdout.
-// Note: kMaxRows and kColumns should describe the allocated size of the table,
-//       not the amount of data that is populated. It is required that all
-//       columns are filled out, but not all rows.
-//
-// ex) PrintTable<3, 25>(headers, data, 4);
-//     This will print a table with 3 columns and 4 rows. In this example, it
-//     is required that data is defined as char[3][25][kMaxStringLength] to
-//     properly print.
-//
-// op        cycles    cpu %
-// -------------------------
-// foo     | 1000     | 10
-// bar     | 2500     | 25
-// baz     | 1000     | 10
-// lorem   | 2000     | 20
-//
-// Args:
-// - headers: A 1D array of strings containing the headers of the table. This
-//     must be equal in size to kColumns.
-// - data: A 2D array of string data organized in [columns, rows]. As stated
-//     above, it is required that all columns are populated, but not all rows.
-// - rows: The number of populated rows in `data`.
-template <int kMaxRows, int kColumns>
-void PrintTable(const char headers[kColumns][kMaxStringLength],
-                const char data[kColumns][kMaxRows][kMaxStringLength],
-                const int rows) {
-  // Get the maximum width for each column in the table.
-  int max_column_width[kColumns];
-  for (int i = 0; i < kColumns; ++i) {
-    max_column_width[i] = std::max(GetLongestStringLength(data[i], rows),
-                                   static_cast<int>(strlen(headers[i])));
-  }
-
-  // Add padding between each item in the header so it can be printed on one
-  // line.
-  char header_spaces[kColumns][kMaxStringLength];
-  for (int i = 0; i < kColumns; ++i) {
-    FillColumnPadding(header_spaces[i], strlen(headers[i]), max_column_width[i],
-                      kDefaultColumnPadding + 2);
-  }
-
-  // Print the header.
-  char header[kMaxRowLength];
-  memset(header, 0, kMaxRowLength);
-  for (int i = 0; i < kColumns; ++i) {
-    MicroStrcat(header, headers[i]);
-    MicroStrcat(header, header_spaces[i]);
-  }
-  MicroPrintf("%s", header);
-
-  // Print a separator to separate the header from the data.
-  char separator[kMaxRowLength];
-  FillString(separator, strlen(header) - 1, kMaxRowLength, '-');
-  MicroPrintf("%s", separator);
-
-  for (int i = 0; i < rows; ++i) {
-    char spaces[kColumns][kMaxStringLength];
-    for (int j = 0; j < kColumns; ++j) {
-      FillColumnPadding(spaces[j], strlen(data[j][i]), max_column_width[j]);
-    }
-
-    char row[kMaxRowLength];
-    memset(row, 0, kMaxRowLength);
-
-    // Concatenate each column in a row with the format "[data][padding]| "
-    for (int j = 0; j < kColumns; ++j) {
-      MicroStrcat(row, data[j][i]);
-      MicroStrcat(row, spaces[j]);
-      MicroStrcat(row, "| ");
-    }
-
-    MicroPrintf("%s", row);
-  }
-
-  MicroPrintf(separator);
-  MicroPrintf("");
-}
-
-// Pretty prints a csv to stdout.
-// Note: kMaxRows and kColumns should describe the allocated size of the table,
-//       not the amount of data that is populated. It is required that all
-//       columns are filled out, but not all rows.
-//
-// ex)
-// op,cycles,%cpu
-// foo,1000,10
-// bar,2500,25
-// baz,1000,10
-//
-// Args:
-// - headers: A 1D array of strings containing the headers of the table. This
-//     must be equal in size to kColumns.
-// - data: A 2D array of string data organized in [columns, rows]. As stated
-//     above, it is required that all columns are populated, but not all rows.
-// - rows: The number of populated rows in `data`.
-template <int kMaxRows, int kColumns>
-void PrintCsv(const char headers[kColumns][kMaxStringLength],
-              const char data[kColumns][kMaxRows][kMaxStringLength],
-              const int rows) {
-  char header[kMaxRowLength];
-  memset(header, 0, kMaxRowLength);
-  for (int i = 0; i < kColumns; ++i) {
-    MicroStrcat(header, headers[i]);
-    if (i < kColumns - 1) {
-      MicroStrcat(header, ",");
-    }
-  }
-
-  MicroPrintf("%s", header);
-
-  char row[kMaxRowLength];
-  for (int i = 0; i < rows; ++i) {
-    memset(row, 0, kMaxRowLength);
-    for (int j = 0; j < kColumns; ++j) {
-      MicroStrcat(row, data[j][i]);
-      if (j < kColumns - 1) {
-        MicroStrcat(row, ",");
-      }
-    }
-
-    MicroPrintf("%s", row);
-  }
-
-  MicroPrintf("");  // Serves as a new line.
-}
-
-// Prints a 2D array of strings in a formatted manner along with a table name
-// that includes the table type.
-//
-// Note: kMaxRows and kColumns should describe the allocated size of the table,
-//       not the amount of data that is populated. It is required that all
-//       columns are filled out, but not all rows.
-//
-// ex) PrettyPrint::kCsv will print a csv with a [[ CSV ]]: table_name header.
-//
-// Args:
-// - headers: A 1D array of strings containing the headers of the table. This
-//     must be equal in size to kColumns.
-// - data: A 2D array of string data organized in [columns, rows]. As stated
-//     above, it is required that all columns are populated, but not all rows.
-// - rows: The number of populated rows in `data`.
-// - type: The format type that should be used to pretty print.
-// - table_name: The name of the table to be printed alongside the format type.
-template <int kMaxRows, int kColumns>
-void PrintFormattedData(const char headers[kColumns][kMaxStringLength],
-                        const char data[kColumns][kMaxRows][kMaxStringLength],
-                        const int rows, const PrettyPrintType type,
-                        const char* table_name) {
-  PrettyPrintTableHeader(type, table_name);
-  switch (type) {
-    case PrettyPrintType::kCsv:
-      PrintCsv<kMaxRows, kColumns>(headers, data, rows);
-      break;
-    case PrettyPrintType::kTable:
-      PrintTable<kMaxRows, kColumns>(headers, data, rows);
-      break;
-  }
-}
-
-}  // namespace tflite
-
-#endif  // TFLM_BENCHMARK_INTERNAL_LOG_UTILS_H_
diff --git a/tensorflow/lite/micro/tools/benchmarking/metrics.cc b/tensorflow/lite/micro/tools/benchmarking/metrics.cc
index 115d879..3a4bf7e 100644
--- a/tensorflow/lite/micro/tools/benchmarking/metrics.cc
+++ b/tensorflow/lite/micro/tools/benchmarking/metrics.cc
@@ -15,44 +15,70 @@
 
 #include "tensorflow/lite/micro/tools/benchmarking/metrics.h"
 
-#include <sys/types.h>
-
 #include <cstddef>
+#include <cstring>
+#include <iterator>
+#include <type_traits>
+#include <utility>
 
-#include "tensorflow/lite/micro/tools/benchmarking/log_utils.h"
+#include "tensorflow/lite/kernels/internal/compatibility.h"
+#include "tensorflow/lite/micro/micro_log.h"
 
 namespace tflite {
 
-void LogArenaAllocations(
+namespace {
+
+struct LogArenaRecord {
+  const char* title;
+  int allocations;
+  float percentage;
+};
+
+struct LogAllocationRecord {
+  const char* title;
+  int type;
+  int used_bytes;
+  int requested_bytes;
+  int count;
+  float percentage;
+};
+
+constexpr int kArenaRows = 3;
+constexpr int kArenaColumns = 3;
+
+constexpr int kAllocationTypes = 7;
+constexpr int kAllocationColumns = 6;
+
+constexpr int kMaxBufSize = 100;
+
+LogArenaRecord GetLogArenaRecord(
     const tflite::RecordingSingleArenaBufferAllocator* allocator,
-    const PrettyPrintType type) {
-  constexpr int kArenaRows = 3;
-  constexpr int kArenaCols = 3;
+    int row_index) {
+  TFLITE_DCHECK(row_index < kArenaRows);
 
   const size_t total_bytes = allocator->GetUsedBytes();
+  const size_t allocations[] = {total_bytes,
+                                allocator->GetNonPersistentUsedBytes(),
+                                allocator->GetPersistentUsedBytes()};
+  static_assert(std::extent<decltype(allocations)>::value == kArenaRows,
+                "kArenaRows mismatch");
+  const char* titles[] = {"Total", "NonPersistent", "Persistent"};
+  static_assert(std::extent<decltype(titles)>::value == kArenaRows,
+                "kArenaRows mismatch");
 
-  size_t allocations[kArenaRows] = {total_bytes,
-                                    allocator->GetNonPersistentUsedBytes(),
-                                    allocator->GetPersistentUsedBytes()};
-  char titles[kArenaRows][kMaxStringLength] = {"Total", "Head", "Tail"};
-  char headers[kArenaRows][kMaxStringLength] = {"Arena", "Bytes", "% Arena"};
+  LogArenaRecord record = {};
+  record.title = titles[row_index];
+  record.allocations = allocations[row_index];
+  record.percentage = record.allocations * 100.0f / total_bytes;
 
-  char data[kArenaCols][kArenaRows][kMaxStringLength];
-  for (int i = 0; i < kArenaRows; ++i) {
-    MicroStrcpy(data[0][i], titles[i]);
-    FormatNumber<int32_t>(data[1][i], allocations[i]);
-    FormatAsPercentage(data[2][i], static_cast<int64_t>(allocations[i]),
-                       static_cast<int64_t>(total_bytes), 2);
-  }
-
-  PrintFormattedData<kArenaRows, kArenaCols>(headers, data, kArenaRows, type,
-                                             "Arena");
+  return record;
 }
 
-void LogAllocations(const tflite::RecordingMicroAllocator& allocator,
-                    const PrettyPrintType type) {
-  constexpr int kAllocationTypes = 7;
-  tflite::RecordedAllocationType types[kAllocationTypes] = {
+LogAllocationRecord GetLogAllocationRecord(
+    const tflite::RecordingMicroAllocator& allocator, int row_index) {
+  TFLITE_DCHECK(row_index < kAllocationTypes);
+
+  const tflite::RecordedAllocationType types[] = {
       tflite::RecordedAllocationType::kTfLiteEvalTensorData,
       tflite::RecordedAllocationType::kPersistentTfLiteTensorData,
       tflite::RecordedAllocationType::kPersistentTfLiteTensorQuantizationData,
@@ -60,38 +86,227 @@
       tflite::RecordedAllocationType::kTfLiteTensorVariableBufferData,
       tflite::RecordedAllocationType::kNodeAndRegistrationArray,
       tflite::RecordedAllocationType::kOpData};
-
-  char titles[kAllocationTypes][kMaxStringLength] = {
-      "Eval tensor data",
-      "Persistent tensor data",
-      "Persistent quantization data",
-      "Persistent buffer data",
-      "Tensor variable buffer data",
-      "Node and registration array",
-      "Operation data"};
-
-  constexpr int kColumns = 6;
-  const char headers[kColumns][kMaxStringLength] = {
-      "Allocation", "Id", "Used", "Requested", "Count", "% Memory"};
-
+  static_assert(std::extent<decltype(types)>::value == kAllocationTypes,
+                "kAllocationTypes mismatch");
+  const char* titles[] = {"Eval tensor data",
+                          "Persistent tensor data",
+                          "Persistent quantization data",
+                          "Persistent buffer data",
+                          "Tensor variable buffer data",
+                          "Node and registration array",
+                          "Operation data"};
+  static_assert(std::extent<decltype(titles)>::value == kAllocationTypes,
+                "kAllocationTypes mismatch");
   const size_t total_bytes =
       allocator.GetSimpleMemoryAllocator()->GetUsedBytes();
+  tflite::RecordedAllocation allocation =
+      allocator.GetRecordedAllocation(types[row_index]);
 
-  char data[kColumns][kAllocationTypes][kMaxStringLength];
-  for (int i = 0; i < kAllocationTypes; ++i) {
-    tflite::RecordedAllocation allocation =
-        allocator.GetRecordedAllocation(types[i]);
-    MicroStrcpy(data[0][i], titles[i]);
-    FormatNumber<int32_t>(data[1][i], static_cast<int>(types[i]));
-    FormatNumber<int32_t>(data[2][i], allocation.used_bytes);
-    FormatNumber<int32_t>(data[3][i], allocation.requested_bytes);
-    FormatNumber<int32_t>(data[4][i], allocation.count);
-    FormatAsPercentage(data[5][i], static_cast<int64_t>(allocation.used_bytes),
-                       static_cast<int64_t>(total_bytes), 2);
+  LogAllocationRecord record = {};
+  record.title = titles[row_index];
+  record.type = static_cast<int>(types[row_index]);
+  record.used_bytes = allocation.used_bytes;
+  record.requested_bytes = allocation.requested_bytes;
+  record.count = allocation.count;
+  record.percentage = allocation.used_bytes * 100.0f / total_bytes;
+
+  return record;
+}
+
+template <int kColumns>
+void UpdateColumnWidths(int (&widths)[kColumns], const char* s[kColumns]) {
+  for (int i = 0; i < kColumns; i++) {
+    widths[i] = std::max(widths[i], static_cast<int>(std::strlen(s[i])));
+  }
+}
+
+void UpdateColumnWidths(int (&widths)[kArenaColumns],
+                        const LogArenaRecord& record) {
+  char buf[kMaxBufSize];
+  int count;
+
+  count = MicroSnprintf(buf, kMaxBufSize, "%s", record.title);
+  widths[0] = std::max(widths[0], count);
+  count = MicroSnprintf(buf, kMaxBufSize, "%d", record.allocations);
+  widths[1] = std::max(widths[1], count);
+  count = MicroSnprintf(buf, kMaxBufSize, "%.2f",
+                        static_cast<double>(record.percentage));
+  widths[2] = std::max(widths[2], count);
+}
+
+void UpdateColumnWidths(int (&widths)[kAllocationColumns],
+                        const LogAllocationRecord& record) {
+  char buf[kMaxBufSize];
+  int count;
+
+  count = MicroSnprintf(buf, kMaxBufSize, "%s", record.title);
+  widths[0] = std::max(widths[0], count);
+  count = MicroSnprintf(buf, kMaxBufSize, "%d", record.type);
+  widths[1] = std::max(widths[1], count);
+  count = MicroSnprintf(buf, kMaxBufSize, "%d", record.used_bytes);
+  widths[2] = std::max(widths[2], count);
+  count = MicroSnprintf(buf, kMaxBufSize, "%d", record.requested_bytes);
+  widths[3] = std::max(widths[3], count);
+  count = MicroSnprintf(buf, kMaxBufSize, "%d", record.count);
+  widths[4] = std::max(widths[4], count);
+  count = MicroSnprintf(buf, kMaxBufSize, "%.2f",
+                        static_cast<double>(record.percentage));
+  widths[5] = std::max(widths[5], count);
+}
+
+using BufferDatum = std::tuple<char*, char*>;
+
+template <typename T>
+BufferDatum AddTableColumnValue(const BufferDatum& buffer, const char* format,
+                                int column_width, T value,
+                                const char* separator = nullptr) {
+  char* p;
+  char* p_end;
+  std::tie(p, p_end) = buffer;
+  int count = MicroSnprintf(p, p_end - p, format, column_width, value);
+  p += count;
+  if (separator != nullptr && p < p_end) {
+    count = MicroSnprintf(p, p_end - p, separator);
+    p += count;
   }
 
-  PrintFormattedData<kAllocationTypes, kColumns>(
-      headers, data, kAllocationTypes, type, "Allocations");
+  if (p > p_end) {
+    p = p_end;
+  }
+
+  return std::make_tuple(p, p_end);
+}
+
+}  // namespace
+
+void LogArenaAllocations(
+    const tflite::RecordingSingleArenaBufferAllocator* allocator,
+    const PrettyPrintType type) {
+  const char* headers[] = {"Arena", "Bytes", "%% Arena"};
+  static_assert(std::extent<decltype(headers)>::value == kArenaColumns,
+                "kArenaColumns mismatch");
+  char buffer[kMaxBufSize];
+  BufferDatum buffer_datum =
+      std::make_tuple(std::begin(buffer), std::end(buffer));
+  int column_widths[kArenaColumns] = {};
+
+  const char* output_type;
+  const char* string_format;
+  if (type == PrettyPrintType::kCsv) {
+    output_type = "CSV";
+    string_format = "\"%*s\"";
+  } else {
+    output_type = "Table";
+    string_format = "%*s";
+
+    UpdateColumnWidths<kArenaColumns>(column_widths, headers);
+    for (int i = 0; i < kArenaRows; i++) {
+      LogArenaRecord record = GetLogArenaRecord(allocator, i);
+      UpdateColumnWidths(column_widths, record);
+    }
+  }
+
+  MicroPrintf("[[ %s ]]: Arena", output_type);
+
+  for (int i = 0; i < kArenaColumns; i++) {
+    // create header
+    const char* separator = nullptr;
+    if (i != kArenaColumns - 1) {
+      // separator for all but last column value
+      if (type == PrettyPrintType::kCsv) {
+        separator = ",";
+      } else {
+        separator = "   ";
+      }
+    }
+    buffer_datum = AddTableColumnValue(buffer_datum, string_format,
+                                       column_widths[i], headers[i], separator);
+  }
+  MicroPrintf(buffer);
+
+  for (int i = 0; i < kArenaRows; ++i) {
+    // create rows
+    const char* separator = (type == PrettyPrintType::kCsv) ? "," : " | ";
+    buffer_datum = std::make_tuple(std::begin(buffer), std::end(buffer));
+    LogArenaRecord record = GetLogArenaRecord(allocator, i);
+    buffer_datum = AddTableColumnValue(
+        buffer_datum, string_format, column_widths[0], record.title, separator);
+    buffer_datum = AddTableColumnValue(buffer_datum, "%*d", column_widths[1],
+                                       record.allocations, separator);
+    buffer_datum = AddTableColumnValue(buffer_datum, "%*.2f", column_widths[2],
+                                       static_cast<double>(record.percentage));
+    MicroPrintf(buffer);
+  }
+
+  MicroPrintf("");  // output newline
+}
+
+void LogAllocations(const tflite::RecordingMicroAllocator& allocator,
+                    const PrettyPrintType type) {
+  const char* headers[] = {"Allocation", "Id",    "Used",
+                           "Requested",  "Count", "%% Memory"};
+  static_assert(std::extent<decltype(headers)>::value == kAllocationColumns,
+                "kAllocationColumns mismatch");
+  char buffer[kMaxBufSize];
+  BufferDatum buffer_datum =
+      std::make_tuple(std::begin(buffer), std::end(buffer));
+  int column_widths[kAllocationColumns] = {};
+
+  const char* output_type;
+  const char* string_format;
+  if (type == PrettyPrintType::kCsv) {
+    output_type = "CSV";
+    string_format = "\"%*s\"";
+  } else {
+    output_type = "Table";
+    string_format = "%*s";
+
+    UpdateColumnWidths<kAllocationColumns>(column_widths, headers);
+    for (int i = 0; i < kAllocationTypes; i++) {
+      LogAllocationRecord record = GetLogAllocationRecord(allocator, i);
+      UpdateColumnWidths(column_widths, record);
+    }
+  }
+
+  MicroPrintf("[[ %s ]]: Allocations", output_type);
+
+  for (int i = 0; i < kAllocationColumns; i++) {
+    // create header
+    const char* separator = nullptr;
+    if (i != kAllocationColumns - 1) {
+      // separator for all but last column value
+      if (type == PrettyPrintType::kCsv) {
+        separator = ",";
+      } else {
+        separator = "   ";
+      }
+    }
+    buffer_datum = AddTableColumnValue(buffer_datum, string_format,
+                                       column_widths[i], headers[i], separator);
+  }
+  MicroPrintf(buffer);
+
+  for (int i = 0; i < kAllocationTypes; ++i) {
+    // create rows
+    const char* separator = (type == PrettyPrintType::kCsv) ? "," : " | ";
+    buffer_datum = std::make_tuple(std::begin(buffer), std::end(buffer));
+    LogAllocationRecord record = GetLogAllocationRecord(allocator, i);
+    buffer_datum = AddTableColumnValue(
+        buffer_datum, string_format, column_widths[0], record.title, separator);
+    buffer_datum = AddTableColumnValue(buffer_datum, "%*d", column_widths[1],
+                                       record.type, separator);
+    buffer_datum = AddTableColumnValue(buffer_datum, "%*d", column_widths[2],
+                                       record.used_bytes, separator);
+    buffer_datum = AddTableColumnValue(buffer_datum, "%*d", column_widths[3],
+                                       record.requested_bytes, separator);
+    buffer_datum = AddTableColumnValue(buffer_datum, "%*d", column_widths[4],
+                                       record.count, separator);
+    buffer_datum = AddTableColumnValue(buffer_datum, "%*.2f", column_widths[5],
+                                       static_cast<double>(record.percentage));
+    MicroPrintf(buffer);
+  }
+
+  MicroPrintf("");  // output newline
 }
 
 void LogAllocatorEvents(const tflite::RecordingMicroAllocator& allocator,
@@ -99,4 +314,5 @@
   LogArenaAllocations(allocator.GetSimpleMemoryAllocator(), type);
   LogAllocations(allocator, type);
 }
+
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/tools/benchmarking/metrics.h b/tensorflow/lite/micro/tools/benchmarking/metrics.h
index 5c0c880..996cde1 100644
--- a/tensorflow/lite/micro/tools/benchmarking/metrics.h
+++ b/tensorflow/lite/micro/tools/benchmarking/metrics.h
@@ -16,17 +16,19 @@
 #ifndef TFLM_BENCHMARK_INTERNAL_METRICS_H_
 #define TFLM_BENCHMARK_INTERNAL_METRICS_H_
 
-#include <stdio.h>
-
-#include <cmath>
-#include <cstdint>
-
 #include "tensorflow/lite/micro/micro_profiler.h"
 #include "tensorflow/lite/micro/recording_micro_allocator.h"
-#include "tensorflow/lite/micro/tools/benchmarking/log_utils.h"
 
 namespace tflite {
 
+// Defines how formatted data is printed to stdout.
+enum class PrettyPrintType {
+  // Prints as a CSV file.
+  kCsv,
+  // Prints as a formatted table.
+  kTable,
+};
+
 // Logs the allocation events. Prints out two tables, one for the arena
 // allocations, and one for each type of TFLM allocation type.
 // Args:
diff --git a/tensorflow/lite/micro/tools/benchmarking/op_resolver.h b/tensorflow/lite/micro/tools/benchmarking/op_resolver.h
index 447741b..9b98849 100644
--- a/tensorflow/lite/micro/tools/benchmarking/op_resolver.h
+++ b/tensorflow/lite/micro/tools/benchmarking/op_resolver.h
@@ -23,15 +23,17 @@
 
 namespace tflite {
 
-inline TfLiteStatus CreateOpResolver(
-    tflite::MicroMutableOpResolver<96>& op_resolver) {
-  TF_LITE_ENSURE_STATUS(op_resolver.AddFullyConnected());
-  TF_LITE_ENSURE_STATUS(op_resolver.AddAdd());
+using TflmOpResolver = MicroMutableOpResolver<113>;
+
+inline TfLiteStatus CreateOpResolver(TflmOpResolver& op_resolver) {
   TF_LITE_ENSURE_STATUS(op_resolver.AddAbs());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddAdd());
   TF_LITE_ENSURE_STATUS(op_resolver.AddAddN());
   TF_LITE_ENSURE_STATUS(op_resolver.AddArgMax());
   TF_LITE_ENSURE_STATUS(op_resolver.AddArgMin());
   TF_LITE_ENSURE_STATUS(op_resolver.AddAssignVariable());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddAveragePool2D());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddBatchMatMul());
   TF_LITE_ENSURE_STATUS(op_resolver.AddBatchToSpaceNd());
   TF_LITE_ENSURE_STATUS(op_resolver.AddBroadcastArgs());
   TF_LITE_ENSURE_STATUS(op_resolver.AddBroadcastTo());
@@ -40,44 +42,63 @@
   TF_LITE_ENSURE_STATUS(op_resolver.AddCeil());
   TF_LITE_ENSURE_STATUS(op_resolver.AddCircularBuffer());
   TF_LITE_ENSURE_STATUS(op_resolver.AddConcatenation());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddConv2D());
   TF_LITE_ENSURE_STATUS(op_resolver.AddCos());
   TF_LITE_ENSURE_STATUS(op_resolver.AddCumSum());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddDelay());
   TF_LITE_ENSURE_STATUS(op_resolver.AddDepthToSpace());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddDepthwiseConv2D());
   TF_LITE_ENSURE_STATUS(op_resolver.AddDequantize());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddDetectionPostprocess());
   TF_LITE_ENSURE_STATUS(op_resolver.AddDiv());
   TF_LITE_ENSURE_STATUS(op_resolver.AddElu());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddEmbeddingLookup());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddEnergy());
   TF_LITE_ENSURE_STATUS(op_resolver.AddEqual());
   TF_LITE_ENSURE_STATUS(op_resolver.AddEthosU());
   TF_LITE_ENSURE_STATUS(op_resolver.AddExp());
   TF_LITE_ENSURE_STATUS(op_resolver.AddExpandDims());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddFftAutoScale());
   TF_LITE_ENSURE_STATUS(op_resolver.AddFill());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddFilterBank());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddFilterBankLog());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddFilterBankSpectralSubtraction());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddFilterBankSquareRoot());
   TF_LITE_ENSURE_STATUS(op_resolver.AddFloor());
   TF_LITE_ENSURE_STATUS(op_resolver.AddFloorDiv());
   TF_LITE_ENSURE_STATUS(op_resolver.AddFloorMod());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddFramer());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddFullyConnected());
   TF_LITE_ENSURE_STATUS(op_resolver.AddGather());
   TF_LITE_ENSURE_STATUS(op_resolver.AddGatherNd());
   TF_LITE_ENSURE_STATUS(op_resolver.AddGreater());
   TF_LITE_ENSURE_STATUS(op_resolver.AddGreaterEqual());
   TF_LITE_ENSURE_STATUS(op_resolver.AddHardSwish());
   TF_LITE_ENSURE_STATUS(op_resolver.AddIf());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddIrfft());
   TF_LITE_ENSURE_STATUS(op_resolver.AddL2Normalization());
   TF_LITE_ENSURE_STATUS(op_resolver.AddL2Pool2D());
   TF_LITE_ENSURE_STATUS(op_resolver.AddLeakyRelu());
   TF_LITE_ENSURE_STATUS(op_resolver.AddLess());
   TF_LITE_ENSURE_STATUS(op_resolver.AddLessEqual());
   TF_LITE_ENSURE_STATUS(op_resolver.AddLog());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddLogSoftmax());
   TF_LITE_ENSURE_STATUS(op_resolver.AddLogicalAnd());
   TF_LITE_ENSURE_STATUS(op_resolver.AddLogicalNot());
   TF_LITE_ENSURE_STATUS(op_resolver.AddLogicalOr());
   TF_LITE_ENSURE_STATUS(op_resolver.AddLogistic());
-  TF_LITE_ENSURE_STATUS(op_resolver.AddLogSoftmax());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddMaxPool2D());
   TF_LITE_ENSURE_STATUS(op_resolver.AddMaximum());
-  TF_LITE_ENSURE_STATUS(op_resolver.AddMirrorPad());
   TF_LITE_ENSURE_STATUS(op_resolver.AddMean());
   TF_LITE_ENSURE_STATUS(op_resolver.AddMinimum());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddMirrorPad());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddMul());
   TF_LITE_ENSURE_STATUS(op_resolver.AddNeg());
   TF_LITE_ENSURE_STATUS(op_resolver.AddNotEqual());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddOverlapAdd());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddPCAN());
   TF_LITE_ENSURE_STATUS(op_resolver.AddPack());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddPad());
   TF_LITE_ENSURE_STATUS(op_resolver.AddPadV2());
   TF_LITE_ENSURE_STATUS(op_resolver.AddPrelu());
   TF_LITE_ENSURE_STATUS(op_resolver.AddQuantize());
@@ -88,6 +109,7 @@
   TF_LITE_ENSURE_STATUS(op_resolver.AddReshape());
   TF_LITE_ENSURE_STATUS(op_resolver.AddResizeBilinear());
   TF_LITE_ENSURE_STATUS(op_resolver.AddResizeNearestNeighbor());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddRfft());
   TF_LITE_ENSURE_STATUS(op_resolver.AddRound());
   TF_LITE_ENSURE_STATUS(op_resolver.AddRsqrt());
   TF_LITE_ENSURE_STATUS(op_resolver.AddSelectV2());
@@ -99,29 +121,27 @@
   TF_LITE_ENSURE_STATUS(op_resolver.AddSpaceToDepth());
   TF_LITE_ENSURE_STATUS(op_resolver.AddSplit());
   TF_LITE_ENSURE_STATUS(op_resolver.AddSplitV());
-  TF_LITE_ENSURE_STATUS(op_resolver.AddSqueeze());
   TF_LITE_ENSURE_STATUS(op_resolver.AddSqrt());
   TF_LITE_ENSURE_STATUS(op_resolver.AddSquare());
   TF_LITE_ENSURE_STATUS(op_resolver.AddSquaredDifference());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddSqueeze());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddStacker());
   TF_LITE_ENSURE_STATUS(op_resolver.AddStridedSlice());
   TF_LITE_ENSURE_STATUS(op_resolver.AddSub());
   TF_LITE_ENSURE_STATUS(op_resolver.AddSum());
   TF_LITE_ENSURE_STATUS(op_resolver.AddSvdf());
   TF_LITE_ENSURE_STATUS(op_resolver.AddTanh());
-  TF_LITE_ENSURE_STATUS(op_resolver.AddTransposeConv());
   TF_LITE_ENSURE_STATUS(op_resolver.AddTranspose());
-  TF_LITE_ENSURE_STATUS(op_resolver.AddUnpack());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddTransposeConv());
   TF_LITE_ENSURE_STATUS(op_resolver.AddUnidirectionalSequenceLSTM());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddUnpack());
   TF_LITE_ENSURE_STATUS(op_resolver.AddVarHandle());
   TF_LITE_ENSURE_STATUS(op_resolver.AddWhile());
+  TF_LITE_ENSURE_STATUS(op_resolver.AddWindow());
   TF_LITE_ENSURE_STATUS(op_resolver.AddZerosLike());
-  TF_LITE_ENSURE_STATUS(op_resolver.AddDepthwiseConv2D());
-  TF_LITE_ENSURE_STATUS(op_resolver.AddConv2D());
-  TF_LITE_ENSURE_STATUS(op_resolver.AddAveragePool2D());
-  TF_LITE_ENSURE_STATUS(op_resolver.AddPad());
-  TF_LITE_ENSURE_STATUS(op_resolver.AddMaxPool2D());
-  TF_LITE_ENSURE_STATUS(op_resolver.AddMul());
+
   return kTfLiteOk;
 }
+
 }  // namespace tflite
 #endif  // TFLM_BENCHMARK_OP_RESOLVER_H_
diff --git a/tensorflow/lite/micro/tools/benchmarking/show_meta_data.cc.template b/tensorflow/lite/micro/tools/benchmarking/show_meta_data.cc.template
new file mode 100644
index 0000000..a2102a4
--- /dev/null
+++ b/tensorflow/lite/micro/tools/benchmarking/show_meta_data.cc.template
@@ -0,0 +1,177 @@
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <cstddef>
+#include <cstring>
+#include <type_traits>
+
+#include "tensorflow/lite/micro/micro_log.h"
+#include "tensorflow/lite/micro/tools/benchmarking/show_meta_data.h"
+
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
+#include "NatureDSP_Signal_id.h"
+#include "xa_nnlib_standards.h"
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
+
+namespace tflite {
+namespace {
+
+#if defined(GENERIC_BENCHMARK_USING_BUILTIN_MODEL)
+const char* model_analysis_strings[] = {
+    // %%%_model_analysis_strings_%%%
+};
+
+const char* model_sha1_strings[] = {
+    // %%%_model_sha1_strings_%%%
+};
+#endif  // defined(GENERIC_BENCHMARK_USING_BUILTIN_MODEL)
+
+const char* compilation_date_strings[] = {
+    // %%%_compilation_date_strings_%%%
+};
+
+const char* git_commit_strings[] = {
+    // %%%_git_commit_strings_%%%
+};
+
+const char* git_status_strings[] = {
+    // %%%_git_status_strings_%%%
+};
+
+const char* cc_name_strings[] = {
+    // %%%_cc_name_strings_%%%
+};
+
+const char* cc_version_strings[] = {
+    // %%%_cc_version_strings_%%%
+};
+
+const char* cc_flags_strings[] = {
+    // %%%_cc_flags_strings_%%%
+};
+
+const char* cxx_name_strings[] = {
+    // %%%_cxx_name_strings_%%%
+};
+
+const char* cxx_version_strings[] = {
+    // %%%_cxx_version_strings_%%%
+};
+
+const char* cxx_flags_strings[] = {
+    // %%%_cxx_flags_strings_%%%
+};
+
+const char* optimization_flag_strings[] = {
+    // %%%_optimization_flag_strings_%%%
+};
+
+const char* target_info_strings[] = {
+    // %%%_target_info_strings_%%%
+};
+
+#if defined(CMSIS_NN) || defined(HIFI3) || defined(HIFI4) || defined(HIFI5) || \
+    defined(VISION_P6)
+const char* nn_library_url_strings[] = {
+    // %%%_nn_library_url_strings_%%%
+};
+
+const char* nn_library_md5_strings[] = {
+    // %%%_nn_library_md5_strings_%%%
+};
+#endif  // defined(CMSIS_NN) || defined(HIFI3) || defined(HIFI4) ||
+        // defined(HIFI5) || defined(VISION_P6)
+
+void ShowStrings(const char* title, const char** str, const size_t count) {
+  MicroPrintf("%s%s", title, str[0]);
+  for (size_t i = 1; i < count; i++) {
+    MicroPrintf("%s", str[i]);
+  }
+}
+
+void ShowSeparator() { MicroPrintf("--------------------"); }
+
+}  // namespace
+
+void GenericBenchmarkShowMetaData() {
+  ShowSeparator();
+  ShowStrings("Compiled on:\n\n", compilation_date_strings,
+              std::extent<decltype(compilation_date_strings)>::value);
+
+  ShowSeparator();
+  ShowStrings("Git SHA: ", git_commit_strings,
+              std::extent<decltype(git_commit_strings)>::value);
+  ShowStrings("\nGit status:\n\n", git_status_strings,
+              std::extent<decltype(git_status_strings)>::value);
+
+  ShowSeparator();
+  ShowStrings("C compiler: ", cc_name_strings,
+              std::extent<decltype(cc_name_strings)>::value);
+  ShowStrings("Version:\n\n", cc_version_strings,
+              std::extent<decltype(cc_version_strings)>::value);
+  ShowStrings("\nFlags:\n\n", cc_flags_strings,
+              std::extent<decltype(cc_flags_strings)>::value);
+  ShowStrings("\nC++ compiler: ", cxx_name_strings,
+              std::extent<decltype(cxx_name_strings)>::value);
+  ShowStrings("Version:\n\n", cxx_version_strings,
+              std::extent<decltype(cxx_version_strings)>::value);
+  ShowStrings("\nFlags:\n\n", cxx_flags_strings,
+              std::extent<decltype(cxx_flags_strings)>::value);
+  ShowStrings("\nOptimization: ", optimization_flag_strings,
+              std::extent<decltype(optimization_flag_strings)>::value);
+
+  ShowSeparator();
+  ShowStrings("Target information:\n\n", target_info_strings,
+              std::extent<decltype(target_info_strings)>::value);
+
+#if defined(CMSIS_NN) || defined(HIFI3) || defined(HIFI4) || defined(HIFI5) || \
+    defined(VISION_P6)
+  ShowSeparator();
+  ShowStrings("NN library download URLs:\n\n", nn_library_url_strings,
+              std::extent<decltype(nn_library_url_strings)>::value);
+  ShowStrings("\nNN library MD5 checksums:\n\n", nn_library_md5_strings,
+              std::extent<decltype(nn_library_md5_strings)>::value);
+#endif  // defined(CMSIS_NN) || defined(HIFI3) || defined(HIFI4) ||
+        // defined(HIFI5) || defined(VISION_P6)
+
+#if defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
+  ShowSeparator();
+
+  char version_buffer[30 + 1];
+  memset(version_buffer, 0, sizeof(version_buffer));
+  NatureDSP_Signal_get_library_version(version_buffer);
+  MicroPrintf("NatureDSP library version: %s", version_buffer);
+  memset(version_buffer, 0, sizeof(version_buffer));
+  NatureDSP_Signal_get_library_api_version(version_buffer);
+  MicroPrintf("NatureDSP API version: %s", version_buffer);
+
+  const char* nnlib_library_version = xa_nnlib_get_lib_version_string();
+  const char* nnlib_api_version = xa_nnlib_get_lib_api_version_string();
+  MicroPrintf("NNLIB library version: %s", nnlib_library_version);
+  MicroPrintf("NNLIB API version: %s", nnlib_api_version);
+#endif  // defined(HIFI3) || defined(HIFI4) || defined(HIFI5)
+
+#if defined(GENERIC_BENCHMARK_USING_BUILTIN_MODEL)
+  ShowSeparator();
+  ShowStrings("Model SHA1:\n\n", model_sha1_strings,
+              std::extent<decltype(model_sha1_strings)>::value);
+  ShowStrings("\nModel analysis:\n\n", model_analysis_strings,
+              std::extent<decltype(model_analysis_strings)>::value);
+#endif  // defined(GENERIC_BENCHMARK_USING_BUILTIN_MODEL)
+
+  ShowSeparator();
+}
+
+}  // namespace tflite
diff --git a/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_model_settings.cc b/tensorflow/lite/micro/tools/benchmarking/show_meta_data.h
similarity index 67%
copy from tensorflow/lite/micro/examples/micro_speech/simple_features/simple_model_settings.cc
copy to tensorflow/lite/micro/tools/benchmarking/show_meta_data.h
index e2cf661..37cf616 100644
--- a/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_model_settings.cc
+++ b/tensorflow/lite/micro/tools/benchmarking/show_meta_data.h
@@ -1,4 +1,4 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -13,11 +13,12 @@
 limitations under the License.
 ==============================================================================*/
 
-#include "tensorflow/lite/micro/examples/micro_speech/simple_features/simple_model_settings.h"
+namespace tflite {
 
-const char* kCategoryLabels[kCategoryCount] = {
-    "silence",
-    "unknown",
-    "yes",
-    "no",
-};
+#if !defined(GENERIC_BENCHMARK_NO_META_DATA)
+void GenericBenchmarkShowMetaData();
+#else
+inline void GenericBenchmarkShowMetaData() {}
+#endif  // !defined(GENERIC_BENCHMARK_NO_META_DATA)
+
+}  // namespace tflite
diff --git a/tensorflow/lite/micro/tools/ci_build/test_code_style.sh b/tensorflow/lite/micro/tools/ci_build/test_code_style.sh
index 0d25c9f..d49f429 100755
--- a/tensorflow/lite/micro/tools/ci_build/test_code_style.sh
+++ b/tensorflow/lite/micro/tools/ci_build/test_code_style.sh
@@ -77,6 +77,8 @@
   FIX_FORMAT_OPTIONS=""
 fi
 
+EXCLUDE_SHARED_TFL_CODE=$(sed 's/^/-e /' ci/tflite_files.txt)
+
 tensorflow/lite/micro/tools/make/downloads/pigweed/pw_presubmit/py/pw_presubmit/format_code.py \
   ${FIX_FORMAT_OPTIONS} \
   -e "\.github" \
@@ -84,6 +86,8 @@
   -e third_party/xtensa \
   -e ci \
   -e c/common.c \
+  -e codegen/preprocessor/preprocessor_schema_generated.h \
+  -e codegen/preprocessor/preprocessor_schema_py_generated.py \
   -e core/api/error_reporter.cc \
   -e kernels/internal/reference/integer_ops/ \
   -e kernels/internal/reference/reference_ops.h \
@@ -93,8 +97,10 @@
   -e experimental \
   -e schema/schema_generated.h \
   -e schema/schema_utils.h \
+  -e tensorflow/lite/micro/tools/layer_by_layer_schema_generated.h \
   -e "\.inc" \
-  -e "\.md"
+  -e "\.md" \
+  ${EXCLUDE_SHARED_TFL_CODE}
 
 CODE_FORMAT_RESULT=$?
 
diff --git a/tensorflow/lite/micro/tools/ci_build/test_cortex_m_corstone_300.sh b/tensorflow/lite/micro/tools/ci_build/test_cortex_m_corstone_300.sh
index 516c181..ab136e5 100755
--- a/tensorflow/lite/micro/tools/ci_build/test_cortex_m_corstone_300.sh
+++ b/tensorflow/lite/micro/tools/ci_build/test_cortex_m_corstone_300.sh
@@ -1,5 +1,5 @@
 #!/usr/bin/env bash
-# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+# Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -42,3 +42,14 @@
 readable_run make -f tensorflow/lite/micro/tools/make/Makefile clean
 readable_run make -j$(nproc) -f tensorflow/lite/micro/tools/make/Makefile CO_PROCESSOR=ethos_u OPTIMIZED_KERNEL_DIR=${OPTIMIZED_KERNEL_DIR} TARGET=${TARGET} TARGET_ARCH=${TARGET_ARCH} TOOLCHAIN=${TOOLCHAIN} build
 readable_run make -f tensorflow/lite/micro/tools/make/Makefile CO_PROCESSOR=ethos_u OPTIMIZED_KERNEL_DIR=${OPTIMIZED_KERNEL_DIR} TARGET=${TARGET} TARGET_ARCH=${TARGET_ARCH} TOOLCHAIN=${TOOLCHAIN} test
+
+# Run generic benchmark.
+readable_run make -j$(nproc) -f tensorflow/lite/micro/tools/make/Makefile \
+  CO_PROCESSOR=ethos_u \
+  OPTIMIZED_KERNEL_DIR=${OPTIMIZED_KERNEL_DIR} \
+  TARGET=${TARGET} \
+  TARGET_ARCH=${TARGET_ARCH} \
+  TOOLCHAIN=${TOOLCHAIN} \
+  GENERIC_BENCHMARK_MODEL_PATH=tensorflow/lite/micro/models/person_detect_vela.tflite \
+  GENERIC_BENCHMARK_ARENA_SIZE=`expr 150 \* 1024` \
+  run_tflm_benchmark
diff --git a/tensorflow/lite/micro/tools/ci_build/test_x86_default.sh b/tensorflow/lite/micro/tools/ci_build/test_x86_default.sh
index 623238e..998827f 100755
--- a/tensorflow/lite/micro/tools/ci_build/test_x86_default.sh
+++ b/tensorflow/lite/micro/tools/ci_build/test_x86_default.sh
@@ -40,3 +40,10 @@
 readable_run make -s -j8 -f ${TENSORFLOW_ROOT}tensorflow/lite/micro/tools/make/Makefile build TENSORFLOW_ROOT=${TENSORFLOW_ROOT} EXTERNAL_DIR=${EXTERNAL_DIR}
 readable_run make -s -j8 -f ${TENSORFLOW_ROOT}tensorflow/lite/micro/tools/make/Makefile test TENSORFLOW_ROOT=${TENSORFLOW_ROOT} EXTERNAL_DIR=${EXTERNAL_DIR}
 readable_run make -s -j8 -f ${TENSORFLOW_ROOT}tensorflow/lite/micro/tools/make/Makefile integration_tests TENSORFLOW_ROOT=${TENSORFLOW_ROOT} EXTERNAL_DIR=${EXTERNAL_DIR}
+
+# run generic benchmark
+readable_run make -j$(nproc) -f ${TENSORFLOW_ROOT}tensorflow/lite/micro/tools/make/Makefile \
+  TENSORFLOW_ROOT=${TENSORFLOW_ROOT} \
+  EXTERNAL_DIR=${EXTERNAL_DIR} \
+  GENERIC_BENCHMARK_MODEL_PATH=${TENSORFLOW_ROOT}tensorflow/lite/micro/models/person_detect.tflite \
+  run_tflm_benchmark
diff --git a/tensorflow/lite/micro/tools/ci_build/test_xtensa_fusion_f1.sh b/tensorflow/lite/micro/tools/ci_build/test_xtensa_fusion_f1.sh
index 8416792..2fd6bf8 100755
--- a/tensorflow/lite/micro/tools/ci_build/test_xtensa_fusion_f1.sh
+++ b/tensorflow/lite/micro/tools/ci_build/test_xtensa_fusion_f1.sh
@@ -35,7 +35,7 @@
 if [[ ${1} == "INTERNAL" ]]; then
 readable_run make -f ${TENSORFLOW_ROOT}tensorflow/lite/micro/tools/make/Makefile \
   TARGET=xtensa \
-  TARGET_ARCH=hifi4 \
+  TARGET_ARCH=hifi3 \
   OPTIMIZED_KERNEL_DIR=xtensa \
   XTENSA_CORE=F1_190305_swupgrade \
   TENSORFLOW_ROOT=${TENSORFLOW_ROOT} \
@@ -44,7 +44,7 @@
 
 readable_run make -f ${TENSORFLOW_ROOT}tensorflow/lite/micro/tools/make/Makefile \
   TARGET=xtensa \
-  TARGET_ARCH=hifi4 \
+  TARGET_ARCH=hifi3 \
   OPTIMIZED_KERNEL_DIR=xtensa \
   XTENSA_CORE=F1_190305_swupgrade \
   TENSORFLOW_ROOT=${TENSORFLOW_ROOT} \
@@ -53,7 +53,7 @@
 else
 readable_run make -f ${TENSORFLOW_ROOT}tensorflow/lite/micro/tools/make/Makefile \
   TARGET=xtensa \
-  TARGET_ARCH=hifi4 \
+  TARGET_ARCH=hifi3 \
   OPTIMIZED_KERNEL_DIR=xtensa \
   XTENSA_CORE=F1_190305_swupgrade \
   TENSORFLOW_ROOT=${TENSORFLOW_ROOT} \
@@ -62,10 +62,21 @@
 
 readable_run make -f ${TENSORFLOW_ROOT}tensorflow/lite/micro/tools/make/Makefile \
   TARGET=xtensa \
-  TARGET_ARCH=hifi4 \
+  TARGET_ARCH=hifi3 \
   OPTIMIZED_KERNEL_DIR=xtensa \
   XTENSA_CORE=F1_190305_swupgrade \
   TENSORFLOW_ROOT=${TENSORFLOW_ROOT} \
   EXTERNAL_DIR=${EXTERNAL_DIR} \
   test -j$(nproc)
+
+# run generic benchmark
+readable_run make -f ${TENSORFLOW_ROOT}tensorflow/lite/micro/tools/make/Makefile \
+  TARGET=xtensa \
+  TARGET_ARCH=hifi3 \
+  OPTIMIZED_KERNEL_DIR=xtensa \
+  XTENSA_CORE=F1_190305_swupgrade \
+  TENSORFLOW_ROOT=${TENSORFLOW_ROOT} \
+  EXTERNAL_DIR=${EXTERNAL_DIR} \
+  GENERIC_BENCHMARK_MODEL_PATH=${TENSORFLOW_ROOT}tensorflow/lite/micro/models/person_detect.tflite \
+  run_tflm_benchmark -j$(nproc)
 fi
diff --git a/tensorflow/lite/micro/tools/ci_build/test_xtensa_hifi3z.sh b/tensorflow/lite/micro/tools/ci_build/test_xtensa_hifi3z.sh
index 1ddf0d9..ff3d600 100755
--- a/tensorflow/lite/micro/tools/ci_build/test_xtensa_hifi3z.sh
+++ b/tensorflow/lite/micro/tools/ci_build/test_xtensa_hifi3z.sh
@@ -35,7 +35,7 @@
 if [[ ${1} == "INTERNAL" ]]; then
   readable_run make -f ${TENSORFLOW_ROOT}tensorflow/lite/micro/tools/make/Makefile \
     TARGET=xtensa \
-    TARGET_ARCH=hifi4 \
+    TARGET_ARCH=hifi3 \
     OPTIMIZED_KERNEL_DIR=xtensa \
     XTENSA_CORE=HIFI_190304_swupgrade \
     TENSORFLOW_ROOT=${TENSORFLOW_ROOT} \
@@ -44,7 +44,7 @@
 
   readable_run make -f ${TENSORFLOW_ROOT}tensorflow/lite/micro/tools/make/Makefile \
     TARGET=xtensa \
-    TARGET_ARCH=hifi4 \
+    TARGET_ARCH=hifi3 \
     OPTIMIZED_KERNEL_DIR=xtensa \
     XTENSA_CORE=HIFI_190304_swupgrade \
     TENSORFLOW_ROOT=${TENSORFLOW_ROOT} \
@@ -53,7 +53,7 @@
 
   readable_run make -f ${TENSORFLOW_ROOT}tensorflow/lite/micro/tools/make/Makefile \
     TARGET=xtensa \
-    TARGET_ARCH=hifi4 \
+    TARGET_ARCH=hifi3 \
     OPTIMIZED_KERNEL_DIR=xtensa \
     XTENSA_CORE=HIFI_190304_swupgrade \
     TENSORFLOW_ROOT=${TENSORFLOW_ROOT} \
@@ -62,7 +62,7 @@
 
   readable_run make -f ${TENSORFLOW_ROOT}tensorflow/lite/micro/tools/make/Makefile \
     TARGET=xtensa \
-    TARGET_ARCH=hifi4 \
+    TARGET_ARCH=hifi3 \
     OPTIMIZED_KERNEL_DIR=xtensa \
     XTENSA_CORE=HIFI_190304_swupgrade \
     TENSORFLOW_ROOT=${TENSORFLOW_ROOT} \
@@ -71,7 +71,7 @@
 
   readable_run make -f ${TENSORFLOW_ROOT}tensorflow/lite/micro/tools/make/Makefile \
     TARGET=xtensa \
-    TARGET_ARCH=hifi4 \
+    TARGET_ARCH=hifi3 \
     OPTIMIZED_KERNEL_DIR=xtensa \
     XTENSA_CORE=HIFI_190304_swupgrade \
     TENSORFLOW_ROOT=${TENSORFLOW_ROOT} \
@@ -80,7 +80,7 @@
 else
   readable_run make -f ${TENSORFLOW_ROOT}tensorflow/lite/micro/tools/make/Makefile \
     TARGET=xtensa \
-    TARGET_ARCH=hifi4 \
+    TARGET_ARCH=hifi3 \
     OPTIMIZED_KERNEL_DIR=xtensa \
     XTENSA_CORE=HIFI_190304_swupgrade \
     TENSORFLOW_ROOT=${TENSORFLOW_ROOT} \
@@ -89,10 +89,21 @@
 
   readable_run make -f ${TENSORFLOW_ROOT}tensorflow/lite/micro/tools/make/Makefile \
     TARGET=xtensa \
-    TARGET_ARCH=hifi4 \
+    TARGET_ARCH=hifi3 \
     OPTIMIZED_KERNEL_DIR=xtensa \
     XTENSA_CORE=HIFI_190304_swupgrade \
     TENSORFLOW_ROOT=${TENSORFLOW_ROOT} \
     EXTERNAL_DIR=${EXTERNAL_DIR} \
     test -j$(nproc)
+
+  # run generic benchmark
+  readable_run make -f ${TENSORFLOW_ROOT}tensorflow/lite/micro/tools/make/Makefile \
+    TARGET=xtensa \
+    TARGET_ARCH=hifi3 \
+    OPTIMIZED_KERNEL_DIR=xtensa \
+    XTENSA_CORE=HIFI_190304_swupgrade \
+    TENSORFLOW_ROOT=${TENSORFLOW_ROOT} \
+    EXTERNAL_DIR=${EXTERNAL_DIR} \
+    GENERIC_BENCHMARK_MODEL_PATH=${TENSORFLOW_ROOT}tensorflow/lite/micro/models/person_detect.tflite \
+    run_tflm_benchmark -j$(nproc)
 fi
diff --git a/tensorflow/lite/micro/tools/ci_build/test_xtensa_hifi5.sh b/tensorflow/lite/micro/tools/ci_build/test_xtensa_hifi5.sh
index 82a04a9..0ad29e8 100755
--- a/tensorflow/lite/micro/tools/ci_build/test_xtensa_hifi5.sh
+++ b/tensorflow/lite/micro/tools/ci_build/test_xtensa_hifi5.sh
@@ -47,3 +47,14 @@
   TENSORFLOW_ROOT=${TENSORFLOW_ROOT} \
   EXTERNAL_DIR=${EXTERNAL_DIR} \
   test -j$(nproc)
+
+# run generic benchmark
+readable_run make -f ${TENSORFLOW_ROOT}tensorflow/lite/micro/tools/make/Makefile \
+  TARGET=xtensa \
+  TARGET_ARCH=hifi5 \
+  OPTIMIZED_KERNEL_DIR=xtensa \
+  XTENSA_CORE=PRD_H5_RDO_07_01_2022 \
+  TENSORFLOW_ROOT=${TENSORFLOW_ROOT} \
+  EXTERNAL_DIR=${EXTERNAL_DIR} \
+  GENERIC_BENCHMARK_MODEL_PATH=${TENSORFLOW_ROOT}tensorflow/lite/micro/models/person_detect.tflite \
+  run_tflm_benchmark -j$(nproc)
\ No newline at end of file
diff --git a/tensorflow/lite/micro/tools/ci_build/test_xtensa_vision_p6.sh b/tensorflow/lite/micro/tools/ci_build/test_xtensa_vision_p6.sh
index a2744b5..1c6de93 100755
--- a/tensorflow/lite/micro/tools/ci_build/test_xtensa_vision_p6.sh
+++ b/tensorflow/lite/micro/tools/ci_build/test_xtensa_vision_p6.sh
@@ -54,4 +54,15 @@
     TENSORFLOW_ROOT=${TENSORFLOW_ROOT} \
     EXTERNAL_DIR=${EXTERNAL_DIR} \
     test -j$(nproc)
+
+  # run generic benchmark
+  readable_run make -f ${TENSORFLOW_ROOT}tensorflow/lite/micro/tools/make/Makefile \
+    TARGET=xtensa \
+    TARGET_ARCH=vision_p6 \
+    OPTIMIZED_KERNEL_DIR=xtensa \
+    XTENSA_CORE=P6_200528 \
+    TENSORFLOW_ROOT=${TENSORFLOW_ROOT} \
+    EXTERNAL_DIR=${EXTERNAL_DIR} \
+    GENERIC_BENCHMARK_MODEL_PATH=${TENSORFLOW_ROOT}tensorflow/lite/micro/models/person_detect.tflite \
+    run_tflm_benchmark -j$(nproc)
 fi
diff --git a/tensorflow/lite/micro/tools/gen_micro_mutable_op_resolver/BUILD b/tensorflow/lite/micro/tools/gen_micro_mutable_op_resolver/BUILD
index 276e9c2..e590939 100644
--- a/tensorflow/lite/micro/tools/gen_micro_mutable_op_resolver/BUILD
+++ b/tensorflow/lite/micro/tools/gen_micro_mutable_op_resolver/BUILD
@@ -1,3 +1,4 @@
+load("@rules_python//python:defs.bzl", "py_binary")
 load("@tflm_pip_deps//:requirements.bzl", "requirement")
 
 package(
@@ -18,7 +19,6 @@
     deps = [
         "@absl_py//absl:app",
         "@absl_py//absl/flags",
-        requirement("tensorflow-cpu"),
         requirement("mako"),
         "//tensorflow/lite/python:schema_py",
         "//tensorflow/lite/python:schema_util",
@@ -42,7 +42,7 @@
     deps = [
         "@absl_py//absl:app",
         "@absl_py//absl/flags",
-        requirement("tensorflow-cpu"),
+        requirement("tensorflow"),
         requirement("mako"),
         "//tensorflow/lite/micro/tools:generate_test_for_model",
         "//tensorflow/lite/python:schema_py",
diff --git a/tensorflow/lite/micro/tools/gen_micro_mutable_op_resolver/README.md b/tensorflow/lite/micro/tools/gen_micro_mutable_op_resolver/README.md
index 95a0c43..1837995 100644
--- a/tensorflow/lite/micro/tools/gen_micro_mutable_op_resolver/README.md
+++ b/tensorflow/lite/micro/tools/gen_micro_mutable_op_resolver/README.md
@@ -4,6 +4,8 @@
 This generally requires manually finding out which operators are used in the model through the use of a visualization tool, which may be impractical in some cases.
 This script will automatically generate a MicroMutableOpResolver with only the used operators for a given model or set of models.
 
+Note: Check ci/Dockerfile.micro for supported python version.
+
 ## How to run
 
 bazel run tensorflow/lite/micro/tools/gen_micro_mutable_op_resolver:generate_micro_mutable_op_resolver_from_model -- \
diff --git a/tensorflow/lite/micro/tools/gen_micro_mutable_op_resolver/generate_micro_mutable_op_resolver_from_model.py b/tensorflow/lite/micro/tools/gen_micro_mutable_op_resolver/generate_micro_mutable_op_resolver_from_model.py
index de583da..d7e6140 100644
--- a/tensorflow/lite/micro/tools/gen_micro_mutable_op_resolver/generate_micro_mutable_op_resolver_from_model.py
+++ b/tensorflow/lite/micro/tools/gen_micro_mutable_op_resolver/generate_micro_mutable_op_resolver_from_model.py
@@ -23,7 +23,7 @@
 from absl import flags
 from mako import template
 
-from tflite_micro.tensorflow.lite.tools import visualize as visualize
+from tensorflow.lite.tools import visualize
 
 TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), 'templates')
 TEMPLATE_DIR = os.path.abspath(TEMPLATE_DIR)
@@ -69,8 +69,9 @@
     else:
       formated_op_string += part.upper()
 
-  # Edge case for AddUnidirectionalSequenceLSTM().
+  # Edge cases
   formated_op_string = formated_op_string.replace('Lstm', 'LSTM')
+  formated_op_string = formated_op_string.replace('BatchMatmul', 'BatchMatMul')
 
   return 'Add' + formated_op_string
 
diff --git a/tensorflow/lite/micro/tools/generate_cc_arrays.py b/tensorflow/lite/micro/tools/generate_cc_arrays.py
index 759db56..2a77b4d 100644
--- a/tensorflow/lite/micro/tools/generate_cc_arrays.py
+++ b/tensorflow/lite/micro/tools/generate_cc_arrays.py
@@ -35,8 +35,6 @@
     out_cc_file.write('#include <cstdint>\n\n')
     out_cc_file.write('#include "{}"\n\n'.format(
         out_fname.split('genfiles/')[-1].replace('.cc', '.h')))
-    out_cc_file.write('const unsigned int {}_size = {};\n'.format(
-        array_name, str(size)))
     out_cc_file.write('alignas(16) const {} {}[] = {{'.format(
         array_type, array_name))
     out_cc_file.write(array_contents)
@@ -45,8 +43,8 @@
   elif out_fname.endswith('.h'):
     out_hdr_file = open(out_fname, 'w')
     out_hdr_file.write('#include <cstdint>\n\n')
-    out_hdr_file.write(
-        'extern const unsigned int {}_size;\n'.format(array_name))
+    out_hdr_file.write('constexpr unsigned int {}_size = {};\n'.format(
+        array_name, str(size)))
     out_hdr_file.write('extern const {} {}[];\n'.format(
         array_type, array_name))
     out_hdr_file.close()
diff --git a/tensorflow/lite/micro/tools/layer_by_layer.cc b/tensorflow/lite/micro/tools/layer_by_layer.cc
new file mode 100644
index 0000000..91d325e
--- /dev/null
+++ b/tensorflow/lite/micro/tools/layer_by_layer.cc
@@ -0,0 +1,332 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <stdio.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <fstream>
+#include <ios>
+#include <memory>
+#include <random>
+#include <utility>
+
+#include "flatbuffers/flatbuffer_builder.h"
+#include "flatbuffers/util.h"
+#include "tensorflow/lite/c/c_api_types.h"
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/kernels/op_macros.h"
+#include "tensorflow/lite/micro/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/micro_allocator.h"
+#include "tensorflow/lite/micro/micro_context.h"
+#include "tensorflow/lite/micro/micro_interpreter.h"
+#include "tensorflow/lite/micro/micro_log.h"
+#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
+#include "tensorflow/lite/micro/micro_resource_variable.h"
+#include "tensorflow/lite/micro/micro_utils.h"
+#include "tensorflow/lite/micro/tools/benchmarking/op_resolver.h"
+#include "tensorflow/lite/micro/tools/layer_by_layer_schema_generated.h"
+#include "tensorflow/lite/schema/schema_generated.h"
+
+namespace tflite {
+
+namespace {
+
+// Seed used for the random input. Input data shouldn't affect invocation timing
+// so randomness isn't really needed.
+constexpr uint32_t kRandomSeed = 0xFB;
+
+constexpr size_t kTensorArenaSize = 3e6;
+constexpr int kNumResourceVariable = 100;
+
+bool SaveFile(const char* name, const char* buf, size_t len) {
+  std::ofstream ofs(name, std::ofstream::binary);
+  if (!ofs.is_open()) return false;
+  ofs.write(buf, len);
+  return !ofs.bad();
+}
+
+TfLiteStatus ConvertTensorType(TfLiteType type, TensorTypes& tensor_type) {
+  switch (type) {
+    case kTfLiteFloat16:
+      tensor_type = TensorTypes_FLOAT16;
+      return kTfLiteOk;
+    case kTfLiteBFloat16:
+      tensor_type = TensorTypes_BFLOAT16;
+      return kTfLiteOk;
+    case kTfLiteFloat32:
+      tensor_type = TensorTypes_FLOAT32;
+      return kTfLiteOk;
+    case kTfLiteFloat64:
+      tensor_type = TensorTypes_FLOAT64;
+      return kTfLiteOk;
+    case kTfLiteInt16:
+      tensor_type = TensorTypes_INT16;
+      return kTfLiteOk;
+    case kTfLiteUInt16:
+      tensor_type = TensorTypes_UINT16;
+      return kTfLiteOk;
+    case kTfLiteInt32:
+      tensor_type = TensorTypes_INT32;
+      return kTfLiteOk;
+    case kTfLiteUInt32:
+      tensor_type = TensorTypes_UINT32;
+      return kTfLiteOk;
+    case kTfLiteUInt8:
+      tensor_type = TensorTypes_UINT8;
+      return kTfLiteOk;
+    case kTfLiteInt8:
+      tensor_type = TensorTypes_INT8;
+      return kTfLiteOk;
+    case kTfLiteInt64:
+      tensor_type = TensorTypes_INT64;
+      return kTfLiteOk;
+    case kTfLiteUInt64:
+      tensor_type = TensorTypes_UINT64;
+      return kTfLiteOk;
+    case kTfLiteString:
+      tensor_type = TensorTypes_STRING;
+      return kTfLiteOk;
+    case kTfLiteBool:
+      tensor_type = TensorTypes_BOOL;
+      return kTfLiteOk;
+    case kTfLiteComplex64:
+      tensor_type = TensorTypes_COMPLEX64;
+      return kTfLiteOk;
+    case kTfLiteComplex128:
+      tensor_type = TensorTypes_COMPLEX128;
+      return kTfLiteOk;
+    case kTfLiteResource:
+      tensor_type = TensorTypes_RESOURCE;
+      return kTfLiteOk;
+    case kTfLiteVariant:
+      tensor_type = TensorTypes_VARIANT;
+      return kTfLiteOk;
+    case kTfLiteInt4:
+      tensor_type = TensorTypes_INT4;
+      return kTfLiteOk;
+    case kTfLiteNoType:
+      MicroPrintf("Unsupported data type %d in tensor\n", tensor_type);
+      return kTfLiteError;
+  }
+  return kTfLiteOk;
+}
+
+TfLiteStatus SetRandomInput(const uint32_t random_seed,
+                            const ModelT& unpacked_model,
+                            MicroInterpreter& interpreter,
+                            ModelTestDataT& output_data) {
+  std::mt19937 eng(random_seed);
+  std::uniform_int_distribution<uint32_t> dist(0, 255);
+  for (size_t i = 0; i < interpreter.inputs_size(); ++i) {
+    TfLiteTensor* input = interpreter.input_tensor(i);
+    std::unique_ptr<TensorDataT> test_data(new TensorDataT());
+    test_data->input_index = i;
+    test_data->layer_number = -1;
+    test_data->tensor_index = -1;
+    test_data->num_bytes = input->bytes;
+    // TODO: share TensorTypes with the TFLite schema's TensorType enum later
+    TF_LITE_ENSURE_STATUS(ConvertTensorType(input->type, test_data->dtype));
+    for (int x = 0; x < input->dims->size; ++x) {
+      test_data->shape.push_back(input->dims->data[x]);
+    }
+
+    // Pre-populate input tensor with random values.
+    uint8_t* input_values = GetTensorData<uint8_t>(input);
+    for (size_t j = 0; j < input->bytes; ++j) {
+      input_values[j] = dist(eng);
+      test_data->data.push_back(input_values[j]);
+    }
+    output_data.input_data.push_back(std::move(test_data));
+  }
+
+  // Get tensor indices for all model input_tensors
+  for (size_t i = 0; i < unpacked_model.subgraphs[0]->inputs.size(); ++i) {
+    output_data.input_data[i]->tensor_index =
+        unpacked_model.subgraphs[0]->inputs[i];
+  }
+  return kTfLiteOk;
+}
+
+std::unique_ptr<char[]> ReadModelFile(const char* model_file_name) {
+  std::ifstream model_file(model_file_name, std::ios::binary);
+  if (!model_file.is_open()) {
+    MicroPrintf("could not open model file \n ");
+    return nullptr;
+  }
+
+  model_file.seekg(0, std::ios::end);
+  size_t num_bytes = model_file.tellg();
+  model_file.seekg(0, std::ios::beg);
+  std::unique_ptr<char[]> model_data(new char[num_bytes]);
+  model_file.read(model_data.get(), num_bytes);
+
+  return model_data;
+}
+
+// Stores the Intermediate Tensor data for each layer into the unpacked
+// ModelTestDataT class, which is then packed into the flatbuffer.
+TfLiteStatus StoreLayerByLayerData(MicroInterpreter& interpreter,
+                                   const ModelT& tflite_model,
+                                   ModelTestDataT& output_data) {
+  for (size_t i = 0; i < tflite_model.subgraphs.size(); ++i) {
+    std::unique_ptr<SubgraphDataT> subgraph_data(new SubgraphDataT());
+    subgraph_data->subgraph_index = i;
+
+    for (size_t j = 0; j < tflite_model.subgraphs[i]->operators.size(); ++j) {
+      for (size_t k = 0;
+           k < tflite_model.subgraphs[i]->operators[j]->outputs.size(); ++k) {
+        subgraph_data->outputs.emplace_back(new TensorDataT());
+        std::unique_ptr<TensorDataT>& tensor_data =
+            subgraph_data->outputs.back();
+
+        // input_index
+        tensor_data->input_index = -1;
+
+        // tensor index
+        tensor_data->tensor_index =
+            tflite_model.subgraphs[i]->operators[j]->outputs[k];
+
+        TfLiteEvalTensor* layer_output_tensor =
+            interpreter.GetTensor(subgraph_data->outputs.back()->tensor_index,
+                                  subgraph_data->subgraph_index);
+
+        // dims
+        tensor_data->shape.assign(
+            layer_output_tensor->dims->data,
+            layer_output_tensor->dims->data + layer_output_tensor->dims->size);
+
+        // dtype
+        TF_LITE_ENSURE_STATUS(
+            ConvertTensorType(layer_output_tensor->type, tensor_data->dtype));
+        // num_bytes
+        tensor_data->num_bytes = EvalTensorBytes(layer_output_tensor);
+
+        uint8_t* tensor_values =
+            micro::GetTensorData<uint8_t>(layer_output_tensor);
+
+        // data
+        tensor_data->data.assign(
+            tensor_values,
+            tensor_values + EvalTensorBytes(layer_output_tensor));
+
+        // layer_number
+        tensor_data->layer_number = j;
+      }
+    }
+    output_data.subgraph_data.push_back(std::move(subgraph_data));
+  }
+
+  return kTfLiteOk;
+}
+
+bool WriteToFile(const char* output_file_name, ModelTestDataT& output_data) {
+  flatbuffers::DefaultAllocator allocator;
+  flatbuffers::FlatBufferBuilder fbb{2048, &allocator};
+  auto new_model = ModelTestData::Pack(fbb, &output_data);
+  fbb.Finish(new_model);
+  return SaveFile(output_file_name,
+                  reinterpret_cast<char*>(fbb.GetBufferPointer()),
+                  fbb.GetSize());
+}
+
+TfLiteStatus Invoke(const Model* model, ModelTestDataT& output_data) {
+  const tflite::ModelT unpacked_model = *model->UnPack();
+  alignas(16) static uint8_t tensor_arena[kTensorArenaSize];
+
+  TflmOpResolver op_resolver;
+  TF_LITE_ENSURE_STATUS(CreateOpResolver(op_resolver));
+
+  MicroAllocator* allocator = MicroAllocator::Create(
+      tensor_arena, kTensorArenaSize, MemoryPlannerType::kLinear);
+
+  MicroInterpreter interpreter(
+      model, op_resolver, allocator,
+      MicroResourceVariables::Create(allocator, kNumResourceVariable), nullptr);
+  TF_LITE_ENSURE_STATUS(interpreter.AllocateTensors());
+
+  TF_LITE_ASSERT(interpreter.preserve_all_tensors());
+
+  MicroPrintf("");  // null MicroPrintf serves as a newline.
+
+  // For streaming models, the interpreter will return kTfLiteAbort if the model
+  // does not yet have enough data to make an inference. As such, we need to
+  // invoke the interpreter multiple times until we either receive an error or
+  // kTfLiteOk. This loop also works for non-streaming models, as they'll just
+  // return kTfLiteOk after the first invocation.
+  uint32_t seed = kRandomSeed;
+  while (true) {
+    TF_LITE_ENSURE_STATUS(
+        SetRandomInput(seed++, unpacked_model, interpreter, output_data));
+    TfLiteStatus status = interpreter.Invoke();
+    if ((status != kTfLiteOk) && (static_cast<int>(status) != kTfLiteAbort)) {
+      MicroPrintf("Model interpreter invocation failed: %d\n", status);
+      return kTfLiteError;
+    }
+
+    if (status == kTfLiteOk) {
+      break;
+    }
+  }
+  TF_LITE_ENSURE_STATUS(
+      StoreLayerByLayerData(interpreter, unpacked_model, output_data));
+
+  return kTfLiteOk;
+}
+}  // namespace
+}  // namespace tflite
+
+/* Usage information:
+ This binary will write a debugging flatbuffer to the path provided in the 2nd arg
+ using the tflite_model provided in the 1st arg :
+   `bazel run tensorflow/lite/micro/tools:layer_by_layer_output_tool -- \
+     </path/to/input_model.tflite>
+     </path/to/output.file_name>` */
+
+int main(int argc, char** argv) {
+  if (argc < 2) {
+    MicroPrintf("layer_by_layer: invalid usage!\n");
+    MicroPrintf(
+        "usage: layer_by_layer_output_tool  </path/to/input_model.tflite> "
+        "</path/to/output.file_name>");
+    return EXIT_FAILURE;
+  }
+
+  const char* model_file_name = argv[1];
+  const char* output_file_name = argv[2];
+
+  const auto model_file_content = tflite::ReadModelFile(model_file_name);
+
+  if (!model_file_content) {
+    MicroPrintf("Could not read model from file: %s", model_file_name);
+    return EXIT_FAILURE;
+  }
+
+  const tflite::Model* model = tflite::GetModel(model_file_content.get());
+
+  ModelTestDataT output_data;
+
+  TF_LITE_ENSURE_STATUS(tflite::Invoke(model, output_data));
+
+  if (!tflite::WriteToFile(output_file_name, output_data)) {
+    MicroPrintf("Could not write to %s", output_file_name);
+    return EXIT_FAILURE;
+  }
+
+  return EXIT_SUCCESS;
+}
diff --git a/tensorflow/lite/micro/tools/layer_by_layer_debugger.py b/tensorflow/lite/micro/tools/layer_by_layer_debugger.py
new file mode 100644
index 0000000..8aa263b
--- /dev/null
+++ b/tensorflow/lite/micro/tools/layer_by_layer_debugger.py
@@ -0,0 +1,270 @@
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Debugs a TFLM model layer by layer by comparing TFLM outputs against TFLite or golden data."""
+
+import sys
+import unittest
+
+from absl import app
+from absl import flags
+from absl import logging
+import numpy as np
+import tensorflow as tf
+
+from tflite_micro.tensorflow.lite.tools import flatbuffer_utils
+from tensorflow.python.platform import gfile
+from tflite_micro.python.tflite_micro import runtime
+from tflite_micro.tensorflow.lite.micro.tools import layer_by_layer_schema_py_generated as layer_schema_fb
+from tflite_micro.tensorflow.lite.micro.tools import model_transforms_utils
+
+np.set_printoptions(threshold=sys.maxsize)
+
+# Usage information:
+# This Python Tool/Script can first be used to compare TFLM vs TfLite outputs for
+# random inputs by only providing a TfLite file.
+
+# TfLite vs TFLM command:
+#   `bazel run tensorflow/lite/micro/tools:layer_by_layer_debugger -- \
+#     --input_tflite_file=</path/to/my_model.tflite>`
+
+# This Python Tool/Script can also be used to compare TFLM vs Expected
+# Output/Flatbuffer provided by the C++ Tool/binary.
+
+# TFLM vs Expected Command:
+#   `bazel run tensorflow/lite/micro/tools:layer_by_layer_debugger -- \
+#     --input_tflite_file=</path/to/my_model.tflite> \
+#     --dbg_file=</path/to/my_debug_flatbuffer_file>`
+
+# Optional Flags:
+#   --print_dump
+#           when this flag is set, it will dump a part of the TFLM output and
+#           the output it is compared against for each layer
+
+#   --rng
+#           integer flag that only works in TfLite vs TFLM comparison (when only
+#           a TfLite model is provided). It can be used to set the rng seed to a
+#           different value than its default value of 42.
+
+_INPUT_TFLITE_FILE = flags.DEFINE_string(
+    "input_tflite_file",
+    None,
+    "Full path name to the input TFLite file.",
+    required=True)
+
+_RNG = flags.DEFINE_integer(
+    "rng",
+    42,
+    "This flag defines rng seed used to generate random test data for the"
+    " provided model. This only occurs when no input/golden data are provided."
+    " It is defaulted to 42. ",
+)
+
+_DEBUG_FILE = flags.DEFINE_string(
+    "layer_by_layer_data_file",
+    None,
+    "Full path to the debug file , generated in C++",
+    required=False,
+)
+
+_PRINT_PREVIEW = flags.DEFINE_bool(
+    "print_dump",
+    False,
+    "When this flag is set to True, it prints a preview of elements of the TFLM"
+    " output and output it's being compared with.",
+    required=False,
+)
+
+
+def numpy_from_tensor_type(tensor_type_idx):
+  """Gives the equivalent numpy dtype based on TensorType class (schema) number."""
+  tensor_type_idx_to_numpy = {
+      layer_schema_fb.TensorTypes.FLOAT32:
+      np.float32,
+      layer_schema_fb.TensorTypes.FLOAT16:
+      np.float16,
+      layer_schema_fb.TensorTypes.INT32:
+      np.int32,
+      layer_schema_fb.TensorTypes.UINT8:
+      np.uint8,
+      layer_schema_fb.TensorTypes.INT64:
+      np.int64,
+      layer_schema_fb.TensorTypes.STRING:
+      np.bytes_,
+      layer_schema_fb.TensorTypes.BOOL:
+      np.bool_,
+      layer_schema_fb.TensorTypes.INT16:
+      np.int16,
+      layer_schema_fb.TensorTypes.COMPLEX64:
+      np.complex64,
+      layer_schema_fb.TensorTypes.INT8:
+      np.int8,
+      layer_schema_fb.TensorTypes.FLOAT64:
+      np.float64,
+      layer_schema_fb.TensorTypes.COMPLEX128:
+      np.complex128,
+      layer_schema_fb.TensorTypes.UINT64:
+      np.uint64,
+      layer_schema_fb.TensorTypes.RESOURCE:
+      "RESORCE",
+      layer_schema_fb.TensorTypes.VARIANT:
+      "VARIANT",
+      layer_schema_fb.TensorTypes.UINT32:
+      np.uint32,
+      layer_schema_fb.TensorTypes.UINT16:
+      np.uint16,
+      # INT4 is mapped to INT8, b/246806634
+      layer_schema_fb.TensorTypes.INT4:
+      np.int8,
+  }
+  return tensor_type_idx_to_numpy.get(tensor_type_idx)
+
+
+def GenerateRandomInputTfLiteComparison(tflm_interpreter, tflite_interpreter,
+                                        model, rng_value):
+  subgraph_info = layer_schema_fb.ModelTestDataT()
+  subgraph_info.subgraphData = []
+  rng_seed = np.random.default_rng(seed=rng_value)
+
+  for subgraph_index, subgraph in enumerate(model.subgraphs):
+    subgraph_data = layer_schema_fb.SubgraphDataT()
+    subgraph_data.subgraphIndex = subgraph_index
+    subgraph_data.outputs = []
+
+    for op_index, operator in enumerate(subgraph.operators):
+      for output in operator.outputs:
+        tensor_data = layer_schema_fb.TensorDataT()
+        tensor_data.layerNumber = op_index
+        tensor_data.tensorIndex = output
+        subgraph_data.outputs.append(tensor_data)
+    subgraph_info.subgraphData.append(subgraph_data)
+
+  for index, input_tensor_index in enumerate(model.subgraphs[0].inputs):
+    input_tensor = model.subgraphs[0].tensors[input_tensor_index]
+    random_data = model_transforms_utils.generate_random_input_data(
+        model, input_tensor, rng_seed)
+    tflm_interpreter.set_input(random_data, index)
+    tflite_interpreter.set_tensor(input_tensor_index, random_data)
+  return subgraph_info, tflm_interpreter, tflite_interpreter
+
+
+def ReadDebugFile():
+  with gfile.GFile(_DEBUG_FILE.value, "rb") as debug_file_handle:
+    debug_bytearray = bytearray(debug_file_handle.read())
+  flatbuffer_root_object = layer_schema_fb.ModelTestData.GetRootAs(
+      debug_bytearray, 0)
+  debug_obj = layer_schema_fb.ModelTestDataT.InitFromObj(
+      flatbuffer_root_object)
+  return debug_obj
+
+
+def SetDebugFileInterpreterInput(tflm_interpreter, tflite_interpreter,
+                                 debug_obj):
+  for inputs in debug_obj.inputData:
+    input_array = np.frombuffer(bytearray(inputs.data),
+                                dtype=numpy_from_tensor_type(inputs.dtype))
+    input_array = np.reshape(input_array, inputs.shape)
+    tflm_interpreter.set_input(input_array, inputs.inputIndex)
+    tflite_interpreter.set_tensor(inputs.tensorIndex, input_array)
+
+  return tflm_interpreter, tflite_interpreter
+
+
+def main(_) -> None:
+  logging.info(
+      "\n--Running TFLM vs TfLite layer by layer debugger on: %s",
+      _INPUT_TFLITE_FILE.value,
+  )
+
+  model = flatbuffer_utils.read_model(_INPUT_TFLITE_FILE.value)
+
+  tflm_interpreter = runtime.Interpreter.from_file(
+      _INPUT_TFLITE_FILE.value,
+      intrepreter_config=runtime.InterpreterConfig.kPreserveAllTensors,
+  )
+
+  tflite_interpreter = tf.lite.Interpreter(
+      model_path=_INPUT_TFLITE_FILE.value,
+      experimental_preserve_all_tensors=True,
+  )
+
+  tflite_interpreter.allocate_tensors()
+
+  debug_obj = None
+
+  # Setting Inputs either randomly or using provided Debug File
+  if _DEBUG_FILE.value == None:
+    debug_obj, tflm_interpreter, tflite_interpreter = (
+        GenerateRandomInputTfLiteComparison(tflm_interpreter,
+                                            tflite_interpreter, model,
+                                            _RNG.value))
+    tflite_interpreter.invoke()
+  else:
+    debug_obj = ReadDebugFile()
+    tflm_interpreter, tflite_interpreter = SetDebugFileInterpreterInput(
+        tflm_interpreter, tflite_interpreter, debug_obj)
+
+  tflm_interpreter.invoke()
+  comparison = ""
+
+  for subgraph in debug_obj.subgraphData:
+    for output in subgraph.outputs:
+      tflm_ouput = tflm_interpreter.GetTensor(
+          output.tensorIndex, subgraph.subgraphIndex)["tensor_data"]
+
+      comparison_ouput = None
+
+      if _DEBUG_FILE.value == None:
+        tflite_output = tflite_interpreter.get_tensor(output.tensorIndex,
+                                                      subgraph.subgraphIndex)
+        comparison_ouput = tflite_output
+        comparison = "TfLite"
+      else:
+        expected_output_data = np.frombuffer(bytearray(output.data),
+                                             dtype=numpy_from_tensor_type(
+                                                 output.dtype))
+        expected_output_data = np.reshape(expected_output_data, output.shape)
+        comparison = "Expected Golden Data"
+        comparison_ouput = expected_output_data
+
+      error_message = (
+          "\n\nTFLM output does not match {comparison} output.\n Subgraph"
+          " number is {subgraph_index} \n Layer number is {layer_number} \n The"
+          " Tensor Index where this output does not match is {tensor_index}"
+          " \n\n\n".format(
+              comparison=comparison,
+              subgraph_index=subgraph.subgraphIndex,
+              layer_number=output.layerNumber,
+              tensor_index=output.tensorIndex,
+          ))
+      if _PRINT_PREVIEW.value:
+        print("layer number ", output.layerNumber)
+        print("tensor index ", output.tensorIndex, "\n\n")
+        print("TFLM output \n ", tflm_ouput[:10])
+        print(
+            "{comparison} output \n".format(comparison=comparison),
+            comparison_ouput[:_PRINT_PREVIEW.value],
+        )
+        print("--------------\n\n\n")
+      np.testing.assert_array_equal(tflm_ouput,
+                                    comparison_ouput,
+                                    err_msg=error_message,
+                                    verbose=True)
+  print(
+      "\n\nTFLM output matched {comparison} output for all Layers in the Model."
+      .format(comparison=comparison))
+
+
+if __name__ == "__main__":
+  app.run(main)
diff --git a/tensorflow/lite/micro/tools/layer_by_layer_schema.fbs b/tensorflow/lite/micro/tools/layer_by_layer_schema.fbs
new file mode 100644
index 0000000..4183c9c
--- /dev/null
+++ b/tensorflow/lite/micro/tools/layer_by_layer_schema.fbs
@@ -0,0 +1,62 @@
+// Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+enum TensorTypes : byte {
+  FLOAT32 = 0,
+  FLOAT16 = 1,
+  INT32 = 2,
+  UINT8 = 3,
+  INT64 = 4,
+  STRING = 5,
+  BOOL = 6,
+  INT16 = 7,
+  COMPLEX64 = 8,
+  INT8 = 9,
+  FLOAT64 = 10,
+  COMPLEX128 = 11,
+  UINT64 = 12,
+  // Experimental: Resource and variant types are experimental, that are subject
+  // to change. Do not implement custom kernels using resource & variant types
+  // now.
+  RESOURCE = 13,
+  VARIANT = 14,
+  UINT32 = 15,
+  UINT16 = 16,
+  INT4 = 17,
+  BFLOAT16 = 18,
+}
+
+table TensorData {
+  // input_index will be set to -1 for non-input tensors
+  input_index:uint;
+  tensor_index: uint;
+  shape:[int];
+  dtype:TensorTypes;
+  data:[ubyte];
+  num_bytes:uint;
+  // layer_number will be set to -1 for input tensors
+  layer_number:uint;
+}
+
+table SubgraphData {
+  outputs:[TensorData];
+  subgraph_index:uint;
+}
+
+table ModelTestData {
+  input_data:[TensorData];
+  subgraph_data:[SubgraphData];
+}
+
+root_type ModelTestData;
diff --git a/tensorflow/lite/micro/tools/layer_by_layer_schema_generated.h b/tensorflow/lite/micro/tools/layer_by_layer_schema_generated.h
new file mode 100644
index 0000000..67a2caa
--- /dev/null
+++ b/tensorflow/lite/micro/tools/layer_by_layer_schema_generated.h
@@ -0,0 +1,597 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+// automatically generated by the FlatBuffers compiler, do not modify
+
+
+#ifndef FLATBUFFERS_GENERATED_LAYERBYLAYERSCHEMA_H_
+#define FLATBUFFERS_GENERATED_LAYERBYLAYERSCHEMA_H_
+
+#include "flatbuffers/flatbuffers.h"
+
+// Ensure the included flatbuffers.h is the same version as when this file was
+// generated, otherwise it may not be compatible.
+static_assert(FLATBUFFERS_VERSION_MAJOR == 23 &&
+              FLATBUFFERS_VERSION_MINOR == 5 &&
+              FLATBUFFERS_VERSION_REVISION == 26,
+             "Non-compatible flatbuffers version included");
+
+struct TensorData;
+struct TensorDataBuilder;
+struct TensorDataT;
+
+struct SubgraphData;
+struct SubgraphDataBuilder;
+struct SubgraphDataT;
+
+struct ModelTestData;
+struct ModelTestDataBuilder;
+struct ModelTestDataT;
+
+enum TensorTypes : int8_t {
+  TensorTypes_FLOAT32 = 0,
+  TensorTypes_FLOAT16 = 1,
+  TensorTypes_INT32 = 2,
+  TensorTypes_UINT8 = 3,
+  TensorTypes_INT64 = 4,
+  TensorTypes_STRING = 5,
+  TensorTypes_BOOL = 6,
+  TensorTypes_INT16 = 7,
+  TensorTypes_COMPLEX64 = 8,
+  TensorTypes_INT8 = 9,
+  TensorTypes_FLOAT64 = 10,
+  TensorTypes_COMPLEX128 = 11,
+  TensorTypes_UINT64 = 12,
+  TensorTypes_RESOURCE = 13,
+  TensorTypes_VARIANT = 14,
+  TensorTypes_UINT32 = 15,
+  TensorTypes_UINT16 = 16,
+  TensorTypes_INT4 = 17,
+  TensorTypes_BFLOAT16 = 18,
+  TensorTypes_MIN = TensorTypes_FLOAT32,
+  TensorTypes_MAX = TensorTypes_BFLOAT16
+};
+
+inline const TensorTypes (&EnumValuesTensorTypes())[19] {
+  static const TensorTypes values[] = {
+    TensorTypes_FLOAT32,
+    TensorTypes_FLOAT16,
+    TensorTypes_INT32,
+    TensorTypes_UINT8,
+    TensorTypes_INT64,
+    TensorTypes_STRING,
+    TensorTypes_BOOL,
+    TensorTypes_INT16,
+    TensorTypes_COMPLEX64,
+    TensorTypes_INT8,
+    TensorTypes_FLOAT64,
+    TensorTypes_COMPLEX128,
+    TensorTypes_UINT64,
+    TensorTypes_RESOURCE,
+    TensorTypes_VARIANT,
+    TensorTypes_UINT32,
+    TensorTypes_UINT16,
+    TensorTypes_INT4,
+    TensorTypes_BFLOAT16
+  };
+  return values;
+}
+
+inline const char * const *EnumNamesTensorTypes() {
+  static const char * const names[20] = {
+    "FLOAT32",
+    "FLOAT16",
+    "INT32",
+    "UINT8",
+    "INT64",
+    "STRING",
+    "BOOL",
+    "INT16",
+    "COMPLEX64",
+    "INT8",
+    "FLOAT64",
+    "COMPLEX128",
+    "UINT64",
+    "RESOURCE",
+    "VARIANT",
+    "UINT32",
+    "UINT16",
+    "INT4",
+    "BFLOAT16",
+    nullptr
+  };
+  return names;
+}
+
+inline const char *EnumNameTensorTypes(TensorTypes e) {
+  if (::flatbuffers::IsOutRange(e, TensorTypes_FLOAT32, TensorTypes_BFLOAT16)) return "";
+  const size_t index = static_cast<size_t>(e);
+  return EnumNamesTensorTypes()[index];
+}
+
+struct TensorDataT : public ::flatbuffers::NativeTable {
+  typedef TensorData TableType;
+  uint32_t input_index = 0;
+  uint32_t tensor_index = 0;
+  std::vector<int32_t> shape{};
+  TensorTypes dtype = TensorTypes_FLOAT32;
+  std::vector<uint8_t> data{};
+  uint32_t num_bytes = 0;
+  uint32_t layer_number = 0;
+};
+
+struct TensorData FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
+  typedef TensorDataT NativeTableType;
+  typedef TensorDataBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_INPUT_INDEX = 4,
+    VT_TENSOR_INDEX = 6,
+    VT_SHAPE = 8,
+    VT_DTYPE = 10,
+    VT_DATA = 12,
+    VT_NUM_BYTES = 14,
+    VT_LAYER_NUMBER = 16
+  };
+  uint32_t input_index() const {
+    return GetField<uint32_t>(VT_INPUT_INDEX, 0);
+  }
+  uint32_t tensor_index() const {
+    return GetField<uint32_t>(VT_TENSOR_INDEX, 0);
+  }
+  const ::flatbuffers::Vector<int32_t> *shape() const {
+    return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_SHAPE);
+  }
+  TensorTypes dtype() const {
+    return static_cast<TensorTypes>(GetField<int8_t>(VT_DTYPE, 0));
+  }
+  const ::flatbuffers::Vector<uint8_t> *data() const {
+    return GetPointer<const ::flatbuffers::Vector<uint8_t> *>(VT_DATA);
+  }
+  uint32_t num_bytes() const {
+    return GetField<uint32_t>(VT_NUM_BYTES, 0);
+  }
+  uint32_t layer_number() const {
+    return GetField<uint32_t>(VT_LAYER_NUMBER, 0);
+  }
+  bool Verify(::flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyField<uint32_t>(verifier, VT_INPUT_INDEX, 4) &&
+           VerifyField<uint32_t>(verifier, VT_TENSOR_INDEX, 4) &&
+           VerifyOffset(verifier, VT_SHAPE) &&
+           verifier.VerifyVector(shape()) &&
+           VerifyField<int8_t>(verifier, VT_DTYPE, 1) &&
+           VerifyOffset(verifier, VT_DATA) &&
+           verifier.VerifyVector(data()) &&
+           VerifyField<uint32_t>(verifier, VT_NUM_BYTES, 4) &&
+           VerifyField<uint32_t>(verifier, VT_LAYER_NUMBER, 4) &&
+           verifier.EndTable();
+  }
+  TensorDataT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(TensorDataT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<TensorData> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TensorDataT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct TensorDataBuilder {
+  typedef TensorData Table;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_input_index(uint32_t input_index) {
+    fbb_.AddElement<uint32_t>(TensorData::VT_INPUT_INDEX, input_index, 0);
+  }
+  void add_tensor_index(uint32_t tensor_index) {
+    fbb_.AddElement<uint32_t>(TensorData::VT_TENSOR_INDEX, tensor_index, 0);
+  }
+  void add_shape(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> shape) {
+    fbb_.AddOffset(TensorData::VT_SHAPE, shape);
+  }
+  void add_dtype(TensorTypes dtype) {
+    fbb_.AddElement<int8_t>(TensorData::VT_DTYPE, static_cast<int8_t>(dtype), 0);
+  }
+  void add_data(::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> data) {
+    fbb_.AddOffset(TensorData::VT_DATA, data);
+  }
+  void add_num_bytes(uint32_t num_bytes) {
+    fbb_.AddElement<uint32_t>(TensorData::VT_NUM_BYTES, num_bytes, 0);
+  }
+  void add_layer_number(uint32_t layer_number) {
+    fbb_.AddElement<uint32_t>(TensorData::VT_LAYER_NUMBER, layer_number, 0);
+  }
+  explicit TensorDataBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  ::flatbuffers::Offset<TensorData> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = ::flatbuffers::Offset<TensorData>(end);
+    return o;
+  }
+};
+
+inline ::flatbuffers::Offset<TensorData> CreateTensorData(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    uint32_t input_index = 0,
+    uint32_t tensor_index = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> shape = 0,
+    TensorTypes dtype = TensorTypes_FLOAT32,
+    ::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> data = 0,
+    uint32_t num_bytes = 0,
+    uint32_t layer_number = 0) {
+  TensorDataBuilder builder_(_fbb);
+  builder_.add_layer_number(layer_number);
+  builder_.add_num_bytes(num_bytes);
+  builder_.add_data(data);
+  builder_.add_shape(shape);
+  builder_.add_tensor_index(tensor_index);
+  builder_.add_input_index(input_index);
+  builder_.add_dtype(dtype);
+  return builder_.Finish();
+}
+
+inline ::flatbuffers::Offset<TensorData> CreateTensorDataDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    uint32_t input_index = 0,
+    uint32_t tensor_index = 0,
+    const std::vector<int32_t> *shape = nullptr,
+    TensorTypes dtype = TensorTypes_FLOAT32,
+    const std::vector<uint8_t> *data = nullptr,
+    uint32_t num_bytes = 0,
+    uint32_t layer_number = 0) {
+  auto shape__ = shape ? _fbb.CreateVector<int32_t>(*shape) : 0;
+  auto data__ = data ? _fbb.CreateVector<uint8_t>(*data) : 0;
+  return CreateTensorData(
+      _fbb,
+      input_index,
+      tensor_index,
+      shape__,
+      dtype,
+      data__,
+      num_bytes,
+      layer_number);
+}
+
+::flatbuffers::Offset<TensorData> CreateTensorData(::flatbuffers::FlatBufferBuilder &_fbb, const TensorDataT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct SubgraphDataT : public ::flatbuffers::NativeTable {
+  typedef SubgraphData TableType;
+  std::vector<std::unique_ptr<TensorDataT>> outputs{};
+  uint32_t subgraph_index = 0;
+  SubgraphDataT() = default;
+  SubgraphDataT(const SubgraphDataT &o);
+  SubgraphDataT(SubgraphDataT&&) FLATBUFFERS_NOEXCEPT = default;
+  SubgraphDataT &operator=(SubgraphDataT o) FLATBUFFERS_NOEXCEPT;
+};
+
+struct SubgraphData FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
+  typedef SubgraphDataT NativeTableType;
+  typedef SubgraphDataBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_OUTPUTS = 4,
+    VT_SUBGRAPH_INDEX = 6
+  };
+  const ::flatbuffers::Vector<::flatbuffers::Offset<TensorData>> *outputs() const {
+    return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<TensorData>> *>(VT_OUTPUTS);
+  }
+  uint32_t subgraph_index() const {
+    return GetField<uint32_t>(VT_SUBGRAPH_INDEX, 0);
+  }
+  bool Verify(::flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyOffset(verifier, VT_OUTPUTS) &&
+           verifier.VerifyVector(outputs()) &&
+           verifier.VerifyVectorOfTables(outputs()) &&
+           VerifyField<uint32_t>(verifier, VT_SUBGRAPH_INDEX, 4) &&
+           verifier.EndTable();
+  }
+  SubgraphDataT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(SubgraphDataT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<SubgraphData> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SubgraphDataT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct SubgraphDataBuilder {
+  typedef SubgraphData Table;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_outputs(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<TensorData>>> outputs) {
+    fbb_.AddOffset(SubgraphData::VT_OUTPUTS, outputs);
+  }
+  void add_subgraph_index(uint32_t subgraph_index) {
+    fbb_.AddElement<uint32_t>(SubgraphData::VT_SUBGRAPH_INDEX, subgraph_index, 0);
+  }
+  explicit SubgraphDataBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  ::flatbuffers::Offset<SubgraphData> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = ::flatbuffers::Offset<SubgraphData>(end);
+    return o;
+  }
+};
+
+inline ::flatbuffers::Offset<SubgraphData> CreateSubgraphData(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<TensorData>>> outputs = 0,
+    uint32_t subgraph_index = 0) {
+  SubgraphDataBuilder builder_(_fbb);
+  builder_.add_subgraph_index(subgraph_index);
+  builder_.add_outputs(outputs);
+  return builder_.Finish();
+}
+
+inline ::flatbuffers::Offset<SubgraphData> CreateSubgraphDataDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    const std::vector<::flatbuffers::Offset<TensorData>> *outputs = nullptr,
+    uint32_t subgraph_index = 0) {
+  auto outputs__ = outputs ? _fbb.CreateVector<::flatbuffers::Offset<TensorData>>(*outputs) : 0;
+  return CreateSubgraphData(
+      _fbb,
+      outputs__,
+      subgraph_index);
+}
+
+::flatbuffers::Offset<SubgraphData> CreateSubgraphData(::flatbuffers::FlatBufferBuilder &_fbb, const SubgraphDataT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct ModelTestDataT : public ::flatbuffers::NativeTable {
+  typedef ModelTestData TableType;
+  std::vector<std::unique_ptr<TensorDataT>> input_data{};
+  std::vector<std::unique_ptr<SubgraphDataT>> subgraph_data{};
+  ModelTestDataT() = default;
+  ModelTestDataT(const ModelTestDataT &o);
+  ModelTestDataT(ModelTestDataT&&) FLATBUFFERS_NOEXCEPT = default;
+  ModelTestDataT &operator=(ModelTestDataT o) FLATBUFFERS_NOEXCEPT;
+};
+
+struct ModelTestData FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
+  typedef ModelTestDataT NativeTableType;
+  typedef ModelTestDataBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_INPUT_DATA = 4,
+    VT_SUBGRAPH_DATA = 6
+  };
+  const ::flatbuffers::Vector<::flatbuffers::Offset<TensorData>> *input_data() const {
+    return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<TensorData>> *>(VT_INPUT_DATA);
+  }
+  const ::flatbuffers::Vector<::flatbuffers::Offset<SubgraphData>> *subgraph_data() const {
+    return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<SubgraphData>> *>(VT_SUBGRAPH_DATA);
+  }
+  bool Verify(::flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyOffset(verifier, VT_INPUT_DATA) &&
+           verifier.VerifyVector(input_data()) &&
+           verifier.VerifyVectorOfTables(input_data()) &&
+           VerifyOffset(verifier, VT_SUBGRAPH_DATA) &&
+           verifier.VerifyVector(subgraph_data()) &&
+           verifier.VerifyVectorOfTables(subgraph_data()) &&
+           verifier.EndTable();
+  }
+  ModelTestDataT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(ModelTestDataT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<ModelTestData> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ModelTestDataT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct ModelTestDataBuilder {
+  typedef ModelTestData Table;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_input_data(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<TensorData>>> input_data) {
+    fbb_.AddOffset(ModelTestData::VT_INPUT_DATA, input_data);
+  }
+  void add_subgraph_data(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<SubgraphData>>> subgraph_data) {
+    fbb_.AddOffset(ModelTestData::VT_SUBGRAPH_DATA, subgraph_data);
+  }
+  explicit ModelTestDataBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  ::flatbuffers::Offset<ModelTestData> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = ::flatbuffers::Offset<ModelTestData>(end);
+    return o;
+  }
+};
+
+inline ::flatbuffers::Offset<ModelTestData> CreateModelTestData(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<TensorData>>> input_data = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<SubgraphData>>> subgraph_data = 0) {
+  ModelTestDataBuilder builder_(_fbb);
+  builder_.add_subgraph_data(subgraph_data);
+  builder_.add_input_data(input_data);
+  return builder_.Finish();
+}
+
+inline ::flatbuffers::Offset<ModelTestData> CreateModelTestDataDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    const std::vector<::flatbuffers::Offset<TensorData>> *input_data = nullptr,
+    const std::vector<::flatbuffers::Offset<SubgraphData>> *subgraph_data = nullptr) {
+  auto input_data__ = input_data ? _fbb.CreateVector<::flatbuffers::Offset<TensorData>>(*input_data) : 0;
+  auto subgraph_data__ = subgraph_data ? _fbb.CreateVector<::flatbuffers::Offset<SubgraphData>>(*subgraph_data) : 0;
+  return CreateModelTestData(
+      _fbb,
+      input_data__,
+      subgraph_data__);
+}
+
+::flatbuffers::Offset<ModelTestData> CreateModelTestData(::flatbuffers::FlatBufferBuilder &_fbb, const ModelTestDataT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+inline TensorDataT *TensorData::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
+  auto _o = std::unique_ptr<TensorDataT>(new TensorDataT());
+  UnPackTo(_o.get(), _resolver);
+  return _o.release();
+}
+
+inline void TensorData::UnPackTo(TensorDataT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
+  (void)_o;
+  (void)_resolver;
+  { auto _e = input_index(); _o->input_index = _e; }
+  { auto _e = tensor_index(); _o->tensor_index = _e; }
+  { auto _e = shape(); if (_e) { _o->shape.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->shape[_i] = _e->Get(_i); } } else { _o->shape.resize(0); } }
+  { auto _e = dtype(); _o->dtype = _e; }
+  { auto _e = data(); if (_e) { _o->data.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->data.begin()); } }
+  { auto _e = num_bytes(); _o->num_bytes = _e; }
+  { auto _e = layer_number(); _o->layer_number = _e; }
+}
+
+inline ::flatbuffers::Offset<TensorData> TensorData::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TensorDataT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  return CreateTensorData(_fbb, _o, _rehasher);
+}
+
+inline ::flatbuffers::Offset<TensorData> CreateTensorData(::flatbuffers::FlatBufferBuilder &_fbb, const TensorDataT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  (void)_rehasher;
+  (void)_o;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const TensorDataT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  auto _input_index = _o->input_index;
+  auto _tensor_index = _o->tensor_index;
+  auto _shape = _o->shape.size() ? _fbb.CreateVector(_o->shape) : 0;
+  auto _dtype = _o->dtype;
+  auto _data = _o->data.size() ? _fbb.CreateVector(_o->data) : 0;
+  auto _num_bytes = _o->num_bytes;
+  auto _layer_number = _o->layer_number;
+  return CreateTensorData(
+      _fbb,
+      _input_index,
+      _tensor_index,
+      _shape,
+      _dtype,
+      _data,
+      _num_bytes,
+      _layer_number);
+}
+
+inline SubgraphDataT::SubgraphDataT(const SubgraphDataT &o)
+      : subgraph_index(o.subgraph_index) {
+  outputs.reserve(o.outputs.size());
+  for (const auto &outputs_ : o.outputs) { outputs.emplace_back((outputs_) ? new TensorDataT(*outputs_) : nullptr); }
+}
+
+inline SubgraphDataT &SubgraphDataT::operator=(SubgraphDataT o) FLATBUFFERS_NOEXCEPT {
+  std::swap(outputs, o.outputs);
+  std::swap(subgraph_index, o.subgraph_index);
+  return *this;
+}
+
+inline SubgraphDataT *SubgraphData::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
+  auto _o = std::unique_ptr<SubgraphDataT>(new SubgraphDataT());
+  UnPackTo(_o.get(), _resolver);
+  return _o.release();
+}
+
+inline void SubgraphData::UnPackTo(SubgraphDataT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
+  (void)_o;
+  (void)_resolver;
+  { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->outputs[_i]) { _e->Get(_i)->UnPackTo(_o->outputs[_i].get(), _resolver); } else { _o->outputs[_i] = std::unique_ptr<TensorDataT>(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->outputs.resize(0); } }
+  { auto _e = subgraph_index(); _o->subgraph_index = _e; }
+}
+
+inline ::flatbuffers::Offset<SubgraphData> SubgraphData::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SubgraphDataT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  return CreateSubgraphData(_fbb, _o, _rehasher);
+}
+
+inline ::flatbuffers::Offset<SubgraphData> CreateSubgraphData(::flatbuffers::FlatBufferBuilder &_fbb, const SubgraphDataT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  (void)_rehasher;
+  (void)_o;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SubgraphDataT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  auto _outputs = _o->outputs.size() ? _fbb.CreateVector<::flatbuffers::Offset<TensorData>> (_o->outputs.size(), [](size_t i, _VectorArgs *__va) { return CreateTensorData(*__va->__fbb, __va->__o->outputs[i].get(), __va->__rehasher); }, &_va ) : 0;
+  auto _subgraph_index = _o->subgraph_index;
+  return CreateSubgraphData(
+      _fbb,
+      _outputs,
+      _subgraph_index);
+}
+
+inline ModelTestDataT::ModelTestDataT(const ModelTestDataT &o) {
+  input_data.reserve(o.input_data.size());
+  for (const auto &input_data_ : o.input_data) { input_data.emplace_back((input_data_) ? new TensorDataT(*input_data_) : nullptr); }
+  subgraph_data.reserve(o.subgraph_data.size());
+  for (const auto &subgraph_data_ : o.subgraph_data) { subgraph_data.emplace_back((subgraph_data_) ? new SubgraphDataT(*subgraph_data_) : nullptr); }
+}
+
+inline ModelTestDataT &ModelTestDataT::operator=(ModelTestDataT o) FLATBUFFERS_NOEXCEPT {
+  std::swap(input_data, o.input_data);
+  std::swap(subgraph_data, o.subgraph_data);
+  return *this;
+}
+
+inline ModelTestDataT *ModelTestData::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
+  auto _o = std::unique_ptr<ModelTestDataT>(new ModelTestDataT());
+  UnPackTo(_o.get(), _resolver);
+  return _o.release();
+}
+
+inline void ModelTestData::UnPackTo(ModelTestDataT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
+  (void)_o;
+  (void)_resolver;
+  { auto _e = input_data(); if (_e) { _o->input_data.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->input_data[_i]) { _e->Get(_i)->UnPackTo(_o->input_data[_i].get(), _resolver); } else { _o->input_data[_i] = std::unique_ptr<TensorDataT>(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->input_data.resize(0); } }
+  { auto _e = subgraph_data(); if (_e) { _o->subgraph_data.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->subgraph_data[_i]) { _e->Get(_i)->UnPackTo(_o->subgraph_data[_i].get(), _resolver); } else { _o->subgraph_data[_i] = std::unique_ptr<SubgraphDataT>(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->subgraph_data.resize(0); } }
+}
+
+inline ::flatbuffers::Offset<ModelTestData> ModelTestData::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ModelTestDataT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  return CreateModelTestData(_fbb, _o, _rehasher);
+}
+
+inline ::flatbuffers::Offset<ModelTestData> CreateModelTestData(::flatbuffers::FlatBufferBuilder &_fbb, const ModelTestDataT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  (void)_rehasher;
+  (void)_o;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ModelTestDataT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  auto _input_data = _o->input_data.size() ? _fbb.CreateVector<::flatbuffers::Offset<TensorData>> (_o->input_data.size(), [](size_t i, _VectorArgs *__va) { return CreateTensorData(*__va->__fbb, __va->__o->input_data[i].get(), __va->__rehasher); }, &_va ) : 0;
+  auto _subgraph_data = _o->subgraph_data.size() ? _fbb.CreateVector<::flatbuffers::Offset<SubgraphData>> (_o->subgraph_data.size(), [](size_t i, _VectorArgs *__va) { return CreateSubgraphData(*__va->__fbb, __va->__o->subgraph_data[i].get(), __va->__rehasher); }, &_va ) : 0;
+  return CreateModelTestData(
+      _fbb,
+      _input_data,
+      _subgraph_data);
+}
+
+inline const ModelTestData *GetModelTestData(const void *buf) {
+  return ::flatbuffers::GetRoot<ModelTestData>(buf);
+}
+
+inline const ModelTestData *GetSizePrefixedModelTestData(const void *buf) {
+  return ::flatbuffers::GetSizePrefixedRoot<ModelTestData>(buf);
+}
+
+inline bool VerifyModelTestDataBuffer(
+    ::flatbuffers::Verifier &verifier) {
+  return verifier.VerifyBuffer<ModelTestData>(nullptr);
+}
+
+inline bool VerifySizePrefixedModelTestDataBuffer(
+    ::flatbuffers::Verifier &verifier) {
+  return verifier.VerifySizePrefixedBuffer<ModelTestData>(nullptr);
+}
+
+inline void FinishModelTestDataBuffer(
+    ::flatbuffers::FlatBufferBuilder &fbb,
+    ::flatbuffers::Offset<ModelTestData> root) {
+  fbb.Finish(root);
+}
+
+inline void FinishSizePrefixedModelTestDataBuffer(
+    ::flatbuffers::FlatBufferBuilder &fbb,
+    ::flatbuffers::Offset<ModelTestData> root) {
+  fbb.FinishSizePrefixed(root);
+}
+
+inline std::unique_ptr<ModelTestDataT> UnPackModelTestData(
+    const void *buf,
+    const ::flatbuffers::resolver_function_t *res = nullptr) {
+  return std::unique_ptr<ModelTestDataT>(GetModelTestData(buf)->UnPack(res));
+}
+
+inline std::unique_ptr<ModelTestDataT> UnPackSizePrefixedModelTestData(
+    const void *buf,
+    const ::flatbuffers::resolver_function_t *res = nullptr) {
+  return std::unique_ptr<ModelTestDataT>(GetSizePrefixedModelTestData(buf)->UnPack(res));
+}
+
+#endif  // FLATBUFFERS_GENERATED_LAYERBYLAYERSCHEMA_H_
diff --git a/tensorflow/lite/micro/tools/make/Makefile b/tensorflow/lite/micro/tools/make/Makefile
index badd0ca..3bf2b54 100644
--- a/tensorflow/lite/micro/tools/make/Makefile
+++ b/tensorflow/lite/micro/tools/make/Makefile
@@ -1,4 +1,4 @@
-# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+# Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -60,9 +60,21 @@
 # Specify which specialized kernel implementation should be pulled in.
 OPTIMIZED_KERNEL_DIR :=
 
+# Optimize kernels for speed or memory. This is similar but not the same as KERNEL_OPTIMIZATION_LEVEL and
+# CORE_OPTIMIZATION_LEVEL, which specify compiler optimization level.
+# Instead this enables a kernel to provide multiple implementations that are configured at build time.
+# An example could be a kernel requiring a bigger scratch buffer for certain use cases.
+# The example kernel would have a smaller scratch buffer usage when building for size.
+# Vice versa, it would use more scratch buffer when building for speed and would be more performant.
+# Note that this is optional. If having one implementation, nothing needs to be done.
+# OPTIMIZE_KERNELS_FOR has only two valid values, KERNELS_OPTIMIZED_FOR_SIZE and KERNELS_OPTIMIZED_FOR_SPEED where the
+# latter is the default.
+OPTIMIZE_KERNELS_FOR := KERNELS_OPTIMIZED_FOR_SPEED
+
 # Override this variable from the command line in case the optimized kernels are
 # in a different directory.
 OPTIMIZED_KERNEL_DIR_PREFIX := $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels
+OPTIMIZED_SIGNAL_KERNEL_DIR_PREFIX := $(TENSORFLOW_ROOT)signal/micro/kernels
 
 # Specify which co-processor's kernel implementation should be pulled in.
 # If the same kernel is implemented in both kernels/OPTIMIZED_KERNEL_DIR and
@@ -98,7 +110,7 @@
 
 MICROLITE_LIBS := -lm
 
-# For the optimized_kernel_dir, and co-processor as specified on the
+# For the optimized_kernel_dir, co-processor and optimize_kernels_for as specified on the
 # command line we add -D<tag> to the cflags to allow for #idefs in the code.
 #
 # We apply the following transformations (via the tr command):
@@ -112,6 +124,10 @@
   ADDITIONAL_DEFINES += -D$(shell echo $(CO_PROCESSOR) | tr [a-z] [A-Z])
 endif
 
+ifneq ($(OPTIMIZE_KERNELS_FOR),)
+  ADDITIONAL_DEFINES += -D$(shell echo $(OPTIMIZE_KERNELS_FOR) | tr [a-z] [A-Z])
+endif
+
 ifeq ($(TOOLCHAIN), armclang)
   CORE_OPTIMIZATION_LEVEL := -Oz
 else
@@ -130,7 +146,6 @@
 CC_WARNINGS := \
   -Wsign-compare \
   -Wdouble-promotion \
-  -Wshadow \
   -Wunused-variable \
   -Wunused-function \
   -Wswitch \
@@ -141,6 +156,14 @@
   -Wstrict-aliasing \
   -Wno-unused-parameter
 
+ifneq ($(TOOLCHAIN), gcc)
+  # GCC can be overly aggressive with shadow warnings, such as warning when a
+  # lambda has a variable with the same name as a non-captured variable from the
+  # enclosing scope. As such, we don't enable shadow warnings on gcc.
+  # https://stackoverflow.com/q/66404751
+  CC_WARNINGS += -Wshadow
+endif
+
 COMMON_FLAGS := \
   -Werror \
   -fno-unwind-tables \
@@ -159,7 +182,7 @@
 endif
 
 CXXFLAGS := \
-  -std=c++11 \
+  -std=c++17 \
   -fno-rtti \
   -fno-exceptions \
   -fno-threadsafe-statics \
@@ -168,7 +191,7 @@
 
 CCFLAGS := \
   -Wimplicit-function-declaration \
-  -std=c11 \
+  -std=c17 \
   $(COMMON_FLAGS)
 
 ARFLAGS := -r
@@ -234,13 +257,6 @@
 	# https://github.com/tensorflow/tensorflow/issues/43076
 	CXXFLAGS := $(filter-out -DTF_LITE_STATIC_MEMORY, $(CXXFLAGS))
 	CCFLAGS := $(filter-out -DTF_LITE_STATIC_MEMORY, $(CCFLAGS))
-
-	# We are using C++17 for the no_tf_lite_static_memory_build to make it close
-	# to the TfLite bazel build.
-	CXXFLAGS := $(filter-out -std=c++11, $(CXXFLAGS))
-	CXXFLAGS += -std=c++17
-	CCFLAGS := $(filter-out -std=c11, $(CCLAGS))
-	CCFLAGS += -std=c17
 endif
 
 # This library is the main target for this makefile. It will contain a minimal
@@ -249,7 +265,15 @@
 
 # Where compiled objects are stored.
 BASE_GENDIR := gen
-GENDIR := $(BASE_GENDIR)/$(TARGET)_$(TARGET_ARCH)_$(BUILD_TYPE)/
+GENDIR := $(BASE_GENDIR)/$(TARGET)_$(TARGET_ARCH)_$(BUILD_TYPE)
+ifneq ($(OPTIMIZED_KERNEL_DIR),)
+  GENDIR := $(GENDIR)_$(OPTIMIZED_KERNEL_DIR)
+endif
+ifneq ($(CO_PROCESSOR),)
+  GENDIR := $(GENDIR)_$(CO_PROCESSOR)
+endif
+GENDIR := $(GENDIR)_$(TOOLCHAIN)/
+
 CORE_OBJDIR := $(GENDIR)obj/core/
 KERNEL_OBJDIR := $(GENDIR)obj/kernels/
 THIRD_PARTY_KERNEL_OBJDIR := $(GENDIR)obj/third_party_kernels/
@@ -281,10 +305,14 @@
 MICRO_LITE_GEN_MUTABLE_OP_RESOLVER_TEST += \
   $(wildcard $(TENSORFLOW_ROOT)tensorflow/lite/micro/tools/gen_micro_mutable_op_resolver_test/person_detect/Makefile.inc)
 
+OLD_MICRO_LITE_BENCHMARKS := $(wildcard $(TENSORFLOW_ROOT)tensorflow/lite/micro/benchmarks/Makefile.inc)
 MICRO_LITE_BENCHMARKS := $(wildcard $(TENSORFLOW_ROOT)tensorflow/lite/micro/tools/benchmarking/Makefile.inc)
 
+MICRO_LITE_LAYER_BY_LAYER_OUTPUT := $(wildcard $(TENSORFLOW_ROOT)tensorflow/lite/micro/tools/Makefile.inc)
+
 # TODO(b/152645559): move all benchmarks to benchmarks directory.
 MICROLITE_BENCHMARK_SRCS := \
+$(wildcard $(TENSORFLOW_ROOT)tensorflow/lite/micro/benchmarks/*benchmark.cc) \
 $(wildcard $(TENSORFLOW_ROOT)tensorflow/lite/micro/tools/benchmarking/*benchmark.cc)
 
 MICROLITE_TEST_SRCS := \
@@ -294,7 +322,7 @@
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/memory_helpers_test.cc \
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/micro_allocator_test.cc \
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/micro_allocation_info_test.cc \
-$(TENSORFLOW_ROOT)tensorflow/lite/micro/micro_context_test.cc \
+$(TENSORFLOW_ROOT)tensorflow/lite/micro/micro_interpreter_context_test.cc \
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/micro_log_test.cc \
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/micro_interpreter_test.cc \
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/micro_mutable_op_resolver_test.cc \
@@ -312,40 +340,6 @@
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim_test.cc
 
 MICROLITE_CC_KERNEL_SRCS := \
-$(TENSORFLOW_ROOT)signal/micro/kernels/delay.cc \
-$(TENSORFLOW_ROOT)signal/micro/kernels/energy.cc \
-$(TENSORFLOW_ROOT)signal/micro/kernels/fft_auto_scale.cc \
-$(TENSORFLOW_ROOT)signal/micro/kernels/filter_bank.cc \
-$(TENSORFLOW_ROOT)signal/micro/kernels/filter_bank_log.cc \
-$(TENSORFLOW_ROOT)signal/micro/kernels/filter_bank_square_root.cc \
-$(TENSORFLOW_ROOT)signal/micro/kernels/filter_bank_spectral_subtraction.cc \
-$(TENSORFLOW_ROOT)signal/micro/kernels/framer.cc \
-$(TENSORFLOW_ROOT)signal/micro/kernels/irfft.cc \
-$(TENSORFLOW_ROOT)signal/micro/kernels/rfft.cc \
-$(TENSORFLOW_ROOT)signal/micro/kernels/stacker.cc \
-$(TENSORFLOW_ROOT)signal/micro/kernels/overlap_add.cc \
-$(TENSORFLOW_ROOT)signal/micro/kernels/window.cc \
-$(TENSORFLOW_ROOT)signal/src/circular_buffer.cc \
-$(TENSORFLOW_ROOT)signal/src/energy.cc \
-$(TENSORFLOW_ROOT)signal/src/fft_auto_scale.cc \
-$(TENSORFLOW_ROOT)signal/src/filter_bank.cc \
-$(TENSORFLOW_ROOT)signal/src/filter_bank_log.cc \
-$(TENSORFLOW_ROOT)signal/src/filter_bank_square_root.cc \
-$(TENSORFLOW_ROOT)signal/src/filter_bank_spectral_subtraction.cc \
-$(TENSORFLOW_ROOT)signal/src/irfft_float.cc \
-$(TENSORFLOW_ROOT)signal/src/irfft_int16.cc \
-$(TENSORFLOW_ROOT)signal/src/irfft_int32.cc \
-$(TENSORFLOW_ROOT)signal/src/log.cc \
-$(TENSORFLOW_ROOT)signal/src/max_abs.cc \
-$(TENSORFLOW_ROOT)signal/src/msb_32.cc \
-$(TENSORFLOW_ROOT)signal/src/msb_64.cc \
-$(TENSORFLOW_ROOT)signal/src/overlap_add.cc \
-$(TENSORFLOW_ROOT)signal/src/rfft_float.cc \
-$(TENSORFLOW_ROOT)signal/src/rfft_int16.cc \
-$(TENSORFLOW_ROOT)signal/src/rfft_int32.cc \
-$(TENSORFLOW_ROOT)signal/src/square_root_32.cc \
-$(TENSORFLOW_ROOT)signal/src/square_root_64.cc \
-$(TENSORFLOW_ROOT)signal/src/window.cc \
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/activations.cc \
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/activations_common.cc \
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/add.cc \
@@ -353,6 +347,7 @@
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/add_n.cc \
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/arg_min_max.cc \
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/assign_variable.cc \
+$(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/batch_matmul.cc \
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/batch_to_space_nd.cc \
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/broadcast_args.cc \
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/broadcast_to.cc \
@@ -437,6 +432,7 @@
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/squared_difference.cc \
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/squeeze.cc \
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/strided_slice.cc \
+$(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/strided_slice_common.cc \
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/sub.cc \
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/sub_common.cc \
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/svdf.cc \
@@ -450,24 +446,64 @@
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/while.cc \
 $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/zeros_like.cc
 
+MICROLITE_CC_SIGNAL_KERNEL_SRCS := \
+$(TENSORFLOW_ROOT)signal/micro/kernels/delay.cc \
+$(TENSORFLOW_ROOT)signal/micro/kernels/energy.cc \
+$(TENSORFLOW_ROOT)signal/micro/kernels/fft_auto_scale_kernel.cc \
+$(TENSORFLOW_ROOT)signal/micro/kernels/fft_auto_scale_common.cc \
+$(TENSORFLOW_ROOT)signal/micro/kernels/filter_bank.cc \
+$(TENSORFLOW_ROOT)signal/micro/kernels/filter_bank_log.cc \
+$(TENSORFLOW_ROOT)signal/micro/kernels/filter_bank_square_root.cc \
+$(TENSORFLOW_ROOT)signal/micro/kernels/filter_bank_square_root_common.cc \
+$(TENSORFLOW_ROOT)signal/micro/kernels/filter_bank_spectral_subtraction.cc \
+$(TENSORFLOW_ROOT)signal/micro/kernels/framer.cc \
+$(TENSORFLOW_ROOT)signal/micro/kernels/irfft.cc \
+$(TENSORFLOW_ROOT)signal/micro/kernels/rfft.cc \
+$(TENSORFLOW_ROOT)signal/micro/kernels/stacker.cc \
+$(TENSORFLOW_ROOT)signal/micro/kernels/overlap_add.cc \
+$(TENSORFLOW_ROOT)signal/micro/kernels/pcan.cc \
+$(TENSORFLOW_ROOT)signal/micro/kernels/window.cc \
+$(TENSORFLOW_ROOT)signal/src/circular_buffer.cc \
+$(TENSORFLOW_ROOT)signal/src/energy.cc \
+$(TENSORFLOW_ROOT)signal/src/fft_auto_scale.cc \
+$(TENSORFLOW_ROOT)signal/src/filter_bank.cc \
+$(TENSORFLOW_ROOT)signal/src/filter_bank_log.cc \
+$(TENSORFLOW_ROOT)signal/src/filter_bank_square_root.cc \
+$(TENSORFLOW_ROOT)signal/src/filter_bank_spectral_subtraction.cc \
+$(TENSORFLOW_ROOT)signal/src/irfft_float.cc \
+$(TENSORFLOW_ROOT)signal/src/irfft_int16.cc \
+$(TENSORFLOW_ROOT)signal/src/irfft_int32.cc \
+$(TENSORFLOW_ROOT)signal/src/log.cc \
+$(TENSORFLOW_ROOT)signal/src/max_abs.cc \
+$(TENSORFLOW_ROOT)signal/src/msb_32.cc \
+$(TENSORFLOW_ROOT)signal/src/msb_64.cc \
+$(TENSORFLOW_ROOT)signal/src/overlap_add.cc \
+$(TENSORFLOW_ROOT)signal/src/pcan_argc_fixed.cc \
+$(TENSORFLOW_ROOT)signal/src/rfft_float.cc \
+$(TENSORFLOW_ROOT)signal/src/rfft_int16.cc \
+$(TENSORFLOW_ROOT)signal/src/rfft_int32.cc \
+$(TENSORFLOW_ROOT)signal/src/square_root_32.cc \
+$(TENSORFLOW_ROOT)signal/src/square_root_64.cc \
+$(TENSORFLOW_ROOT)signal/src/window.cc
+
 MICROLITE_TEST_HDRS := \
 $(wildcard $(TENSORFLOW_ROOT)tensorflow/lite/micro/testing/*.h)
 
 # The explicitly specified list of sources and headers that are shared between
 # TfLite and TFLM are in the ci/sync_from_upstream_tf.sh script.
 TFL_CC_SRCS := \
-$(shell find $(TENSORFLOW_ROOT)tensorflow/lite -type d \( -path $(TENSORFLOW_ROOT)tensorflow/lite/experimental -o -path $(TENSORFLOW_ROOT)tensorflow/lite/micro \) -prune -false -o -name "*.cc" -o -name "*.c")
+$(shell find $(TENSORFLOW_ROOT)tensorflow -type d \( -path $(TENSORFLOW_ROOT)tensorflow/lite/experimental -o -path $(TENSORFLOW_ROOT)tensorflow/lite/micro \) -prune -false -o -name "*.cc" -o -name "*.c")
 
 TFL_CC_HDRS := \
-$(shell find $(TENSORFLOW_ROOT)tensorflow/lite -type d \( -path $(TENSORFLOW_ROOT)tensorflow/lite/experimental -o -path $(TENSORFLOW_ROOT)tensorflow/lite/micro \) -prune -false -o -name "*.h")
+$(shell find $(TENSORFLOW_ROOT)tensorflow -type d \( -path $(TENSORFLOW_ROOT)tensorflow/lite/experimental -o -path $(TENSORFLOW_ROOT)tensorflow/lite/micro \) -prune -false -o -name "*.h")
 
 ifneq ($(BUILD_TYPE), no_tf_lite_static_memory)
   EXCLUDED_TFL_CC_SRCS := \
-  	$(TENSORFLOW_ROOT)tensorflow/lite/array.cc
+	$(TENSORFLOW_ROOT)tensorflow/lite/array.cc
   TFL_CC_SRCS := $(filter-out $(EXCLUDED_TFL_CC_SRCS), $(TFL_CC_SRCS))
 
   EXCLUDED_TFL_CC_HDRS := \
-  	$(TENSORFLOW_ROOT)tensorflow/lite/array.h
+	$(TENSORFLOW_ROOT)tensorflow/lite/array.h
   TFL_CC_HDRS := $(filter-out $(EXCLUDED_TFL_CC_HDRS), $(TFL_CC_HDRS))
 endif
 
@@ -501,20 +537,31 @@
 $(DOWNLOADS_DIR)/flatbuffers/include/flatbuffers/base.h \
 $(DOWNLOADS_DIR)/flatbuffers/include/flatbuffers/buffer.h \
 $(DOWNLOADS_DIR)/flatbuffers/include/flatbuffers/buffer_ref.h \
+$(DOWNLOADS_DIR)/flatbuffers/include/flatbuffers/code_generator.h \
+$(DOWNLOADS_DIR)/flatbuffers/include/flatbuffers/code_generators.h \
 $(DOWNLOADS_DIR)/flatbuffers/include/flatbuffers/default_allocator.h \
 $(DOWNLOADS_DIR)/flatbuffers/include/flatbuffers/detached_buffer.h \
+$(DOWNLOADS_DIR)/flatbuffers/include/flatbuffers/file_manager.h \
 $(DOWNLOADS_DIR)/flatbuffers/include/flatbuffers/flatbuffer_builder.h \
 $(DOWNLOADS_DIR)/flatbuffers/include/flatbuffers/flatbuffers.h \
+$(DOWNLOADS_DIR)/flatbuffers/include/flatbuffers/flex_flat_util.h \
 $(DOWNLOADS_DIR)/flatbuffers/include/flatbuffers/flexbuffers.h \
+$(DOWNLOADS_DIR)/flatbuffers/include/flatbuffers/grpc.h \
+$(DOWNLOADS_DIR)/flatbuffers/include/flatbuffers/hash.h \
+$(DOWNLOADS_DIR)/flatbuffers/include/flatbuffers/idl.h \
+$(DOWNLOADS_DIR)/flatbuffers/include/flatbuffers/minireflect.h \
+$(DOWNLOADS_DIR)/flatbuffers/include/flatbuffers/reflection.h \
+$(DOWNLOADS_DIR)/flatbuffers/include/flatbuffers/reflection_generated.h \
+$(DOWNLOADS_DIR)/flatbuffers/include/flatbuffers/registry.h \
 $(DOWNLOADS_DIR)/flatbuffers/include/flatbuffers/stl_emulation.h \
 $(DOWNLOADS_DIR)/flatbuffers/include/flatbuffers/string.h \
 $(DOWNLOADS_DIR)/flatbuffers/include/flatbuffers/struct.h \
 $(DOWNLOADS_DIR)/flatbuffers/include/flatbuffers/table.h \
+$(DOWNLOADS_DIR)/flatbuffers/include/flatbuffers/util.h \
 $(DOWNLOADS_DIR)/flatbuffers/include/flatbuffers/vector.h \
 $(DOWNLOADS_DIR)/flatbuffers/include/flatbuffers/vector_downward.h \
 $(DOWNLOADS_DIR)/flatbuffers/include/flatbuffers/verifier.h \
-$(DOWNLOADS_DIR)/flatbuffers/include/flatbuffers/util.h \
-$(DOWNLOADS_DIR)/flatbuffers/LICENSE.txt \
+$(DOWNLOADS_DIR)/flatbuffers/LICENSE \
 $(DOWNLOADS_DIR)/gemmlowp/fixedpoint/fixedpoint.h \
 $(DOWNLOADS_DIR)/gemmlowp/fixedpoint/fixedpoint_neon.h \
 $(DOWNLOADS_DIR)/gemmlowp/fixedpoint/fixedpoint_sse.h \
@@ -583,8 +630,14 @@
   include $(MAKEFILE_DIR)/targets/$(TARGET)_makefile.inc
 endif
 
+# Validate valid options.
+ifeq (,$(filter $(OPTIMIZE_KERNELS_FOR),KERNELS_OPTIMIZED_FOR_SPEED KERNELS_OPTIMIZED_FOR_SIZE))
+    $(error Incorrect OPTIMIZE_KERNELS_FOR: $(OPTIMIZE_KERNELS_FOR))
+endif
+
 ifneq ($(OPTIMIZED_KERNEL_DIR),)
   PATH_TO_OPTIMIZED_KERNELS := $(OPTIMIZED_KERNEL_DIR_PREFIX)/$(OPTIMIZED_KERNEL_DIR)
+  PATH_TO_SIGNAL_OPTIMIZED_KERNELS := $(OPTIMIZED_SIGNAL_KERNEL_DIR_PREFIX)/$(OPTIMIZED_KERNEL_DIR)
 
   # Check that OPTIMIZED_KERNEL_DIR is valid to avoid unexpected fallback to
   # reference kernels. See http://b/183546742 for more context.
@@ -599,6 +652,22 @@
 		--base_files "$(MICROLITE_CC_KERNEL_SRCS)" \
 		--specialize_directory $(PATH_TO_OPTIMIZED_KERNELS))
 
+  ifneq ($(filter $(OPTIMIZED_KERNEL_DIR), xtensa),)
+    # Check that OPTIMIZED_KERNEL_DIR is valid to avoid unexpected fallback to
+    # reference kernels. See http://b/183546742 for more context.
+    RESULT := $(shell $(MAKEFILE_DIR)/check_optimized_kernel_dir.sh $(PATH_TO_SIGNAL_OPTIMIZED_KERNELS))
+    ifneq ($(RESULT), SUCCESS)
+      $(error Incorrect SIGNAL OPTIMIZED_KERNEL_DIR: $(RESULT))
+    endif
+
+    # Specialize for the optimized kernels
+    MICROLITE_CC_SIGNAL_KERNEL_SRCS := $(shell python3 $(MAKEFILE_DIR)/specialize_files.py \
+      --base_files "$(MICROLITE_CC_SIGNAL_KERNEL_SRCS)" \
+      --specialize_directory $(PATH_TO_SIGNAL_OPTIMIZED_KERNELS))
+    MICROLITE_CC_KERNEL_SRCS += $(wildcard $(PATH_TO_SIGNAL_OPTIMIZED_KERNELS)/*.S)
+    MICROLITE_CC_HDRS += $(wildcard $(PATH_TO_SIGNAL_OPTIMIZED_KERNELS)/*.h)
+  endif
+
   # The first ifneq is needed to be compatible with make versions prior to 4.2
   # which do not support .SHELLSTATUS. While make 4.2 was released in 2016,
   # Ubuntu 18.04 only has version 4.1
@@ -614,6 +683,8 @@
   MICROLITE_CC_HDRS += $(wildcard $(PATH_TO_OPTIMIZED_KERNELS)/*.h)
 endif
 
+MICROLITE_CC_KERNEL_SRCS += $(MICROLITE_CC_SIGNAL_KERNEL_SRCS)
+
 # If a co-processor is specified on the command line with
 # CO_PROCESSOR=<co_processor> then we will include ext_libs/<co_processor>.inc
 # and find additional kernel sources in kernels/<co_processor>/
@@ -682,7 +753,14 @@
 # Load generated micro mutable op resolver test.
 include ${MICRO_LITE_GEN_MUTABLE_OP_RESOLVER_TEST}
 
+# Load layer_by_layer_output_tool
+ifneq ($(TARGET), $(filter $(TARGET), \
+  bluepill cortex_m_corstone_300 riscv32_generic hexagon))
+include ${MICRO_LITE_LAYER_BY_LAYER_OUTPUT}
+endif
+
 # Load the benchmarks.
+include $(OLD_MICRO_LITE_BENCHMARKS)
 include $(MICRO_LITE_BENCHMARKS)
 
 # Load custom kernel tests.
@@ -775,6 +853,7 @@
 # These are microcontroller-specific rules for converting the ELF output
 # of the linker into a binary image that can be loaded directly.
 ifeq ($(TOOLCHAIN), armclang)
+  CXXFLAGS += -ffp-mode=full
   FROMELF := ${TARGET_TOOLCHAIN_ROOT}$(TARGET_TOOLCHAIN_PREFIX)fromelf
   $(BINDIR)%.bin: $(BINDIR)%
 		@mkdir -p $(dir $@)
@@ -850,6 +929,9 @@
 # Just build the test targets
 build: $(MICROLITE_BUILD_TARGETS)
 
+list_gendir:
+	@echo $(GENDIR)
+
 list_library_sources:
 	@echo $(MICROLITE_CC_SRCS) $(MICROLITE_CC_KERNEL_SRCS)
 
diff --git a/tensorflow/lite/micro/tools/make/arm_gcc_download.sh b/tensorflow/lite/micro/tools/make/arm_gcc_download.sh
index 8e6d632..68d2ca1 100755
--- a/tensorflow/lite/micro/tools/make/arm_gcc_download.sh
+++ b/tensorflow/lite/micro/tools/make/arm_gcc_download.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -63,19 +63,26 @@
     # host architechture
     UNAME_M=`uname -m`
     if [ "${UNAME_M}" == "x86_64" ]; then
-      GCC_URL="https://developer.arm.com/-/media/Files/downloads/gnu-rm/10.3-2021.10/gcc-arm-none-eabi-10.3-2021.10-x86_64-linux.tar.bz2"
-      EXPECTED_MD5="2383e4eb4ea23f248d33adc70dc3227e"
+      GCC_URL="https://developer.arm.com/-/media/Files/downloads/gnu/13.2.rel1/binrel/arm-gnu-toolchain-13.2.rel1-x86_64-arm-none-eabi.tar.xz"
+      EXPECTED_MD5="791754852f8c18ea04da7139f153a5b7"
     elif [ "${UNAME_M}" == "aarch64" ]; then
-      GCC_URL="https://developer.arm.com/-/media/Files/downloads/gnu-rm/10.3-2021.10/gcc-arm-none-eabi-10.3-2021.10-aarch64-linux.tar.bz2"
-      EXPECTED_MD5="3fe3d8bb693bd0a6e4615b6569443d0d"
+      GCC_URL="https://developer.arm.com/-/media/Files/downloads/gnu/13.2.rel1/binrel/arm-gnu-toolchain-13.2.rel1-aarch64-arm-none-eabi.tar.xz"
+      EXPECTED_MD5="5a08122e6d4caf97c6ccd1d29e62599c"
     fi
 
   elif [ "${HOST_OS}" == "osx" ]; then
-    GCC_URL="https://developer.arm.com/-/media/Files/downloads/gnu-rm/10.3-2021.10/gcc-arm-none-eabi-10.3-2021.10-mac.tar.bz2"
-    EXPECTED_MD5="7f2a7b7b23797302a9d6182c6e482449"
+    # host architecture
+    UNAME_M=`uname -m`
+    if [ "${UNAME_M}" == "arm64" ]; then
+      GCC_URL="https://developer.arm.com/-/media/Files/downloads/gnu/13.2.rel1/binrel/arm-gnu-toolchain-13.2.rel1-darwin-arm64-arm-none-eabi.tar.xz"
+      EXPECTED_MD5="2c43e9d72206c1f81227b0a685df5ea6"
+    else
+      GCC_URL="https://developer.arm.com/-/media/Files/downloads/gnu/13.2.rel1/binrel/arm-gnu-toolchain-13.2.rel1-darwin-x86_64-arm-none-eabi.tar.xz"
+      EXPECTED_MD5="41d49840b0fc676d2ae35aab21a58693"
+    fi
   elif [ "${HOST_OS}" == "windows" ]; then
-    GCC_URL="https://developer.arm.com/-/media/Files/downloads/gnu-rm/10.3-2021.10/gcc-arm-none-eabi-10.3-2021.10-win32.zip"
-    EXPECTED_MD5="2bc8f0c4c4659f8259c8176223eeafc1"
+    GCC_URL="https://developer.arm.com/-/media/Files/downloads/gnu/13.2.rel1/binrel/arm-gnu-toolchain-13.2.rel1-mingw-w64-i686-arm-none-eabi.zip"
+    EXPECTED_MD5="7fd677088038cdf82f33f149e2e943ee"
   else
     echo "OS type ${HOST_OS} not supported."
     exit 1
@@ -92,7 +99,7 @@
     unzip -q ${TEMPFILE} -d ${TEMPDIR} >&2
     mv ${TEMPDIR}/*/* ${DOWNLOADED_GCC_PATH}
   else
-    tar -C ${DOWNLOADED_GCC_PATH} --strip-components=1 -xjf ${TEMPFILE} >&2
+    tar -C ${DOWNLOADED_GCC_PATH} --strip-components=1 -xJf ${TEMPFILE} >&2
   fi
   echo >&2 "Unpacked to directory: ${DOWNLOADED_GCC_PATH}"
 fi
diff --git a/tensorflow/lite/micro/tools/make/bash_helpers.sh b/tensorflow/lite/micro/tools/make/bash_helpers.sh
index f29a641..e5446de 100755
--- a/tensorflow/lite/micro/tools/make/bash_helpers.sh
+++ b/tensorflow/lite/micro/tools/make/bash_helpers.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+# Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,6 +14,32 @@
 # limitations under the License.
 # ==============================================================================
 
+# Check the download path argument
+#
+# Parameter(s):
+#   ${1} - path to the download directory or --no-downloads
+#
+# Outputs:
+# "yes" or "no"
+function check_should_download() {
+  if [[ ${1} == "--no-downloads" ]]; then
+    echo "no"
+  else
+    echo "yes"
+  fi
+}
+
+# Show the download URL and MD5 checksum
+#
+# Parameter(s):
+#   ${1} - download URL
+#   ${2} - download MD5 checksum
+#
+# Download scripts require informational output should be on stderr.
+function show_download_url_md5() {
+  echo >&2 "LIBRARY_URL=${1}"
+  echo >&2 "LIBRARY_MD5=${2}"
+}
 
 # Compute the MD5 sum.
 #
diff --git a/tensorflow/lite/micro/tools/make/corstone_300_download.sh b/tensorflow/lite/micro/tools/make/corstone_300_download.sh
index aa0a762..c485d3d 100755
--- a/tensorflow/lite/micro/tools/make/corstone_300_download.sh
+++ b/tensorflow/lite/micro/tools/make/corstone_300_download.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+# Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,6 +17,7 @@
 # Called with following arguments:
 # 1 - Path to the downloads folder which is typically
 #     tensorflow/lite/micro/tools/make/downloads
+# 2 - (optional) TENSORFLOW_ROOT: path to root of the TFLM tree (relative to directory from where the script is called).
 #
 # This script is called from the Makefile and uses the following convention to
 # enable determination of sucess/failure:
@@ -31,11 +32,8 @@
 
 set -e
 
-SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-ROOT_DIR=${SCRIPT_DIR}/../../../../..
-cd "${ROOT_DIR}"
-
-source tensorflow/lite/micro/tools/make/bash_helpers.sh
+TENSORFLOW_ROOT=${2}
+source ${TENSORFLOW_ROOT}tensorflow/lite/micro/tools/make/bash_helpers.sh
 
 DOWNLOADS_DIR=${1}
 if [ ! -d ${DOWNLOADS_DIR} ]; then
@@ -48,10 +46,19 @@
 if [ -d ${DOWNLOADED_CORSTONE_PATH} ]; then
   echo >&2 "${DOWNLOADED_CORSTONE_PATH} already exists, skipping the download."
 else
-  UNAME_S=`uname -s`
+  UNAME_S=$(uname -s)
+  UNAME_M=$(uname -m)
   if [ ${UNAME_S} == Linux ]; then
-    CORSTONE_URL=https://developer.arm.com/-/media/Arm%20Developer%20Community/Downloads/OSS/FVP/Corstone-300/FVP_Corstone_SSE-300_11.16_26.tgz
-    EXPECTED_MD5=29d9208127b24a0d83356efb8343162d
+    if [ ${UNAME_M} == x86_64 ]; then
+      CORSTONE_URL=https://developer.arm.com/-/media/Arm%20Developer%20Community/Downloads/OSS/FVP/Corstone-300/FVP_Corstone_SSE-300_11.24_13_Linux64.tgz
+      EXPECTED_MD5=42500e49a4b9e8e0f633d1bad9b7c052
+    elif [ ${UNAME_M} == aarch64 ]; then
+      CORSTONE_URL=https://developer.arm.com/-/media/Arm%20Developer%20Community/Downloads/OSS/FVP/Corstone-300/FVP_Corstone_SSE-300_11.24_13_Linux64_armv8l.tgz
+      EXPECTED_MD5=89904e875c863235635e1570c4f6459e
+    else
+          echo "Cpu type ${UNAME_M} with OS type ${UNAME_S} not supported."
+          exit 1
+    fi
   else
     echo "OS type ${UNAME_S} not supported."
     exit 1
diff --git a/tensorflow/lite/micro/tools/make/download_and_extract.sh b/tensorflow/lite/micro/tools/make/download_and_extract.sh
index 974dca1..dcd60da 100755
--- a/tensorflow/lite/micro/tools/make/download_and_extract.sh
+++ b/tensorflow/lite/micro/tools/make/download_and_extract.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+# Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -123,9 +123,9 @@
     sleep 2
   done
 
-  # Check that the file was downloaded correctly using a checksum.
+  # Check that the file was downloaded correctly using a checksum. Put expected_md5 as "SKIP_MD5_CHECK" to skip this check.
   DOWNLOADED_MD5=$(openssl dgst -md5 ${tempfile} | sed 's/.* //g')
-  if [ ${expected_md5} != ${DOWNLOADED_MD5} ]; then
+  if [ ${expected_md5} != ${DOWNLOADED_MD5} ] && [ ${expected_md5} != "SKIP_MD5_CHECK" ]; then
     echo "Checksum error for '${url}'. Expected ${expected_md5} but found ${DOWNLOADED_MD5}"
     exit 1
   fi
@@ -134,28 +134,30 @@
   url=$(echo "${url}" | sed "s/\?.*//")
 
   if [[ "${url}" == *gz ]]; then
-    tar -C "${dir}" --strip-components=1 -xzf ${tempfile}
+    tar -C "${tempdir2}" -xzf ${tempfile}
   elif [[ "${url}" == *tar.xz ]]; then
-    tar -C "${dir}" --strip-components=1 -xf ${tempfile}
+    tar -C "${tempdir2}" -xf ${tempfile}
   elif [[ "${url}" == *bz2 ]]; then
     curl -Ls "${url}" > ${tempdir}/tarred.bz2
-    tar -C "${dir}" --strip-components=1 -xjf ${tempfile}
+    tar -C "${tempdir2}" -xjf ${tempfile}
   elif [[ "${url}" == *zip ]]; then
     unzip ${tempfile} -d ${tempdir2} 2>&1 1>/dev/null
-    # If the zip file contains nested directories, extract the files from the
-    # inner directory.
-    if [ $(find $tempdir2/* -maxdepth 0 | wc -l) = 1 ] && [ -d $tempdir2/* ]; then
-      # unzip has no strip components, so unzip to a temp dir, and move the
-      # files we want from the tempdir to destination.
-      cp -R ${tempdir2}/*/* ${dir}/
-    else
-      cp -R ${tempdir2}/* ${dir}/
-    fi
   else
     echo "Error unsupported archive type. Failed to extract tool after download."
     exit 1
   fi
-  rm -rf ${tempdir2} ${tempdir}
+
+  # If the zip file contains nested directories, extract the files from the
+  # inner directory.
+  if [ $(find $tempdir2/* -maxdepth 0 | wc -l) = 1 ] && [ -d $tempdir2/* ]; then
+    # Unzip to a temp dir, and move the files we want from the tempdir to destination.
+    # We want this to be dependent on the folder structure of the zipped file, so --strip-components cannot be used.
+    cp -R ${tempdir2}/*/* ${dir}/
+  else
+    cp -R ${tempdir2}/* ${dir}/
+  fi
+
+  rm -rf ${tempdir} ${tempdir2}
 
   # Delete any potential BUILD files, which would interfere with Bazel builds.
   find "${dir}" -type f -name '*BUILD' -delete
diff --git a/tensorflow/lite/micro/tools/make/ethos_u_core_driver_download.sh b/tensorflow/lite/micro/tools/make/ethos_u_core_driver_download.sh
new file mode 100755
index 0000000..e6c96c1
--- /dev/null
+++ b/tensorflow/lite/micro/tools/make/ethos_u_core_driver_download.sh
@@ -0,0 +1,64 @@
+#!/bin/bash
+# Copyright 2024 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+#
+# Called with following arguments:
+# 1 - Path to the downloads folder which is typically
+#     tensorflow/lite/micro/tools/make/downloads
+#
+# This script is called from the Makefile and uses the following convention to
+# enable determination of success/failure:
+#
+#   - If the script is successful, the only output on stdout should be SUCCESS.
+#     The makefile checks for this particular string.
+#
+#   - Any string on stdout that is not SUCCESS will be shown in the makefile as
+#     the cause for the script to have failed.
+#
+#   - Any other informational prints should be on stderr.
+
+set -e
+
+TENSORFLOW_ROOT=${2}
+source ${TENSORFLOW_ROOT}tensorflow/lite/micro/tools/make/bash_helpers.sh
+
+DOWNLOADS_DIR=${1}
+if [ ! -d ${DOWNLOADS_DIR} ]; then
+  echo "The top-level downloads directory: ${DOWNLOADS_DIR} does not exist."
+  exit 1
+fi
+
+DOWNLOADED_ETHOS_U_CORE_DRIVER_PATH=${DOWNLOADS_DIR}/ethos_u_core_driver
+
+if [ -d ${DOWNLOADED_ETHOS_U_CORE_DRIVER_PATH} ]; then
+  echo >&2 "${DOWNLOADED_ETHOS_U_CORE_DRIVER_PATH} already exists, skipping the download."
+else
+  UNAME_S=`uname -s`
+  if [ ${UNAME_S} != Linux ]; then
+    echo "OS type ${UNAME_S} not supported."
+    exit 1
+  fi
+
+  git clone "https://review.mlplatform.org/ml/ethos-u/ethos-u-core-driver" \
+      ${DOWNLOADED_ETHOS_U_CORE_DRIVER_PATH} >&2
+  pushd ${DOWNLOADED_ETHOS_U_CORE_DRIVER_PATH} > /dev/null
+  git -c advice.detachedHead=false checkout 9622608a5cc318c0933bcce720b59737d03bfb6f
+  rm -rf .git
+  create_git_repo ./
+  popd > /dev/null
+
+fi
+
+echo "SUCCESS"
diff --git a/tensorflow/lite/micro/tools/make/increase-stack-size-and-switch-DTCM-SRAM.patch b/tensorflow/lite/micro/tools/make/ethos_u_core_platform.patch
similarity index 72%
rename from tensorflow/lite/micro/tools/make/increase-stack-size-and-switch-DTCM-SRAM.patch
rename to tensorflow/lite/micro/tools/make/ethos_u_core_platform.patch
index 57c50c1..957c9c2 100644
--- a/tensorflow/lite/micro/tools/make/increase-stack-size-and-switch-DTCM-SRAM.patch
+++ b/tensorflow/lite/micro/tools/make/ethos_u_core_platform.patch
@@ -1,16 +1,16 @@
-From 470dee13bffc0adb9a778d56fab3028031f71e80 Mon Sep 17 00:00:00 2001
+From 70e504abb13fe56244250ac7ac58b1b5232481c7 Mon Sep 17 00:00:00 2001
 From: TFLM <tflm@google.com>
-Date: Fri, 28 Oct 2022 11:01:15 +0200
+Date: Mon, 28 Aug 2023 16:07:22 +0000
 Subject: [PATCH] TFLM patch
 
 ---
- targets/corstone-300/platform.ld      |  8 +++-----
- targets/corstone-300/platform.scatter |  5 +++--
+ targets/corstone-300/platform.ld      | 10 ++++------
+ targets/corstone-300/platform.scatter |  7 ++++---
  targets/corstone-300/retarget.c       | 16 ++++++++--------
- 3 files changed, 14 insertions(+), 15 deletions(-)
+ 3 files changed, 16 insertions(+), 17 deletions(-)
 
 diff --git a/targets/corstone-300/platform.ld b/targets/corstone-300/platform.ld
-index ec58acc..21316a4 100644
+index ec58acc..51c93ca 100644
 --- a/targets/corstone-300/platform.ld
 +++ b/targets/corstone-300/platform.ld
 @@ -75,7 +75,7 @@
@@ -22,6 +22,15 @@
  __HEAP_SIZE  = 0x00008000;
  
  MEMORY
+@@ -83,7 +83,7 @@ MEMORY
+   ITCM  (rx)  : ORIGIN = 0x10000000, LENGTH = 0x00080000
+   BRAM  (rw)  : ORIGIN = 0x11000000, LENGTH = 0x00400000
+   DTCM  (rw)  : ORIGIN = 0x30000000, LENGTH = 0x00080000
+-  SRAM  (rw)  : ORIGIN = 0x31000000, LENGTH = 0x00200000
++  SRAM  (rw)  : ORIGIN = 0x31000000, LENGTH = 0x02000000
+   DDR   (rwx) : ORIGIN = 0x70000000, LENGTH = 0x60000000
+ }
+ 
 @@ -150,9 +150,6 @@ SECTIONS
      *(EXCLUDE_FILE(*crtend?.o *crtend.o) .dtors)
      *(SORT(.dtors.*))
@@ -50,7 +59,7 @@
    .heap (COPY) :
    {
 diff --git a/targets/corstone-300/platform.scatter b/targets/corstone-300/platform.scatter
-index fab12d1..be5c227 100644
+index fab12d1..2180aca 100644
 --- a/targets/corstone-300/platform.scatter
 +++ b/targets/corstone-300/platform.scatter
 @@ -1,3 +1,4 @@
@@ -67,6 +76,15 @@
  #endif
  
  #ifndef HEAP_SIZE
+@@ -108,7 +109,7 @@
+ #define DTCM_SIZE  0x00080000
+ 
+ #define SRAM_START 0x31000000
+-#define SRAM_SIZE  0x00200000
++#define SRAM_SIZE  0x02000000
+ 
+ #define DDR_START  0x70000000
+ #define DDR_SIZE   0x02000000
 @@ -136,7 +137,6 @@ APP_IMAGE LR_START LR_SIZE
          ; Make sure reset_handler ends up in root segment, when split across
          ; ITCM and DTCM
@@ -115,5 +133,5 @@
 +}
 +#endif
 -- 
-2.17.1
+2.42.0.rc2.253.gd59a3bf2b4-goog
 
diff --git a/tensorflow/lite/micro/tools/make/ethos_u_core_platform_download.sh b/tensorflow/lite/micro/tools/make/ethos_u_core_platform_download.sh
index 76223db..3a1cd33 100755
--- a/tensorflow/lite/micro/tools/make/ethos_u_core_platform_download.sh
+++ b/tensorflow/lite/micro/tools/make/ethos_u_core_platform_download.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+# Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -31,11 +31,8 @@
 
 set -e
 
-SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-ROOT_DIR=${SCRIPT_DIR}/../../../../..
-cd "${ROOT_DIR}"
-
-source tensorflow/lite/micro/tools/make/bash_helpers.sh
+TENSORFLOW_ROOT=${2}
+source ${TENSORFLOW_ROOT}tensorflow/lite/micro/tools/make/bash_helpers.sh
 
 DOWNLOADS_DIR=${1}
 if [ ! -d ${DOWNLOADS_DIR} ]; then
@@ -54,24 +51,22 @@
     exit 1
   fi
 
-  git clone https://git.mlplatform.org/ml/ethos-u/ethos-u-core-platform.git ${DOWNLOADED_ETHOS_U_CORE_PLATFORM_PATH} >&2
-  cd ${DOWNLOADED_ETHOS_U_CORE_PLATFORM_PATH}
+  git clone "https://review.mlplatform.org/ml/ethos-u/ethos-u-core-platform" ${DOWNLOADED_ETHOS_U_CORE_PLATFORM_PATH} >&2
+  pushd ${DOWNLOADED_ETHOS_U_CORE_PLATFORM_PATH} > /dev/null
   git checkout e25a89dec1cf990f3168dbd6c565e3b0d51cb151 >&2
   rm -rf .git
   create_git_repo ./
-
-  apply_patch_to_folder ./ ../../increase-stack-size-and-switch-DTCM-SRAM.patch "TFLM patch"
-
-  cd "${ROOT_DIR}"
+  apply_patch_to_folder ./ ../../ethos_u_core_platform.patch "TFLM patch"
+  popd > /dev/null
 
   LINKER_PATH=${DOWNLOADED_ETHOS_U_CORE_PLATFORM_PATH}/targets/corstone-300
 
   # Run C preprocessor on linker file to get rid of ifdefs and make sure compiler is downloaded first.
   COMPILER=${DOWNLOADS_DIR}/gcc_embedded/bin/arm-none-eabi-gcc
   if [ ! -f ${COMPILER} ]; then
-    RETURN_VALUE=`./tensorflow/lite/micro/tools/make/arm_gcc_download.sh ${DOWNLOADS_DIR}`
+    RETURN_VALUE=`${TENSORFLOW_ROOT}tensorflow/lite/micro/tools/make/arm_gcc_download.sh ${DOWNLOADS_DIR} ${TENSORFLOW_ROOT}`
     if [ "SUCCESS" != "${RETURN_VALUE}" ]; then
-      echo "The script ./tensorflow/lite/micro/tools/make/arm_gcc_download.sh failed."
+      echo "The script ${TENSORFLOW_ROOT}tensorflow/lite/micro/tools/make/arm_gcc_download.sh failed."
       exit 1
     fi
   fi
diff --git a/tensorflow/lite/micro/tools/make/ext_libs/cmsis_nn.inc b/tensorflow/lite/micro/tools/make/ext_libs/cmsis_nn.inc
index e9ae5fc..a78aa49 100644
--- a/tensorflow/lite/micro/tools/make/ext_libs/cmsis_nn.inc
+++ b/tensorflow/lite/micro/tools/make/ext_libs/cmsis_nn.inc
@@ -43,7 +43,7 @@
 # the various intrinisics.
 THIRD_PARTY_CC_HDRS += \
   $(CMSIS_PATH)/LICENSE.txt \
-  $(CMSIS_NN_PATH)/LICENSE.txt \
+  $(CMSIS_NN_PATH)/LICENSE \
   $(wildcard $(CMSIS_PATH)/CMSIS/Core/Include/*.h)
 
 # We add -I$(CMSIS_PATH) to enable the code in the TFLM repo (mostly in the
diff --git a/tensorflow/lite/micro/tools/make/ext_libs/cmsis_nn_download.sh b/tensorflow/lite/micro/tools/make/ext_libs/cmsis_nn_download.sh
index bc8e87b..fb0ad92 100755
--- a/tensorflow/lite/micro/tools/make/ext_libs/cmsis_nn_download.sh
+++ b/tensorflow/lite/micro/tools/make/ext_libs/cmsis_nn_download.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+# Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -36,21 +36,22 @@
 source ${TENSORFLOW_ROOT}tensorflow/lite/micro/tools/make/bash_helpers.sh
 
 DOWNLOADS_DIR=${1}
-if [ ! -d ${DOWNLOADS_DIR} ]; then
-  echo "The top-level downloads directory: ${DOWNLOADS_DIR} does not exist."
-  exit 1
-fi
-
 DOWNLOADED_CMSIS_NN_PATH=${DOWNLOADS_DIR}/cmsis_nn
 
-if [ -d ${DOWNLOADED_CMSIS_NN_PATH} ]; then
+ZIP_PREFIX_NN="01dee38e6d6bfbbf202f0cd425bbea1731747d51"
+CMSIS_NN_URL="http://github.com/ARM-software/CMSIS-NN/archive/${ZIP_PREFIX_NN}.zip"
+CMSIS_NN_MD5="f20be93ededf42bb704c19f699a24313"
+
+should_download=$(check_should_download ${DOWNLOADS_DIR})
+
+if [[ ${should_download} == "no" ]]; then
+  show_download_url_md5 ${CMSIS_NN_URL} ${CMSIS_NN_MD5}
+elif [ ! -d ${DOWNLOADS_DIR} ]; then
+  echo "The top-level downloads directory: ${DOWNLOADS_DIR} does not exist."
+  exit 1
+elif [ -d ${DOWNLOADED_CMSIS_NN_PATH} ]; then
   echo >&2 "${DOWNLOADED_CMSIS_NN_PATH} already exists, skipping the download."
 else
-
-  ZIP_PREFIX_NN="dc64e488f6655aa2792d2aceca316c896f78b4db"
-  CMSIS_NN_URL="http://github.com/ARM-software/CMSIS-NN/archive/${ZIP_PREFIX_NN}.zip"
-  CMSIS_NN_MD5="80f9cf0bcc10a4aefb6531ae53942044"
-
   # wget is much faster than git clone of the entire repo. So we wget a specific
   # version and can then apply a patch, as needed.
   wget ${CMSIS_NN_URL} -O /tmp/${ZIP_PREFIX_NN}.zip >&2
diff --git a/tensorflow/lite/micro/tools/make/ext_libs/ethos_u.inc b/tensorflow/lite/micro/tools/make/ext_libs/ethos_u.inc
index c61aaff..73447cd 100644
--- a/tensorflow/lite/micro/tools/make/ext_libs/ethos_u.inc
+++ b/tensorflow/lite/micro/tools/make/ext_libs/ethos_u.inc
@@ -30,12 +30,22 @@
 ETHOSU_DEFAULT_DOWNLOAD_DRIVER_PATH := $(MAKEFILE_DIR)/downloads/ethos_u_core_driver
 ETHOSU_DRIVER_PATH := $(ETHOSU_DEFAULT_DOWNLOAD_DRIVER_PATH)
 ifeq ($(ETHOSU_DRIVER_PATH), $(ETHOSU_DEFAULT_DOWNLOAD_DRIVER_PATH))
-  $(call $(or $(shell $(DOWNLOAD_SCRIPT) $(ETHOSU_URL) $(ETHOSU_MD5) $(ETHOSU_DRIVER_PATH) >&2 && echo SUCCESS), $(error $(DOWNLOAD_SCRIPT) failed)))
+DOWNLOAD_RESULT := $(shell $(MAKEFILE_DIR)/ethos_u_core_driver_download.sh $(DOWNLOADS_DIR) $(TENSORFLOW_ROOT))
+  ifneq ($(DOWNLOAD_RESULT), SUCCESS)
+    $(error $(DOWNLOAD_SCRIPT) failed)
+  endif
 endif
 
 THIRD_PARTY_CC_HDRS += $(shell find $(ETHOSU_DRIVER_PATH)/include -name "*.h")
 ifeq (,$(ETHOSU_DRIVER_LIBS))
-    THIRD_PARTY_CC_SRCS += $(shell find $(ETHOSU_DRIVER_PATH)/src -name "*.c")
+    THIRD_PARTY_CC_SRCS += $(ETHOSU_DRIVER_PATH)/src/ethosu_pmu.c \
+                           $(ETHOSU_DRIVER_PATH)/src/ethosu_driver.c
+    # Add the Ethos-U device-specific driver source.
+    ifeq ($(ETHOSU_ARCH), u85)
+      THIRD_PARTY_CC_SRCS += $(ETHOSU_DRIVER_PATH)/src/ethosu_device_u85.c
+    else
+      THIRD_PARTY_CC_SRCS += $(ETHOSU_DRIVER_PATH)/src/ethosu_device_u55_u65.c
+    endif
 else
     MICROLITE_LIBS += $(ETHOSU_DRIVER_LIBS)
 endif
@@ -67,8 +77,8 @@
 else
   $(error "ETHOSU_ARCH=$(ETHOSU_ARCH) is not supported")
 endif
-CCFLAGS += ${ETHOSU_FLAGS}
-CXXFLAGS += ${ETHOSU_FLAGS}
+CCFLAGS += ${ETHOSU_FLAGS} ${ETHOSU_EXTRA_FLAGS}
+CXXFLAGS += ${ETHOSU_FLAGS} ${ETHOSU_EXTRA_FLAGS}
 
 # Convert downloaded person detect int8 model.
 $(GENERATED_SRCS_DIR)tensorflow/lite/micro/models/person_detect_model_data_vela.cc:
diff --git a/tensorflow/lite/micro/tools/make/ext_libs/ndsplib-hifi3.patch b/tensorflow/lite/micro/tools/make/ext_libs/ndsplib-hifi3.patch
new file mode 100644
index 0000000..f216ce2
--- /dev/null
+++ b/tensorflow/lite/micro/tools/make/ext_libs/ndsplib-hifi3.patch
@@ -0,0 +1,140 @@
+diff --git a/library/include/NatureDSP_Signal_math.h b/library/include/NatureDSP_Signal_math.h
+index 2ffea63..adf08ab 100644
+--- a/library/include/NatureDSP_Signal_math.h
++++ b/library/include/NatureDSP_Signal_math.h
+@@ -38,7 +38,7 @@ extern "C" {
+   vec_log              Logarithm 

+   vec_antilog          Antilogarithm         

+   vec_sqrt             Square Root

+-  vec_rsqrt	           Reciprocal Square Root

++  vec_rsqrt            Reciprocal Square Root

+   vec_sine,vec_cosine  Sine/Cosine

+   vec_tan              Tangent 

+   vec_atan             Arctangent 

+@@ -384,9 +384,9 @@ int32_t scl_sqrt64x32(int64_t x);
+   part left by exponent value.

+ 

+   Mantissa accuracy is 1 LSB, so relative accuracy is:

+-  vec_rsqrt16x16, scl_rsqrt16x16	6.2e-5

+-  scl_rsqrt32x32	                2.4e-7

+-  vec_rsqrt32x32	                9.2e-10

++  vec_rsqrt16x16, scl_rsqrt16x16  6.2e-5

++  scl_rsqrt32x32                  2.4e-7

++  vec_rsqrt32x32                  9.2e-10

+ 

+   Precision: 

+   16x16  16-bit inputs, 16-bit output. Accuracy: 2LSB

+@@ -406,11 +406,11 @@ int32_t scl_sqrt64x32(int64_t x);
+   ----------------

+   Returned packed value: 

+   scl_rsqrt32x32():

+-  bits 23Â…0 fractional part

+-  bits 31Â…24 exponent

++  bits 23�0 fractional part

++  bits 31�24 exponent

+   scl_rsqrt16x16():

+-  bits 15Â…0 fractional part

+-  bits 31Â…16 exponent

++  bits 15�0 fractional part

++  bits 31�16 exponent

+ 

+ -------------------------------------------------------------------------*/

+ void vec_rsqrt32x32 ( int32_t * frac, int16_t * exp, const int32_t * x, int N);

+@@ -635,6 +635,7 @@ float32_t scl_atan2f (float32_t y, float32_t x);
+   return result, Q16.15 

+ -------------------------------------------------------------------------*/

+ void vec_tanh32x32(int32_t * y, const int32_t * x, int N);

++void vec_tanhf    (float32_t * y, const float32_t * x,int N);

+ int32_t scl_tanh32x32(int32_t x);

+ 

+ /*-------------------------------------------------------------------------

+@@ -659,7 +660,12 @@ int32_t scl_tanh32x32(int32_t x);
+   return result, Q16.15

+ -------------------------------------------------------------------------*/

+ void vec_sigmoid32x32(int32_t * y, const int32_t * x, int N);

++void vec_sigmoidf    (float32_t * y, const float32_t * x, int N);

+ int32_t scl_sigmoid32x32(int32_t x);

++float32_t scl_sigmoidf(float32_t x);

++

++void vec_relu32x32 (int32_t   * y, const int32_t   * x, int32_t   K, int N);

++void vec_reluf     (float32_t * y, const float32_t * x, float32_t K, int N);

+ 

+ /*-------------------------------------------------------------------------

+   Softmax

+@@ -685,6 +691,7 @@ int32_t scl_sigmoid32x32(int32_t x);
+ 

+ -------------------------------------------------------------------------*/

+ void vec_softmax32x32(int32_t * y, const int32_t * x, int N);

++void vec_softmaxf    (float32_t * y, const float32_t * x,int N);

+ 

+ /*-------------------------------------------------------------------------

+   Integer to float conversion

+diff --git a/library/include/NatureDSP_types.h b/library/include/NatureDSP_types.h
+index a38b334..6322852 100644
+--- a/library/include/NatureDSP_types.h
++++ b/library/include/NatureDSP_types.h
+@@ -332,7 +332,9 @@ typedef struct tagComplex32_t
+   #define NASSERT(x) 

+   #define restrict 

+ #elif defined (COMPILER_XTENSA)

++#if !defined restrict

+   #define restrict __restrict

++#endif

+   #define onchip

+   #define NASSERT(x) {(void)__builtin_expect((x)!=0,1);}

+ #else

+diff --git a/library/include_private/common.h b/library/include_private/common.h
+index 20206e4..74d4d42 100644
+--- a/library/include_private/common.h
++++ b/library/include_private/common.h
+@@ -157,6 +157,12 @@ __pragma (warning(pop))
+ __asm__(".type "#name", @object\n\t.global "#name"\n\t.align 4\n\t"#name":\n\t.long 0x49438B96,0x4D73F192\n\t");

+ #endif

+ 

++#if defined(COMPILER_XTENSA)

++#define DISCARD_FUN_FOR_NONVOID_RETURN(retval_type,name,arglist) \

++__attribute__ ((section ("/DISCARD/"))) \

++retval_type name arglist \

++{ return (retval_type) 0; }

++#endif

+ 

+ #ifdef __cplusplus

+ #define externC extern "C" 

+diff --git a/library/include_private/fft_real_twiddles.h b/library/include_private/fft_real_twiddles.h
+old mode 100644
+new mode 100755
+index 10cd4b2..6f5c80b
+--- a/library/include_private/fft_real_twiddles.h
++++ b/library/include_private/fft_real_twiddles.h
+@@ -44,8 +44,12 @@
+      7               128

+ */

+ 

++#if !defined MAX_RFFT_PWR

+ #define MAX_RFFT_PWR 13

++#endif

++#if !defined MAX_RFFT_LEN

+ #define MAX_RFFT_LEN (1<<MAX_RFFT_PWR)

++#endif

+ 

+ extern const int16_t twiddleSplit[MAX_RFFT_LEN/2];

+ 

+diff --git a/library/include_private/fft_real_twiddles_24x24.h b/library/include_private/fft_real_twiddles_24x24.h
+old mode 100644
+new mode 100755
+index 3017c61..257396e
+--- a/library/include_private/fft_real_twiddles_24x24.h
++++ b/library/include_private/fft_real_twiddles_24x24.h
+@@ -44,8 +44,12 @@
+      7               128

+ */

+ 

++#if !defined MAX_RFFT_PWR

+ #define MAX_RFFT_PWR 13

++#endif

++#if !defined MAX_RFFT_LEN

+ #define MAX_RFFT_LEN (1<<MAX_RFFT_PWR)

++#endif

+ 

+ extern const int32_t twiddleSplit24x24[MAX_RFFT_LEN/2];

+ 

diff --git a/tensorflow/lite/micro/tools/make/ext_libs/ndsplib-hifi4.patch b/tensorflow/lite/micro/tools/make/ext_libs/ndsplib-hifi4.patch
new file mode 100644
index 0000000..38a1b3d
--- /dev/null
+++ b/tensorflow/lite/micro/tools/make/ext_libs/ndsplib-hifi4.patch
@@ -0,0 +1,42 @@
+diff --git a/library/fft/fft/fft_real24x24_hifi4.c b/library/fft/fft/fft_real24x24_hifi4.c
+index 82f48e2..bdce59e 100644
+--- a/library/fft/fft/fft_real24x24_hifi4.c
++++ b/library/fft/fft/fft_real24x24_hifi4.c
+@@ -78,7 +78,7 @@
+   x,y           should not overlap
+   x,y           aligned on a 8-bytes boundary
+ -------------------------------------------------------------------------*/
+-DISCARD_FUN( int,
++DISCARD_FUN_FOR_NONVOID_RETURN( int,
+              fft_real24x24,
+              (f24* y,
+               int32_t* x,
+diff --git a/library/include/NatureDSP_types.h b/library/include/NatureDSP_types.h
+index 0069361..c0f411f 100644
+--- a/library/include/NatureDSP_types.h
++++ b/library/include/NatureDSP_types.h
+@@ -332,7 +332,9 @@ typedef struct tagComplex32_t
+   #define NASSERT(x) 
+   #define restrict 
+ #elif defined (COMPILER_XTENSA)
++#if !defined restrict
+   #define restrict __restrict
++#endif
+   #define onchip
+   #define NASSERT(x) {(void)__builtin_expect((x)!=0,1);}
+ #else
+diff --git a/library/include_private/common.h b/library/include_private/common.h
+index d647af4..25d0ca8 100644
+--- a/library/include_private/common.h
++++ b/library/include_private/common.h
+@@ -187,6 +187,10 @@ F_UNDERSCORE STRINGIZE(name) ":\n"          \
+ __attribute__ ((section ("/DISCARD/"))) \
+ retval_type name arglist \
+ {  }
++#define DISCARD_FUN_FOR_NONVOID_RETURN(retval_type,name,arglist) \
++__attribute__ ((section ("/DISCARD/"))) \
++retval_type name arglist \
++{ return (retval_type) 0; }
+ #endif
+ 
+ #ifdef __cplusplus
diff --git a/tensorflow/lite/micro/tools/make/ext_libs/ndsplib-hifi5.patch b/tensorflow/lite/micro/tools/make/ext_libs/ndsplib-hifi5.patch
new file mode 100644
index 0000000..118845c
--- /dev/null
+++ b/tensorflow/lite/micro/tools/make/ext_libs/ndsplib-hifi5.patch
@@ -0,0 +1,31 @@
+diff --git a/library/include/NatureDSP_types.h b/library/include/NatureDSP_types.h
+index 82e16c7..ce7fb75 100644
+--- a/library/include/NatureDSP_types.h
++++ b/library/include/NatureDSP_types.h
+@@ -332,7 +332,9 @@ typedef struct tagComplex32_t
+   #define NASSERT(x) 
+   #define restrict 
+ #elif defined (COMPILER_XTENSA)
++#if !defined restrict
+   #define restrict __restrict
++#endif
+   #define onchip
+   #define NASSERT(x) {(void)__builtin_expect((x)!=0,1);}
+ #else
+diff --git a/library/include_private/common.h b/library/include_private/common.h
+index 2eaf70f..9df811c 100644
+--- a/library/include_private/common.h
++++ b/library/include_private/common.h
+@@ -172,6 +172,12 @@ __pragma (warning(pop))
+ #if defined(COMPILER_XTENSA) || defined(COMPILER_GNU)
+ #define DISCARD_FUN(retval_type,name,arglist) \
+ __asm__(".type "#name", @object\n\t.global "#name"\n\t.align 4\n\t"#name":\n\t.long 0x49438B96,0x4D73F192\n\t");
++
++#define DISCARD_FUN_FOR_NONVOID_RETURN(retval_type,name,arglist) \
++__attribute__ ((section ("/DISCARD/"))) \
++retval_type name arglist \
++{ return (retval_type) 0; }
++
+ #endif
+ 
+ /*------ LIST OF DEFINES DEPENDING ON ISA OPTIONS ------*/
diff --git a/tensorflow/lite/micro/tools/make/ext_libs/person_detection_int8_vela_convert.sh b/tensorflow/lite/micro/tools/make/ext_libs/person_detection_int8_vela_convert.sh
index da51d75..b3fe24e 100755
--- a/tensorflow/lite/micro/tools/make/ext_libs/person_detection_int8_vela_convert.sh
+++ b/tensorflow/lite/micro/tools/make/ext_libs/person_detection_int8_vela_convert.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+# Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -47,18 +47,30 @@
 # See tensorflow/lite/micro/kernels/ethos_u/README.md for more info.
 MODEL_DIR=${GENERATED_SRCS_DIR}tensorflow/lite/micro/models
 CONVERTED_PERSON_MODEL_INT8=${MODEL_DIR}/person_detect_model_data_vela.cc
+PERSON_MODEL_HEADER=${MODEL_DIR}/person_detect_model_data.h
 
 if [ ! -f ${CONVERTED_PERSON_MODEL_INT8} ]; then
+
+  # Install ethos-u-vela if not already installed.
+  set +e
+  pip show ethos-u-vela >&2
+  retVal=$?
+  set -e
+  if [ $retVal -ne 0 ]; then
+    TEMPFILE=$(mktemp -d)/
+    python3 -m venv $TEMPFILE
+    source $TEMPFILE/bin/activate
+    pip install numpy==1.26.4 >&2
+    pip install --prefer-binary ethos-u-vela >&2
+  fi
+
   # Compile an optimized .tflite version for Ethos-U.
-  TEMPFILE=$(mktemp -d)/
-  python3 -m venv $TEMPFILE
-  source $TEMPFILE/bin/activate
-  python3 -m pip install --upgrade pip >&2
-  pip install --upgrade cython >&2
-  pip install --prefer-binary ethos-u-vela >&2
   vela --accelerator-config=ethos-u55-256 ${DOWNLOADS_DIR}/../../../models/person_detect.tflite \
        --output-dir ${MODEL_DIR} >&2
-  deactivate
+
+  if [ $retVal -ne 0 ]; then
+    deactivate
+  fi
 
   # Convert .tflite back to C array.
   echo "// This file is generated by $0." > ${CONVERTED_PERSON_MODEL_INT8}
@@ -66,11 +78,12 @@
        ${CONVERTED_PERSON_MODEL_INT8}
   echo -n "const " >> ${CONVERTED_PERSON_MODEL_INT8}
   xxd -i ${MODEL_DIR}/person_detect_vela.tflite >> ${CONVERTED_PERSON_MODEL_INT8}
-  sed -i 's/gen_cortex_m_corstone_300_cortex_m55_default_genfiles_tensorflow_lite_micro_models_person_detect_vela_tflite/g_person_detect_model_data/' \
+  sed  -i 's/gen_cortex_m_corstone_300_cortex_m55_.*genfiles_tensorflow_lite_micro_models_person_detect_vela_tflite/g_person_detect_model_data/' \
       ${CONVERTED_PERSON_MODEL_INT8}
-  sed -i 's/^const unsigned char g_person_detect_model_data/alignas\(16\) &/'  ${CONVERTED_PERSON_MODEL_INT8}
-  sed -i 's/g_person_detect_model_data_len/g_person_detect_model_data_size/'  ${CONVERTED_PERSON_MODEL_INT8}
-  sed -i 's/unsigned int/const unsigned int/' ${CONVERTED_PERSON_MODEL_INT8}
+  sed -i 's/^const unsigned char g_person_detect_model_data/alignas\(16\) &/' ${CONVERTED_PERSON_MODEL_INT8}
+  SIZE=$(sed -E -n -e 's/^.*g_person_detect_model_data_len = ([0-9]+);/\1/p' ${CONVERTED_PERSON_MODEL_INT8})
+  sed -i 's/^.*g_person_detect_model_data_len.*$//' ${CONVERTED_PERSON_MODEL_INT8}
+  sed -E -i "s/(^constexpr.*g_person_detect_model_data_size = )([0-9]+);/\1$SIZE;/" ${PERSON_MODEL_HEADER}
 fi
 
 echo "SUCCESS"
diff --git a/tensorflow/lite/micro/tools/make/ext_libs/xa_nnlib_hifi4.patch b/tensorflow/lite/micro/tools/make/ext_libs/xa_nnlib_hifi4.patch
index 227ee92..1bb15aa 100644
--- a/tensorflow/lite/micro/tools/make/ext_libs/xa_nnlib_hifi4.patch
+++ b/tensorflow/lite/micro/tools/make/ext_libs/xa_nnlib_hifi4.patch
@@ -1,207 +1,5 @@
-From 0a68f2ffa640d1b52314278cec838384722eb1d0 Mon Sep 17 00:00:00 2001
-From: William Huang <yushh@google.com>
-Date: Tue, 16 May 2023 09:18:55 +0000
-Subject: [PATCH] Optimize Xtensa transpose convolution for more kernel sizes
- and input channels.
-
-Previously, there were three code paths, in decreasing performance:
-
-1. Kernel size (H*W) multiple of 4, input channels multiple of 16
-2. Kernel size (H*W) multiple of 4, input channels multiple of 4
-3. Others (unoptimized case)
-
-This patch reduces them to the follow two cases:
-
-1. Input channels multiple of 4
-2. Others (unoptimized case)
-
-Original CL=cl/516144094
-
-BUG=227374718
-
-Signed-off-by: William Huang <yushh@google.com>
-
-Optimize Xtensa CONV2D circular buffer copy.
-
-In Xtensa's CONV2D kernel, data is shuffled around and padded so the 2D
-convolution turns into sequential vector products. Unfortunately, this
-process is somewhat slow, and the overhead is especially high for small
-vector lengths.
-
-This patch introduces the following:
-
-- Faster code path for no padding (since our models use VALID padding,
-  i.e., no padding at all)
-- Manual loop if array is small and memcpy if array is large
-- Skip memset on padded channels as the corresponding kernels are
-  already zero
-
-BUG=249796929
-
-Signed-off-by: William Huang <yushh@google.com>
-
-Add implementation for zero-copy CONV2D kernels.
-
-The previous `xa_nn_conv2d_std_sym8sxsym16s` implementation shuffles the
-input tensor into a circular buffer, flattening the dimensions, so that
-the 2D convolution turns into sequential vector products. However, this
-created significant overhead for layers where the resulting vector
-lengths are small.
-
-This patch implements an alternative zero-copy method that takes
-advantage of two facts:
-
-1. If `x_padding == 0`, the width dimension is automatically flattened
-   with the channel dimension, and we need only `kernel_height`
-   sequential vector products, even without the data shuffling
-2. Similar to the loop tiling done in
-   `xa_nn_matXvec_sym8sxsym16s_sym16s_circ`, we can tile the `out_width`
-   and `out_channels` dimensions, achieving the throughput of
-   `_xa_nn_dot_product_2row_4vec_mat_vecs_4bytes_aligned` (i.e., 1.6
-   MULAAAAQs/cycle), even when `out_height < 2`
-
-As a result, the patch significantly benefits layers where the kernel
-and output heights are small, leading to 25%+ cycle reductions in some
-use cases.
-
-Signed-off-by: William Huang <yushh@google.com>
----
- .../cnn/hifi4/xa_nn_conv2d_std_circ_buf.c     |  84 +++++++-
- .../cnn/hifi4/xa_nn_conv2d_std_state.h        |  15 ++
- .../cnn/hifi4/xa_nn_conv2d_std_sym8sxsym16s.c | 203 +++++++++++++++---
- .../hifi4/xa_nn_transpose_conv_sym8sxsym16s.c |  36 +---
- 4 files changed, 275 insertions(+), 63 deletions(-)
-
-diff --git a/algo/kernels/cnn/hifi4/xa_nn_conv2d_std_circ_buf.c b/algo/kernels/cnn/hifi4/xa_nn_conv2d_std_circ_buf.c
-index f8adba2..1a5f186 100644
---- a/algo/kernels/cnn/hifi4/xa_nn_conv2d_std_circ_buf.c
-+++ b/algo/kernels/cnn/hifi4/xa_nn_conv2d_std_circ_buf.c
-@@ -642,7 +642,8 @@ VOID conv2d_std_init_cir_buf(
- }
- 
- // Add x_stride (but not more than kernel_width) x (input_height x input_channels) new planes to circular buffer
--VOID conv2d_std_update_cir_buf(
-+// Slow version of conv2d_std_update_cir_buf with fewer requirements
-+VOID conv2d_std_update_cir_buf_slow(
-     WORD32 input_channels,
-     WORD32 input_channels_pad,
-     WORD32 input_bytewidth,
-@@ -742,6 +743,87 @@ VOID conv2d_std_update_cir_buf(
-   *pp_inp = (VOID *)p_inp;
- }
- 
-+// Add x_stride (but not more than kernel_width) x (input_height x input_channels) new planes to circular buffer
-+VOID conv2d_std_update_cir_buf(
-+    WORD32 input_channels,
-+    WORD32 input_channels_pad,
-+    WORD32 input_bytewidth,
-+    WORD32 input_width,
-+    WORD32 input_height,
-+    WORD32 y_padding,
-+    WORD32 y_b_pad,
-+    WORD32 x_padding,
-+    WORD32 kernel_width,
-+    WORD32 x_stride,
-+    VOID **pp_inp,
-+    WORD32 idx_beg_inp_width_pad,
-+    xa_nn_conv_state_t *p_state)
-+{
-+  if (y_padding != 0 || y_b_pad != 0 || x_padding != 0) {
-+    conv2d_std_update_cir_buf_slow(
-+      input_channels,
-+      input_channels_pad,
-+      input_bytewidth,
-+      input_width,
-+      input_height,
-+      y_padding,
-+      y_b_pad,
-+      x_padding,
-+      kernel_width,
-+      x_stride,
-+      pp_inp,
-+      idx_beg_inp_width_pad,
-+      p_state
-+    );
-+    return;
-+  }
-+
-+  WORD32 i,k;
-+  WORD8 *p_inp = (WORD8 *)*pp_inp;
-+  WORD32 planes_to_add = x_stride > kernel_width ? kernel_width : x_stride;
-+  WORD32 planes_to_keep = kernel_width - planes_to_add;
-+
-+  // Copy 'planes_to_add' planes of data to circular buffer
-+  AE_ADDCIRC16X4_XC((ae_int16x4 *)p_state->cir_buf.p_curr, planes_to_add * input_channels_pad * input_bytewidth);
-+  WORD8 *p_dst = (WORD8 *)p_state->cir_buf.p_curr;
-+  AE_ADDCIRC16X4_XC((ae_int16x4 *)p_dst, planes_to_keep * input_channels_pad * input_bytewidth);
-+
-+  WORD32 copy_inp_width = planes_to_add;
-+  WORD32 to_skip_inp_width = x_stride - planes_to_add;     // Non-zero for x_stride > kernel_width
-+
-+  int size = input_channels * input_bytewidth;
-+  if (size <= 32) {
-+    for(i=0;i<input_height;i++)
-+    {
-+      for(k=0;k<copy_inp_width;k++)
-+      {
-+        for (int j = 0; j < size; ++j) {
-+          p_dst[j] = p_inp[j];
-+        }
-+        AE_ADDCIRC16X4_XC((ae_int16x4 *)p_dst, input_channels_pad * input_bytewidth);
-+        p_inp += input_channels * input_bytewidth;
-+      }
-+      AE_ADDCIRC16X4_XC((ae_int16x4 *)p_dst, planes_to_keep * input_channels_pad * input_bytewidth);
-+      p_inp += (input_width - copy_inp_width) * input_channels * input_bytewidth;
-+    }
-+  } else {
-+    for(i=0;i<input_height;i++)
-+    {
-+      for(k=0;k<copy_inp_width;k++)
-+      {
-+        memcpy(p_dst, p_inp, input_channels * input_bytewidth);
-+        AE_ADDCIRC16X4_XC((ae_int16x4 *)p_dst, input_channels_pad * input_bytewidth);
-+        p_inp += input_channels * input_bytewidth;
-+      }
-+      AE_ADDCIRC16X4_XC((ae_int16x4 *)p_dst, planes_to_keep * input_channels_pad * input_bytewidth);
-+      p_inp += (input_width - copy_inp_width) * input_channels * input_bytewidth;
-+    }
-+  }
-+  p_inp += (-input_height * input_width + copy_inp_width + to_skip_inp_width) * input_channels * input_bytewidth;
-+
-+  *pp_inp = (VOID *)p_inp;
-+}
-+
- VOID xa_nn_dilated_conv2d_std_load_cir_buf_asym8(
-     WORD32 input_channels,
-     WORD32 input_channels_pad,
-diff --git a/algo/kernels/cnn/hifi4/xa_nn_conv2d_std_state.h b/algo/kernels/cnn/hifi4/xa_nn_conv2d_std_state.h
-index a2ba355..8d33bad 100644
---- a/algo/kernels/cnn/hifi4/xa_nn_conv2d_std_state.h
-+++ b/algo/kernels/cnn/hifi4/xa_nn_conv2d_std_state.h
-@@ -214,6 +214,21 @@ VOID conv2d_std_init_cir_buf(
-     VOID **pp_inp,
-     xa_nn_conv_state_t *p_state);
- 
-+VOID conv2d_std_update_cir_buf_slow(
-+    WORD32 input_channels,
-+    WORD32 input_channels_pad,
-+    WORD32 input_bytewidth,
-+    WORD32 input_width,
-+    WORD32 input_height,
-+    WORD32 y_padding,
-+    WORD32 y_b_pad,
-+    WORD32 x_padding,
-+    WORD32 kernel_width,
-+    WORD32 x_stride,
-+    VOID **pp_inp,
-+    WORD32 idx_beg_inp_width_pad,
-+    xa_nn_conv_state_t *p_state);
-+
- VOID conv2d_std_update_cir_buf(
-     WORD32 input_channels,
-     WORD32 input_channels_pad,
 diff --git a/algo/kernels/cnn/hifi4/xa_nn_conv2d_std_sym8sxsym16s.c b/algo/kernels/cnn/hifi4/xa_nn_conv2d_std_sym8sxsym16s.c
-index 92721bc..6f868be 100644
+index b9905e9..990b713 100644
 --- a/algo/kernels/cnn/hifi4/xa_nn_conv2d_std_sym8sxsym16s.c
 +++ b/algo/kernels/cnn/hifi4/xa_nn_conv2d_std_sym8sxsym16s.c
 @@ -49,6 +49,24 @@ static inline ae_int32x2 MultiplyByQuantizedMultiplier_ref(ae_int64 d_x,
@@ -229,78 +27,35 @@
  static WORD32 conv_x_left_pad(
      WORD32 x_padding,
      WORD32 kernel_width,
-@@ -238,41 +256,166 @@ WORD32 xa_nn_conv2d_std_per_chan_sym8sxsym16s(
-   WORD32 y_b_pad = kernel_height + (out_height - 1) * y_stride - (y_padding + input_height);
-   y_b_pad = y_b_pad < 0 ? 0 : y_b_pad;
+@@ -129,6 +147,160 @@ static WORD32 conv_x_right_pad(
+   return out_width_over_x_r_pad;
+ }
  
--  conv2d_std_init_cir_buf(input_channels, input_channels_pad, input_bytewidth, input_width, input_height, y_padding, y_b_pad, x_padding_var, kernel_width, x_stride, (VOID**)&pp_inp, p_state);
-+  if (x_padding || (input_channels & 0x3) || (out_channels & 0x3) || (out_width & 0x1)) {
-+    conv2d_std_init_cir_buf(input_channels, input_channels_pad, input_bytewidth, input_width, input_height, y_padding, y_b_pad, x_padding_var, kernel_width, x_stride, (VOID**)&pp_inp, p_state);
- 
--  // Index to padded input width
--  WORD32 idx_beg_inp_width_pad = kernel_width - x_stride;
--  idx_beg_inp_width_pad = idx_beg_inp_width_pad < 0 ? 0 : idx_beg_inp_width_pad;
-+    // Index to padded input width
-+    WORD32 idx_beg_inp_width_pad = kernel_width - x_stride;
-+    idx_beg_inp_width_pad = idx_beg_inp_width_pad < 0 ? 0 : idx_beg_inp_width_pad;
- 
- 
--  // Process Loop to compute one output plane [out_height x out_channels] per iteration
--  for(j=0;j<out_width-out_width_over_x_pad-out_width_over_x_r_pad;j++)
--  {
--    // Add x_stride x (input_height x input_channels) new planes to circular buffer
--    conv2d_std_update_cir_buf(input_channels, input_channels_pad, input_bytewidth, input_width, input_height, y_padding, y_b_pad, x_padding_var, kernel_width, x_stride, (VOID**)&pp_inp, idx_beg_inp_width_pad, p_state);
-+    // Process Loop to compute one output plane [out_height x out_channels] per iteration
-+    for(j=0;j<out_width-out_width_over_x_pad-out_width_over_x_r_pad;j++)
++static WORD32 xa_nn_conv2d_std_per_chan_sym8sxsym16s_no_circ_buf(
++    WORD16* __restrict__ p_out,
++    const WORD16* __restrict__ p_inp,
++    const WORD8* __restrict__ p_kernel,
++    const WORD64* __restrict__ p_bias,
++    WORD32 input_height,
++    WORD32 input_width,
++    WORD32 input_channels,
++    WORD32 kernel_height,
++    WORD32 kernel_width,
++    WORD32 out_channels,
++    WORD32 x_stride,
++    WORD32 y_stride,
++    WORD32 x_padding,
++    WORD32 y_padding,
++    WORD32 out_height,
++    WORD32 out_width,
++    WORD32 input_zero_bias,
++    WORD32 * p_out_multiplier,
++    WORD32 * p_out_shift,
++    WORD32 out_zero_bias,
++    WORD32 out_data_format
++    )
 +    {
-+      // Add x_stride x (input_height x input_channels) new planes to circular buffer
-+      conv2d_std_update_cir_buf(input_channels, input_channels_pad, input_bytewidth, input_width, input_height, y_padding, y_b_pad, x_padding_var, kernel_width, x_stride, (VOID**)&pp_inp, idx_beg_inp_width_pad, p_state);
- 
--    // Update index to input width padded
--    idx_beg_inp_width_pad += x_stride;
-+      // Update index to input width padded
-+      idx_beg_inp_width_pad += x_stride;
- 
--    // Convolution using matXvec with matrix as circular buffer
--    xa_nn_matXvec_sym8sxsym16s_sym16s_circ
--      (p_out /* output */
--       ,p_state->cir_buf.p_curr/* matrix: rows x cols */
--       ,p_state->p_kernel_padded /* vec: cols */
--       ,p_bias /* bias */
--       ,out_height /* rows */
--       ,input_channels_pad * kernel_width * kernel_height /* cols */
--       ,input_channels_pad * kernel_width * y_stride/* row_offset */
--       ,out_channels /* vec_count */
--       ,input_channels_pad * kernel_width * kernel_height /* vec_stride */
--       ,out_channels_offset /* out_col_offset */
--       ,out_height_offset /* out_row_offset */
--       ,input_zero_bias
--       ,p_out_multiplier
--       ,p_out_shift
--       ,out_zero_bias
--      );
--    p_out += out_width_offset;
-+      // Convolution using matXvec with matrix as circular buffer
-+      xa_nn_matXvec_sym8sxsym16s_sym16s_circ
-+        (p_out /* output */
-+        ,p_state->cir_buf.p_curr/* matrix: rows x cols */
-+        ,p_state->p_kernel_padded /* vec: cols */
-+        ,p_bias /* bias */
-+        ,out_height /* rows */
-+        ,input_channels_pad * kernel_width * kernel_height /* cols */
-+        ,input_channels_pad * kernel_width * y_stride/* row_offset */
-+        ,out_channels /* vec_count */
-+        ,input_channels_pad * kernel_width * kernel_height /* vec_stride */
-+        ,out_channels_offset /* out_col_offset */
-+        ,out_height_offset /* out_row_offset */
-+        ,input_zero_bias
-+        ,p_out_multiplier
-+        ,p_out_shift
-+        ,out_zero_bias
-+        );
-+      p_out += out_width_offset;
-+    }
-+  } else {
++
 +    const WORD16 *p_dst0_0 = p_out + 0;
 +    const WORD16 *p_dst0_1 = p_out + 1;
 +    const WORD16 *p_dst0_2 = p_out + 2;
@@ -310,8 +65,8 @@
 +    const WORD16 *p_dst1_2 = p_out + out_channels + 2;
 +    const WORD16 *p_dst1_3 = p_out + out_channels + 3;
 +    int kernel_out_ch_offset = kernel_height * kernel_width * input_channels;
-+    int input_x_offset = input_channels * x_stride / 4;
-+    int p_inp_vec_stride = input_width * input_channels / 4;
++    int input_x_offset = (input_channels * x_stride) / 4;
++    int p_inp_vec_stride = (input_width * input_channels) / 4;
 +    int p_kern_vec_stride = kernel_width * input_channels;
 +    int vec_len = kernel_width * input_channels;
 +    for (int out_y = 0; out_y < out_height; ++out_y) {
@@ -325,6 +80,7 @@
 +          ae_int64 out1_1 = p_bias[out_ch + 1];
 +          ae_int64 out1_2 = p_bias[out_ch + 2];
 +          ae_int64 out1_3 = p_bias[out_ch + 3];
++
 +          out0_0 = AE_SLAI64(out0_0, 8);
 +          out0_1 = AE_SLAI64(out0_1, 8);
 +          out0_2 = AE_SLAI64(out0_2, 8);
@@ -333,10 +89,11 @@
 +          out1_1 = AE_SLAI64(out1_1, 8);
 +          out1_2 = AE_SLAI64(out1_2, 8);
 +          out1_3 = AE_SLAI64(out1_3, 8);
++
 +          int in_x_o = out_x * x_stride;
 +          int in_y_o = out_y * y_stride - y_padding;
 +          int k_y_min = -in_y_o;
-+          int k_y_max = input_width - in_y_o;
++          int k_y_max = input_height - in_y_o;
 +          k_y_min = (k_y_min < 0) ? 0 : k_y_min;
 +          k_y_min = (k_y_min < kernel_height) ? k_y_min : kernel_height;
 +          k_y_max = (k_y_max < 0) ? 0 : k_y_max;
@@ -382,6 +139,7 @@
 +              AE_MULAAAAQ16(out1_3, d_inp1, d_kern3);
 +            }
 +          }
++
 +          out0_0 = AE_SRAI64(out0_0, 8);
 +          out0_1 = AE_SRAI64(out0_1, 8);
 +          out0_2 = AE_SRAI64(out0_2, 8);
@@ -390,6 +148,7 @@
 +          out1_1 = AE_SRAI64(out1_1, 8);
 +          out1_2 = AE_SRAI64(out1_2, 8);
 +          out1_3 = AE_SRAI64(out1_3, 8);
++
 +          ae_int32x2 acc_vec0 = MultiplyByQuantizedMultiplier_x2_opt(
 +              out0_0, out1_0, p_out_multiplier[out_ch + 0],
 +              p_out_shift[out_ch + 0]);
@@ -423,70 +182,45 @@
 +        p_dst1_3 += out_channels;
 +      }
 +    }
++  return 0;
++}
++
+ WORD32 xa_nn_conv2d_std_per_chan_sym8sxsym16s(
+     WORD16* __restrict__ p_out,
+     const WORD16* __restrict__ p_inp,
+@@ -180,6 +352,35 @@ WORD32 xa_nn_conv2d_std_per_chan_sym8sxsym16s(
+     XA_NNLIB_ARG_CHK_COND((p_out_shift[itr] < -31 || p_out_shift[itr] > 31), -1);
    }
  
-   return 0;
-diff --git a/algo/kernels/cnn/hifi4/xa_nn_transpose_conv_sym8sxsym16s.c b/algo/kernels/cnn/hifi4/xa_nn_transpose_conv_sym8sxsym16s.c
-index 7f31b75..a010d45 100644
---- a/algo/kernels/cnn/hifi4/xa_nn_transpose_conv_sym8sxsym16s.c
-+++ b/algo/kernels/cnn/hifi4/xa_nn_transpose_conv_sym8sxsym16s.c
-@@ -157,7 +157,7 @@ int xa_nn_transpose_conv_sym8sxsym16s(WORD16* output_data,
- 	 */
- 	if(input_data && filter_data && output_data && scratch_buffer &&
- 			(((unsigned int)input_data&0x7)==0) && (((unsigned int)filter_data&0x3)==0) && (((unsigned int)output_data&0x7) == 0) &&
--			(((unsigned int)scratch_buffer&0x7) == 0) && ((input_depth&0xF)==0) && ((filter_height*filter_width&0x3)==0))
-+			(((unsigned int)scratch_buffer&0x7) == 0) && ((input_depth&0x3)==0))
- 	{
- 		{
- 			//tbd : batch = 1, need to handle other values and in_x_min/max= 0 .. need toc heck for other values
-@@ -180,7 +180,8 @@ int xa_nn_transpose_conv_sym8sxsym16s(WORD16* output_data,
- 					filt_y_max = (filt_y_max < filter_height) ? filt_y_max : filter_height;
- 					filt_y_max = (filt_y_max < 0) ? 0 : filt_y_max;
- 					pinp =  (WORD16*)&input_data[in_y*input_width*input_depth+in_x*input_depth];
--					for (int in_channel = 0; in_channel < input_depth; in_channel+=16)
-+					int in_channel = 0;
-+					for (; in_channel + 15 < input_depth; in_channel+=16)
- 					{
- 						ae_int16x4 d_inp, d_inp1, d_inp2, d_inp3;
- 						AE_L16X4_IP(d_inp, (ae_int16x4*)pinp, sizeof(WORD64));
-@@ -235,36 +236,7 @@ int xa_nn_transpose_conv_sym8sxsym16s(WORD16* output_data,
- 							}
- 						}
- 					}
--				}
--			}
--		}
--	}
--	else if(input_data && filter_data && output_data && scratch_buffer &&
--			(((unsigned int)input_data&0x7)==0) && (((unsigned int)filter_data&0x3)==0) && (((unsigned int)output_data&0x7) == 0) &&
--			(((unsigned int)scratch_buffer&0x7) == 0) && ((input_depth&0x3)==0) && ((filter_height*filter_width&0x3)==0))
--	{
--		{
--			//tbd : batch = 1, need to handle other values and in_x_min/max= 0 .. need toc heck for other values
--			for (int in_y = 0; in_y < input_height; ++in_y)
--			{
--				for (int in_x = 0; in_x < input_width; ++in_x)
--				{
--					const int out_x_orig = in_x*stride_width - pad_width;
--					const int out_y_orig = in_y*stride_height - pad_height;
--					int filt_x_min = -out_x_orig; 
--					int filt_x_max = output_width - out_x_orig; 
--					int filt_y_min = -out_y_orig; 
--					int filt_y_max = output_height - out_y_orig; 
--					filt_x_min = (filt_x_min < filter_width) ? filt_x_min : filter_width;
--					filt_x_min = (filt_x_min < 0) ? 0 : filt_x_min;
--					filt_x_max = (filt_x_max < filter_width) ? filt_x_max : filter_width;
--					filt_x_max = (filt_x_max < 0) ? 0 : filt_x_max;
--					filt_y_min = (filt_y_min < filter_height) ? filt_y_min : filter_height;
--					filt_y_min = (filt_y_min < 0) ? 0 : filt_y_min;
--					filt_y_max = (filt_y_max < filter_height) ? filt_y_max : filter_height;
--					filt_y_max = (filt_y_max < 0) ? 0 : filt_y_max;
--					pinp =  (WORD16*)&input_data[in_y*input_width*input_depth+in_x*input_depth];
--					for (int in_channel = 0; in_channel < input_depth; in_channel+=4)
-+					for (; in_channel + 3 < input_depth; in_channel+=4)
- 					{
- 						ae_int16x4 d_inp;
- 						AE_L16X4_IP(d_inp, (ae_int16x4*)pinp, sizeof(WORD64));
--- 
-2.41.0.162.gfafddb0af9-goog
-
++  if ( !(x_padding) && !(input_channels & 0x3) && !(out_channels & 0x3) && !(out_width & 0x1) && (out_data_format == 0) && ((out_width-1)*x_stride <=(input_width-kernel_width) ) )
++  {
++    int ret_val=0;
++    ret_val=xa_nn_conv2d_std_per_chan_sym8sxsym16s_no_circ_buf(p_out,
++                                                              p_inp,
++                                                              p_kernel,
++                                                              p_bias,
++                                                              input_height,
++                                                              input_width,
++                                                              input_channels,
++                                                              kernel_height,
++                                                              kernel_width,
++                                                              out_channels,
++                                                              x_stride,
++                                                              y_stride,
++                                                              x_padding,
++                                                              y_padding,
++                                                              out_height,
++                                                              out_width,
++                                                              input_zero_bias,
++                                                              p_out_multiplier,
++                                                              p_out_shift,
++                                                              out_zero_bias,
++                                                              out_data_format
++                                                            );
++
++    return ret_val;
++  }
++
+   WORD32 j;
+   WORD32 input_bytewidth = 2;
+   VOID *pp_inp = (VOID *)p_inp;
diff --git a/tensorflow/lite/micro/tools/make/ext_libs/xa_nnlib_hifi5.patch b/tensorflow/lite/micro/tools/make/ext_libs/xa_nnlib_hifi5.patch
deleted file mode 100644
index 9d95c63..0000000
--- a/tensorflow/lite/micro/tools/make/ext_libs/xa_nnlib_hifi5.patch
+++ /dev/null
@@ -1,36 +0,0 @@
-diff --git a/algo/kernels/fc/hifi4/xa_nn_fully_connected.c b/algo/kernels/fc/hifi4/xa_nn_fully_connected.c
-index 26a2b73..61f0a64 100644
---- a/algo/kernels/fc/hifi4/xa_nn_fully_connected.c
-+++ b/algo/kernels/fc/hifi4/xa_nn_fully_connected.c
-@@ -298,7 +298,6 @@ WORD32 xa_nn_fully_connected_sym8sxasym8s_asym8s
-   XA_NNLIB_ARG_CHK_PTR(p_out, -1);
-   XA_NNLIB_ARG_CHK_PTR(p_weight, -1);
-   XA_NNLIB_ARG_CHK_PTR(p_inp, -1);
--  XA_NNLIB_ARG_CHK_PTR(p_bias, -1);
-   /* Pointer alignment checks */
- #if 0
-   XA_NNLIB_ARG_CHK_ALIGN(p_out, ALIGNMENT, -1);
-@@ -310,7 +309,8 @@ WORD32 xa_nn_fully_connected_sym8sxasym8s_asym8s
-   XA_NNLIB_ARG_CHK_ALIGN(p_out, sizeof(WORD8), -1);
-   XA_NNLIB_ARG_CHK_ALIGN(p_weight, sizeof(WORD8), -1);
-   XA_NNLIB_ARG_CHK_ALIGN(p_inp, sizeof(WORD8), -1);
--  XA_NNLIB_ARG_CHK_ALIGN(p_bias, sizeof(WORD32), -1);
-+  if (p_bias != NULL)
-+    XA_NNLIB_ARG_CHK_ALIGN(p_bias, sizeof(WORD32), -1);
- #endif
-   /* Basic Parameter checks */
-   XA_NNLIB_ARG_CHK_COND((out_depth <= 0), -1);
-diff --git a/algo/kernels/matXvec/hifi5/xa_nn_matXvec_sym8sxasym8s.c b/algo/kernels/matXvec/hifi5/xa_nn_matXvec_sym8sxasym8s.c
-index 5350cbe..a91e043 100644
---- a/algo/kernels/matXvec/hifi5/xa_nn_matXvec_sym8sxasym8s.c
-+++ b/algo/kernels/matXvec/hifi5/xa_nn_matXvec_sym8sxasym8s.c
-@@ -704,7 +704,8 @@ WORD32 xa_nn_matXvec_sym8sxasym8s_asym8s(
-   XA_NNLIB_ARG_CHK_PTR(p_mat1, -1);
-   XA_NNLIB_ARG_CHK_PTR(p_vec1, -1);
-   /* Pointer alignment checks */
--  XA_NNLIB_ARG_CHK_ALIGN(p_bias, sizeof(WORD32), -1);
-+  if (p_bias != NULL)
-+    XA_NNLIB_ARG_CHK_ALIGN(p_bias, sizeof(WORD32), -1);
-   /* Basic Parameter checks */
-   XA_NNLIB_ARG_CHK_COND((rows <= 0), -1);
-   XA_NNLIB_ARG_CHK_COND((cols1 <= 0), -1);
diff --git a/tensorflow/lite/micro/tools/make/ext_libs/xi_tflmlib_vision_p6.patch b/tensorflow/lite/micro/tools/make/ext_libs/xi_tflmlib_vision_p6.patch
new file mode 100644
index 0000000..d7df52f
--- /dev/null
+++ b/tensorflow/lite/micro/tools/make/ext_libs/xi_tflmlib_vision_p6.patch
@@ -0,0 +1,28 @@
+From ed92529a6be7a910462558edcf10070fbb0f2870 Mon Sep 17 00:00:00 2001
+From: Ryan Kuester <kuester@bdti.com>
+Date: Thu, 1 Aug 2024 12:48:12 -0500
+Subject: [PATCH] fix: use <climits> instead of <limits> to access INT_MAX and
+ friends
+
+For compatibility with the C++ library standard, use the header,
+<climits>, to access constants such as INT_MAX.
+---
+ runtime/include/cnnrt_xi.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/runtime/include/cnnrt_xi.h b/runtime/include/cnnrt_xi.h
+index f3a911e..00c74b8 100644
+--- a/runtime/include/cnnrt_xi.h
++++ b/runtime/include/cnnrt_xi.h
+@@ -25,7 +25,7 @@
+ #  define INCLUDE_XI_CNN

+ #endif

+ 

+-#include <limits>

++#include <climits>

+ #include "xi_api.h"

+ #include "xi_cnn_api.h"

+ #include "xi_tile_manager.h"

+-- 
+2.43.0
+
diff --git a/tensorflow/lite/micro/tools/make/ext_libs/xtensa.inc b/tensorflow/lite/micro/tools/make/ext_libs/xtensa.inc
index 3b28267..70e1880 100644
--- a/tensorflow/lite/micro/tools/make/ext_libs/xtensa.inc
+++ b/tensorflow/lite/micro/tools/make/ext_libs/xtensa.inc
@@ -1,9 +1,12 @@
+
 # Explicitly add kernel sources specific to the Xtensa optimized
 # implementations.
 MICROLITE_CC_KERNEL_SRCS += \
   $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/xtensa/add_vision.cc \
+  $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/xtensa/conv_common_xtensa.cc \
   $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/xtensa/conv_hifi.cc \
   $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/xtensa/conv_int16_reference.cc \
+  $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/xtensa/conv_int8_int16.cc \
   $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/xtensa/conv_int8_reference.cc \
   $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/xtensa/conv_vision.cc \
   $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/xtensa/depthwise_conv_hifi.cc \
@@ -25,12 +28,25 @@
     $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/xtensa/hifimini/svdf.cc \
     $(TENSORFLOW_ROOT)tensorflow/lite/micro/kernels/xtensa/hifimini/fully_connected.cc
 
+  FFT_PATH := $(MAKEFILE_DIR)/downloads/hifi_fft
+  INCLUDES += -I$(FFT_PATH)/
+
+  THIRD_PARTY_KERNEL_CC_SRCS += \
+    $(shell find $(FFT_PATH)/hifi2_fft -name "*.c")
+  THIRD_PARTY_CC_HDRS += \
+    $(shell find $(FFT_PATH)/hifi2_fft -name "*.h")
+
 else ifeq ($(TARGET_ARCH), hifi5)
   DOWNLOAD_RESULT := $(shell $(MAKEFILE_DIR)/ext_libs/xtensa_download.sh ${DOWNLOADS_DIR} hifi5 $(TENSORFLOW_ROOT))
   ifneq ($(DOWNLOAD_RESULT), SUCCESS)
     $(error Something went wrong with the xtensa download: $(DOWNLOAD_RESULT))
   endif
 
+  DOWNLOAD_RESULT := $(shell $(MAKEFILE_DIR)/ext_libs/xtensa_ndsp_download.sh ${DOWNLOADS_DIR} hifi5 $(TENSORFLOW_ROOT))
+  ifneq ($(DOWNLOAD_RESULT), SUCCESS)
+    $(error Something went wrong with the xtensa ndsp download: $(DOWNLOAD_RESULT))
+  endif
+
   # TODO(b/161489252): -Wno-shadow is only needed for xannlib. But since we do
   # not have separate cflags (or the concept of modular build targets) with the
   # Makefile, -Wno-shadow will be used for everything.
@@ -43,10 +59,19 @@
   CXXFLAGS += $(PLATFORM_FLAGS)
 
   NNLIB_PATH := $(MAKEFILE_DIR)/downloads/xa_nnlib_hifi5
+  NDSPLIB_PATH := $(MAKEFILE_DIR)/downloads/ndsplib-hifi5
 
   THIRD_PARTY_KERNEL_CC_SRCS += \
     $(shell find $(NNLIB_PATH) -name "*.c")
 
+  # The NDSP library has a lot of files. Add them as needed.
+  THIRD_PARTY_KERNEL_CC_SRCS += \
+    $(shell find $(NDSPLIB_PATH)/library/fft/fft -name "*.c") \
+    $(shell find $(NDSPLIB_PATH)/library/fft/fft_ie -name "*.c") \
+    $(shell find $(NDSPLIB_PATH)/library/fft/fft_ief -name "*.c") \
+    $(shell find $(NDSPLIB_PATH)/library/twiddles -name "*.c") \
+    $(shell find $(NDSPLIB_PATH)/library -name "version.c")
+
   EXCLUDED_NNLIB_SRCS = \
     $(NNLIB_PATH)/algo/layers/cnn/src/xa_nn_cnn_api.c \
     $(NNLIB_PATH)/algo/layers/gru/src/xa_nn_gru_api.c \
@@ -55,7 +80,8 @@
   THIRD_PARTY_KERNEL_CC_SRCS := $(filter-out $(EXCLUDED_NNLIB_SRCS), $(THIRD_PARTY_KERNEL_CC_SRCS))
 
   THIRD_PARTY_CC_HDRS += \
-    $(shell find $(NNLIB_PATH) -name "*.h")
+    $(shell find $(NNLIB_PATH) -name "*.h") \
+    $(shell find $(NDSPLIB_PATH) -name "*.h")
 
   INCLUDES += \
     -I$(NNLIB_PATH)/ \
@@ -63,15 +89,20 @@
     -I$(NNLIB_PATH)/include/nnlib/ \
     -I$(NNLIB_PATH)/include/ \
     -I$(NNLIB_PATH)/algo/common/include/ \
-    -I$(NNLIB_PATH)/algo/ndsp/hifi5/include/
-
-else ifeq ($(TARGET_ARCH), hifi4)
-
+    -I$(NDSPLIB_PATH)/library/include/ \
+    -I$(NDSPLIB_PATH)/library/include_private/
+else ifeq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), hifi3 hifi4))
+  # NNLib hifi4 also supports hifi3
   DOWNLOAD_RESULT := $(shell $(MAKEFILE_DIR)/ext_libs/xtensa_download.sh ${DOWNLOADS_DIR} hifi4 $(TENSORFLOW_ROOT))
   ifneq ($(DOWNLOAD_RESULT), SUCCESS)
     $(error Something went wrong with the xtensa download: $(DOWNLOAD_RESULT))
   endif
 
+  DOWNLOAD_RESULT := $(shell $(MAKEFILE_DIR)/ext_libs/xtensa_ndsp_download.sh ${DOWNLOADS_DIR} $(TARGET_ARCH) $(TENSORFLOW_ROOT))
+  ifneq ($(DOWNLOAD_RESULT), SUCCESS)
+    $(error Something went wrong with the xtensa ndsp download: $(DOWNLOAD_RESULT))
+  endif
+
   # TODO(b/161489252): -Wno-shadow is only needed for xannlib. But since we do
   # not have separate cflags (or the concept of modular build targets) with the
   # Makefile, -Wno-shadow will be used for everything.
@@ -83,20 +114,44 @@
   CCFLAGS += $(PLATFORM_FLAGS)
   CXXFLAGS += $(PLATFORM_FLAGS)
 
+  # NNLib for hifi4 also supports hifi3
   NNLIB_PATH := $(MAKEFILE_DIR)/downloads/xa_nnlib_hifi4
+  NDSPLIB_PATH := $(MAKEFILE_DIR)/downloads/ndsplib-$(TARGET_ARCH)
 
   THIRD_PARTY_KERNEL_CC_SRCS += \
     $(shell find $(NNLIB_PATH) -name "*.c")
 
+  # The NDSP library has a lot of files. Add them as needed.
+  THIRD_PARTY_KERNEL_CC_SRCS += \
+    $(shell find $(NDSPLIB_PATH)/library/fft/fft -name "*.c") \
+    $(shell find $(NDSPLIB_PATH)/library/fft/fft_ie -name "*.c") \
+    $(shell find $(NDSPLIB_PATH)/library/fft/fft_ief -name "*.c") \
+    $(shell find $(NDSPLIB_PATH)/library/twiddles -name "*.c") \
+    $(shell find $(NDSPLIB_PATH)/library -name "version.c")
+
   EXCLUDED_NNLIB_SRCS = \
     $(NNLIB_PATH)/algo/layers/cnn/src/xa_nn_cnn_api.c \
     $(NNLIB_PATH)/algo/layers/gru/src/xa_nn_gru_api.c \
     $(NNLIB_PATH)/algo/layers/lstm/src/xa_nn_lstm_api.c
 
+  ifeq ($(TARGET_ARCH), hifi3)
+    EXCLUDED_NNLIB_SRCS += \
+      $(NNLIB_PATH)/algo/ndsp/hifi4/src/pow2f_tbl.c \
+      $(NNLIB_PATH)/algo/ndsp/hifi4/src/scl_tanhf_hifi4.c \
+      $(NNLIB_PATH)/algo/ndsp/hifi4/src/vec_tanhf_hifi4.c \
+      $(NNLIB_PATH)/algo/ndsp/hifi4/src/tanhf_tbl.c
+  endif
+
+  ifeq ($(TARGET_ARCH), hifi4)
+    EXCLUDED_NNLIB_SRCS += \
+      $(NNLIB_PATH)/algo/kernels/activations/hifi4/xa_nn_activations_asym8_asym8.c
+  endif
+
   THIRD_PARTY_KERNEL_CC_SRCS := $(filter-out $(EXCLUDED_NNLIB_SRCS), $(THIRD_PARTY_KERNEL_CC_SRCS))
 
   THIRD_PARTY_CC_HDRS += \
-    $(shell find $(NNLIB_PATH) -name "*.h")
+    $(shell find $(NNLIB_PATH) -name "*.h") \
+    $(shell find $(NDSPLIB_PATH) -name "*.h")
 
   INCLUDES += \
     -I$(NNLIB_PATH)/ \
@@ -104,7 +159,8 @@
     -I$(NNLIB_PATH)/include/nnlib/ \
     -I$(NNLIB_PATH)/include/ \
     -I$(NNLIB_PATH)/algo/common/include/ \
-    -I$(NNLIB_PATH)/algo/ndsp/hifi4/include/
+    -I$(NDSPLIB_PATH)/library/include/ \
+    -I$(NDSPLIB_PATH)/library/include_private/
 
 else ifeq ($(TARGET_ARCH), vision_p6)
   DOWNLOAD_RESULT := $(shell $(MAKEFILE_DIR)/ext_libs/xtensa_download.sh ${DOWNLOADS_DIR} vision_p6 $(TENSORFLOW_ROOT))
@@ -139,21 +195,3 @@
 else
   $(error Unsupported TARGET_ARCH=$(TARGET_ARCH))
 endif
-
-FFT_PATH := $(MAKEFILE_DIR)/downloads/hifi_fft
-
-INCLUDES += -I$(FFT_PATH)/
-
-ifeq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), hifi3 hifi4 hifi5))
-THIRD_PARTY_KERNEL_CC_SRCS += \
-    $(shell find $(FFT_PATH)/hifi3_fft -name "*.c")
-
-THIRD_PARTY_CC_HDRS += \
-    $(shell find $(FFT_PATH)/hifi3_fft -name "*.h")
-else ifeq ($(TARGET_ARCH), hifimini)
-THIRD_PARTY_KERNEL_CC_SRCS += \
-    $(shell find $(FFT_PATH)/hifi2_fft -name "*.c")
-
-THIRD_PARTY_CC_HDRS += \
-    $(shell find $(FFT_PATH)/hifi2_fft -name "*.h")
-endif
diff --git a/tensorflow/lite/micro/tools/make/ext_libs/xtensa_download.sh b/tensorflow/lite/micro/tools/make/ext_libs/xtensa_download.sh
index fb45123..9726980 100755
--- a/tensorflow/lite/micro/tools/make/ext_libs/xtensa_download.sh
+++ b/tensorflow/lite/micro/tools/make/ext_libs/xtensa_download.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+# Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -38,23 +38,26 @@
 source ${3}tensorflow/lite/micro/tools/make/bash_helpers.sh
 
 DOWNLOADS_DIR=${1}
-if [ ! -d ${DOWNLOADS_DIR} ]; then
-  echo "The top-level downloads directory: ${DOWNLOADS_DIR} does not exist."
-  exit 1
-fi
+PATCH=""
 
-if [[ ${2} == "hifi4" ]]; then
-  LIBRARY_URL="http://github.com/foss-xtensa/nnlib-hifi4/raw/master/archive/xa_nnlib_hifi4_10_14_2022.zip"
+if [[ ${2} == "hifi3" ]]; then
+  LIBRARY_URL="http://github.com/foss-xtensa/nnlib-hifi4/raw/master/archive/xa_nnlib_hifi4_09_05_2023.zip"
   LIBRARY_DIRNAME="xa_nnlib_hifi4"
-  LIBRARY_MD5="2bf3c1c7fd5a23f157babc8e24fd2c55"
+  LIBRARY_MD5="2a54e056aef73a4fcffde4643998501a"
+elif [[ ${2} == "hifi4" ]]; then
+  LIBRARY_URL="http://github.com/foss-xtensa/nnlib-hifi4/raw/master/archive/xa_nnlib_hifi4_09_05_2023.zip"
+  LIBRARY_DIRNAME="xa_nnlib_hifi4"
+  LIBRARY_MD5="2a54e056aef73a4fcffde4643998501a"
+  PATCH="../../ext_libs/xa_nnlib_hifi4.patch"
 elif [[ ${2} == "hifi5" ]]; then
-  LIBRARY_URL="http://github.com/foss-xtensa/nnlib-hifi5/raw/master/archive/xa_nnlib_hifi5_12_19_2022.zip"
+  LIBRARY_URL="http://github.com/foss-xtensa/nnlib-hifi5/raw/master/archive/xa_nnlib_hifi5_09_05_2023.zip"
   LIBRARY_DIRNAME="xa_nnlib_hifi5"
-  LIBRARY_MD5="83306809191f42a064bde688b94e1eb1"
+  LIBRARY_MD5="1deb55ef200bf5dbedc70b99b02140c0"
 elif [[ ${2} == "vision_p6" ]]; then
   LIBRARY_URL="https://github.com/foss-xtensa/tflmlib_vision/raw/main/archive/xi_tflmlib_vision_p6_22_06_29.zip"
   LIBRARY_DIRNAME="xi_tflmlib_vision_p6"
   LIBRARY_MD5="fea3720d76fdb3a5a337ace7b6081b56"
+  PATCH="../../ext_libs/xi_tflmlib_vision_p6.patch"
 else
   echo "Attempting to download an unsupported xtensa variant: ${2}"
   exit 1
@@ -62,35 +65,30 @@
 
 LIBRARY_INSTALL_PATH=${DOWNLOADS_DIR}/${LIBRARY_DIRNAME}
 
-if [ -d ${LIBRARY_INSTALL_PATH} ]; then
+should_download=$(check_should_download ${DOWNLOADS_DIR})
+
+if [[ ${should_download} == "no" ]]; then
+  show_download_url_md5 ${LIBRARY_URL} ${LIBRARY_MD5}
+elif [ ! -d ${DOWNLOADS_DIR} ]; then
+  echo "The top-level downloads directory: ${DOWNLOADS_DIR} does not exist."
+  exit 1
+elif [ -d ${LIBRARY_INSTALL_PATH} ]; then
   echo >&2 "${LIBRARY_INSTALL_PATH} already exists, skipping the download."
 else
   TEMPDIR="$(mktemp -d)"
   TEMPFILE="${TEMPDIR}/${LIBRARY_DIRNAME}.zip"
   wget ${LIBRARY_URL} -O "$TEMPFILE" >&2
-  MD5=`md5sum "$TEMPFILE" | awk '{print $1}'`
+  check_md5 "${TEMPFILE}" ${LIBRARY_MD5}
 
-  if [[ ${MD5} != ${LIBRARY_MD5} ]]
-  then
-    echo "Bad checksum. Expected: ${LIBRARY_MD5}, Got: ${MD5}"
-    exit 1
-  fi
+  unzip -qo "$TEMPFILE" -d ${DOWNLOADS_DIR} >&2
 
-  # Check if another make process has already extracted the downloaded files.
-  # If so, skip extracting and patching.
-  if [ -d ${LIBRARY_INSTALL_PATH} ]; then
-    echo >&2 "${LIBRARY_INSTALL_PATH} already exists, skipping the extraction."
-  else
-    unzip -qo "$TEMPFILE" -d ${DOWNLOADS_DIR} >&2
+  rm -rf "${TEMPDIR}"
 
-    rm -rf "${TEMPDIR}"
-
-    pushd "${LIBRARY_INSTALL_PATH}" > /dev/null
-    chmod -R +w ./
-    if [[ -f "../../ext_libs/xa_nnlib_${2}.patch" ]]; then
-      create_git_repo ./
-      apply_patch_to_folder ./ "../../ext_libs/xa_nnlib_${2}.patch" "TFLM patch"
-    fi
+  pushd "${LIBRARY_INSTALL_PATH}" > /dev/null
+  chmod -R +w ./
+  if [ "${PATCH}" ]; then
+    create_git_repo ./
+    apply_patch_to_folder ./ ${PATCH} "TFLM patch"
   fi
 fi
 
diff --git a/tensorflow/lite/micro/tools/make/ext_libs/xtensa_ndsp_download.sh b/tensorflow/lite/micro/tools/make/ext_libs/xtensa_ndsp_download.sh
new file mode 100755
index 0000000..71fe1d1
--- /dev/null
+++ b/tensorflow/lite/micro/tools/make/ext_libs/xtensa_ndsp_download.sh
@@ -0,0 +1,100 @@
+#!/bin/bash
+# Copyright 2024 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+#
+# Downloads necessary to build with OPTIMIZED_KERNEL_DIR=xtensa.
+#
+# Called with three arguments:
+# 1 - Path to the downloads folder which is typically
+#     ${TENSORFLOW_ROOT}/tensorflow/lite/micro/tools/make/downloads
+# 2 - Xtensa variant to download for (e.g. hifi4)
+# 3 - (optional) TENSORFLOW_ROOT: path to root of the TFLM tree (relative to directory from where the script is called).
+#
+# This script is called from the Makefile and uses the following convention to
+# enable determination of success/failure:
+#
+#   - If the script is successful, the only output on stdout should be SUCCESS.
+#     The makefile checks for this particular string.
+#
+#   - Any string on stdout that is not SUCCESS will be shown in the makefile as
+#     the cause for the script to have failed.
+#
+#   - Any other informational prints should be on stderr.
+
+set -e
+
+source ${3}tensorflow/lite/micro/tools/make/bash_helpers.sh
+
+DOWNLOADS_DIR=${1}
+
+if [[ ${2} == "hifi3" ]]; then
+  COMMIT="d17bf205dc530a9e1a1d979249520f4401529db1"
+  LIBRARY_DIRNAME="ndsplib-hifi3"
+  LIBRARY_URL="https://github.com/foss-xtensa/${LIBRARY_DIRNAME}/archive/${COMMIT}.zip"
+  LIBRARY_MD5="5572b27361736c1f773474ebaf42c5d4"
+  CORE_NAME="HiFi3"
+elif [[ ${2} == "hifi4" ]]; then
+  COMMIT="aba2485ba12d9851fa398bcb5c18c05cc3731a17"
+  LIBRARY_DIRNAME="ndsplib-hifi4"
+  LIBRARY_URL="https://github.com/foss-xtensa/${LIBRARY_DIRNAME}/archive/${COMMIT}.zip"
+  LIBRARY_MD5="062b8f957c662b6ab834bbe284237b6c"
+  CORE_NAME="HiFi4"
+elif [[ ${2} == "hifi5" ]]; then
+  COMMIT="01c92ceb26cc0a598c6d83d17c3d88363bd8f7fc"
+  LIBRARY_DIRNAME="ndsplib-hifi5"
+  LIBRARY_URL="https://github.com/foss-xtensa/${LIBRARY_DIRNAME}/archive/${COMMIT}.zip"
+  LIBRARY_MD5="94b372d608781c13be2fb2d1a8fd3b58"
+  CORE_NAME="HiFi5"
+else
+  echo "Attempting to download an unsupported xtensa variant: ${2}"
+  exit 1
+fi
+
+LIBRARY_INSTALL_PATH=${DOWNLOADS_DIR}/${LIBRARY_DIRNAME}
+
+should_download=$(check_should_download ${DOWNLOADS_DIR})
+
+if [[ ${should_download} == "no" ]]; then
+  show_download_url_md5 ${LIBRARY_URL} ${LIBRARY_MD5}
+elif [ ! -d ${DOWNLOADS_DIR} ]; then
+  echo "The top-level downloads directory: ${DOWNLOADS_DIR} does not exist."
+  exit 1
+elif [ -d ${LIBRARY_INSTALL_PATH} ]; then
+  echo >&2 "${LIBRARY_INSTALL_PATH} already exists, skipping the download."
+else
+  TEMPDIR="$(mktemp -d)"
+  TEMPFILE="${TEMPDIR}/${LIBRARY_DIRNAME}.zip"
+  wget ${LIBRARY_URL} -O "$TEMPFILE" >&2
+  check_md5 "${TEMPFILE}" ${LIBRARY_MD5}
+
+  unzip -qo "$TEMPFILE" -d ${TEMPDIR} >&2
+  unzip -qo ${TEMPDIR}/${LIBRARY_DIRNAME}-${COMMIT}/NDSP_${CORE_NAME}/NDSP_${CORE_NAME}*.zip -d ${TEMPDIR}/${LIBRARY_DIRNAME}-${COMMIT}/NDSP_${CORE_NAME}/ >&2
+  find ${TEMPDIR}/${LIBRARY_DIRNAME}-${COMMIT}/NDSP_${CORE_NAME}/* -maxdepth 0 -type d -exec mv {} ${LIBRARY_INSTALL_PATH} \;
+  rm -rf "${TEMPDIR}"
+  # NDSP sources in GitHub currently use DOS-style newlines, which cause compiler errors.
+  find ${LIBRARY_INSTALL_PATH} -type f -exec sed -i.bak 's/\r$//g' {} \;
+
+  pushd "${LIBRARY_INSTALL_PATH}" > /dev/null
+  chmod -R +w ./
+  if [[ -f "../../ext_libs/ndsplib-${2}.patch" ]]; then
+    create_git_repo ./
+    apply_patch_to_folder ./ "../../ext_libs/ndsplib-${2}.patch" "TFLM patch"
+  fi
+  # Rename the strings in __renaming__.h to names that are traceable to TFLM.
+  # Note that renaming is disabled by default and must be enabled with -D__RENAMING__
+  sed -i 's/NatureDSP_/NatureDSP_TFLM_/' library/include_private/__renaming__.h
+fi
+
+echo "SUCCESS"
diff --git a/tensorflow/lite/micro/tools/make/flatbuffers.patch b/tensorflow/lite/micro/tools/make/flatbuffers.patch
index cb22cf0..2017775 100644
--- a/tensorflow/lite/micro/tools/make/flatbuffers.patch
+++ b/tensorflow/lite/micro/tools/make/flatbuffers.patch
@@ -1,5 +1,5 @@
 diff --git a/include/flatbuffers/base.h b/include/flatbuffers/base.h
-index a5ac10d..371b6fd 100644
+index 5c4cae79..1a631641 100644
 --- a/include/flatbuffers/base.h
 +++ b/include/flatbuffers/base.h
 @@ -1,6 +1,16 @@
@@ -20,10 +20,10 @@
  
  // If activate should be declared and included first.
 diff --git a/include/flatbuffers/default_allocator.h b/include/flatbuffers/default_allocator.h
-index 8b173af..975d938 100644
+index d4724122..975d9380 100644
 --- a/include/flatbuffers/default_allocator.h
 +++ b/include/flatbuffers/default_allocator.h
-@@ -39,26 +39,20 @@ class DefaultAllocator : public Allocator {
+@@ -39,24 +39,18 @@ class DefaultAllocator : public Allocator {
  // This is to avoid having a statically or dynamically allocated default
  // allocator, or having to move it between the classes that may own it.
  inline uint8_t *Allocate(Allocator *allocator, size_t size) {
@@ -52,15 +52,11 @@
  }
  
  }  // namespace flatbuffers
- 
--#endif  // FLATBUFFERS_DEFAULT_ALLOCATOR_H_
-\ No newline at end of file
-+#endif  // FLATBUFFERS_DEFAULT_ALLOCATOR_H_
 diff --git a/include/flatbuffers/flexbuffers.h b/include/flatbuffers/flexbuffers.h
-index 89f3f30..6e6d0b3 100644
+index 8e8cac14..52dae316 100644
 --- a/include/flatbuffers/flexbuffers.h
 +++ b/include/flatbuffers/flexbuffers.h
-@@ -496,9 +496,24 @@ class Reference {
+@@ -495,9 +495,24 @@ class Reference {
            return static_cast<double>(ReadUInt64(Indirect(), byte_width_));
          case FBT_NULL: return 0.0;
          case FBT_STRING: {
@@ -86,10 +82,10 @@
          case FBT_VECTOR: return static_cast<double>(AsVector().size());
          case FBT_BOOL:
 diff --git a/include/flatbuffers/util.h b/include/flatbuffers/util.h
-index 93a39de..1cd4e8f 100644
+index 1ccf3517..34a75193 100644
 --- a/include/flatbuffers/util.h
 +++ b/include/flatbuffers/util.h
-@@ -24,6 +24,12 @@
+@@ -23,6 +23,12 @@
  #include "flatbuffers/base.h"
  #include "flatbuffers/stl_emulation.h"
  
@@ -102,4 +98,3 @@
  #ifndef FLATBUFFERS_PREFER_PRINTF
  #  include <iomanip>
  #  include <sstream>
- 
\ No newline at end of file
diff --git a/tensorflow/lite/micro/tools/make/flatbuffers_download.sh b/tensorflow/lite/micro/tools/make/flatbuffers_download.sh
index af5e80f..52acccc 100755
--- a/tensorflow/lite/micro/tools/make/flatbuffers_download.sh
+++ b/tensorflow/lite/micro/tools/make/flatbuffers_download.sh
@@ -54,9 +54,9 @@
 if [ -d ${DOWNLOADED_FLATBUFFERS_PATH} ]; then
   echo >&2 "${DOWNLOADED_FLATBUFFERS_PATH} already exists, skipping the download."
 else
-  ZIP_PREFIX="a66de58af9565586832c276fbb4251fc416bf07f"
+  ZIP_PREFIX="v23.5.26"
   FLATBUFFERS_URL="https://github.com/google/flatbuffers/archive/${ZIP_PREFIX}.zip"
-  FLATBUFFERS_MD5="51a7a96747e1c33eb4aac6d52513a02f"
+  FLATBUFFERS_MD5="e87e8acd8e2d53653387ad78720316e2"
 
   TEMPDIR="$(mktemp -d)"
   TEMPFILE="${TEMPDIR}/${ZIP_PREFIX}.zip"
@@ -64,7 +64,7 @@
   check_md5 "${TEMPFILE}" ${FLATBUFFERS_MD5}
 
   unzip -qo "$TEMPFILE" -d "${TEMPDIR}" >&2
-  mv "${TEMPDIR}/flatbuffers-${ZIP_PREFIX}" ${DOWNLOADED_FLATBUFFERS_PATH}
+  mv "${TEMPDIR}/flatbuffers-${ZIP_PREFIX#v}" ${DOWNLOADED_FLATBUFFERS_PATH}
   rm -rf "${TEMPDIR}"
 
   pushd ${DOWNLOADED_FLATBUFFERS_PATH} > /dev/null
diff --git a/tensorflow/lite/micro/tools/make/targets/arc/arc_common.inc b/tensorflow/lite/micro/tools/make/targets/arc/arc_common.inc
index abc45c3..8809279 100644
--- a/tensorflow/lite/micro/tools/make/targets/arc/arc_common.inc
+++ b/tensorflow/lite/micro/tools/make/targets/arc/arc_common.inc
@@ -64,9 +64,6 @@
   PLATFORM_LDFLAGS += $(notdir $(LCF_FILE))
 endif
 
-  CXXFLAGS := $(filter-out -std=c++11,$(CXXFLAGS))
-  CCFLAGS := $(filter-out -std=c11,$(CCFLAGS))
-
   ldflags_to_remove = -Wl,--fatal-warnings -Wl,--gc-sections
   LDFLAGS := $(filter-out $(ldflags_to_remove),$(LDFLAGS))
   
diff --git a/tensorflow/lite/micro/tools/make/targets/bluepill_makefile.inc b/tensorflow/lite/micro/tools/make/targets/bluepill_makefile.inc
index 532689c..ae3b0b0 100644
--- a/tensorflow/lite/micro/tools/make/targets/bluepill_makefile.inc
+++ b/tensorflow/lite/micro/tools/make/targets/bluepill_makefile.inc
@@ -58,6 +58,7 @@
 
 LDFLAGS += \
   -T $(MAKEFILE_DIR)/targets/bluepill/bluepill.lds \
+  -Wl,--no-warn-rwx-segment \
   -Wl,-Map=gen/$(TARGET).map,--cref
 
 # Additional include paths needed for the stm_32_bare_lib only.
@@ -87,8 +88,7 @@
 
 MICROLITE_TEST_SRCS := $(filter-out $(EXCLUDED_TESTS), $(MICROLITE_TEST_SRCS))
 
-EXCLUDED_EXAMPLE_TESTS := \
-  $(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/Makefile.inc
+EXCLUDED_EXAMPLE_TESTS :=
 
 MICRO_LITE_EXAMPLE_TESTS := $(filter-out $(EXCLUDED_EXAMPLE_TESTS), $(MICRO_LITE_EXAMPLE_TESTS))
 
diff --git a/tensorflow/lite/micro/tools/make/targets/ceva_makefile.inc b/tensorflow/lite/micro/tools/make/targets/ceva_makefile.inc
index 5cc8ad1..d0ddc98 100755
--- a/tensorflow/lite/micro/tools/make/targets/ceva_makefile.inc
+++ b/tensorflow/lite/micro/tools/make/targets/ceva_makefile.inc
@@ -33,8 +33,8 @@
 	-D_LIBCPP_EXTERN_TEMPLATE_INLINE_VISIBILITY=""
 	 
 
-CXXFLAGS := -std=c++11 -DTF_LITE_STATIC_MEMORY 
-CCFLAGS  := -std=c11   -DTF_LITE_STATIC_MEMORY 
+CXXFLAGS := -std=c++17 -DTF_LITE_STATIC_MEMORY 
+CCFLAGS  := -std=c17   -DTF_LITE_STATIC_MEMORY 
 
 ifeq ($(TARGET_ARCH), CEVA_BX1)
 PLATFORM_FLAGS += \
diff --git a/tensorflow/lite/micro/tools/make/targets/cortex_a_generic_makefile.inc b/tensorflow/lite/micro/tools/make/targets/cortex_a_generic_makefile.inc
new file mode 100644
index 0000000..ad272cc
--- /dev/null
+++ b/tensorflow/lite/micro/tools/make/targets/cortex_a_generic_makefile.inc
@@ -0,0 +1,66 @@
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+# Generic Makefile target for ARM Cortex A builds.
+
+FPU := neon
+FLOAT_ABI := softfp
+
+GCC_TARGET_ARCH :=
+GCC_TARGET_CPU :=
+
+ifeq ($(TARGET_ARCH), armv8.2-a)
+  GCC_TARGET_ARCH := armv8.2-a
+
+else ifeq ($(TARGET_ARCH), armv7-a)
+  GCC_TARGET_ARCH := armv7-a
+
+else ifeq ($(TARGET_ARCH), cortex-a32)
+  GCC_TARGET_CPU := cortex-a32
+
+else
+  $(error "TARGET_ARCH=$(TARGET_ARCH) is not supported")
+endif
+
+TARGET_TOOLCHAIN_PREFIX := arm-none-eabi-
+
+ifneq ($(GCC_TARGET_ARCH),)
+  FLAGS_GCC += -march=$(GCC_TARGET_ARCH)
+else ifneq ($(GCC_TARGET_CPU),)
+  FLAGS_GCC += -mcpu=$(GCC_TARGET_CPU)
+endif
+
+CXXFLAGS += $(FLAGS_GCC)
+CCFLAGS += $(FLAGS_GCC)
+
+PLATFORM_FLAGS = \
+  -DTF_LITE_MCU_DEBUG_LOG \
+  -mfloat-abi=$(FLOAT_ABI) \
+  -mfpu=$(FPU) \
+  -funsigned-char \
+  -mlittle-endian \
+  -Wno-type-limits \
+  -Wno-unused-private-field \
+  -fomit-frame-pointer \
+  -MD
+
+ifneq ($(PIC),)
+  PLATFORM_FLAGS += -fpic
+endif
+
+# Common + C/C++ flags
+CXXFLAGS += $(PLATFORM_FLAGS)
+CCFLAGS += $(PLATFORM_FLAGS)
+
diff --git a/tensorflow/lite/micro/tools/make/targets/cortex_m_corstone_300_makefile.inc b/tensorflow/lite/micro/tools/make/targets/cortex_m_corstone_300_makefile.inc
index 0ffe5a3..0c483ac 100644
--- a/tensorflow/lite/micro/tools/make/targets/cortex_m_corstone_300_makefile.inc
+++ b/tensorflow/lite/micro/tools/make/targets/cortex_m_corstone_300_makefile.inc
@@ -1,4 +1,4 @@
-# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+# Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,23 +16,29 @@
 # ARM Cortex M makefile targeted for a FVP based on Arm Corstone-300 software.
 # For more info see: tensorflow/lite/micro/cortex_m_corstone_300/README.md
 
-export PATH := $(MAKEFILE_DIR)/downloads/corstone300/models/Linux64_GCC-6.4:$(PATH)
-DOWNLOAD_RESULT := $(shell $(MAKEFILE_DIR)/corstone_300_download.sh ${MAKEFILE_DIR}/downloads)
+UNAME_M := $(shell uname -m)
+ifeq ($(UNAME_M), aarch64)
+  export PATH := $(DOWNLOADS_DIR)/corstone300/models/Linux64_armv8l_GCC-9.3:$(PATH)
+else
+  export PATH := $(DOWNLOADS_DIR)/corstone300/models/Linux64_GCC-9.3:$(PATH)
+endif
+
+DOWNLOAD_RESULT := $(shell $(MAKEFILE_DIR)/corstone_300_download.sh $(DOWNLOADS_DIR) $(TENSORFLOW_ROOT))
 ifneq ($(DOWNLOAD_RESULT), SUCCESS)
   $(error Something went wrong with the Arm Corstone-300 software download: $(DOWNLOAD_RESULT))
 endif
 
-ETHOS_U_CORE_PLATFORM := ${PWD}/$(MAKEFILE_DIR)/downloads/ethos_u_core_platform/targets/corstone-300
-DOWNLOAD_RESULT := $(shell $(MAKEFILE_DIR)/ethos_u_core_platform_download.sh ${MAKEFILE_DIR}/downloads)
+ETHOS_U_CORE_PLATFORM := $(DOWNLOADS_DIR)/ethos_u_core_platform/targets/corstone-300
+DOWNLOAD_RESULT := $(shell $(MAKEFILE_DIR)/ethos_u_core_platform_download.sh $(DOWNLOADS_DIR) $(TENSORFLOW_ROOT))
 ifneq ($(DOWNLOAD_RESULT), SUCCESS)
   $(error Something went wrong with the Ethos-U Core Platform software download: $(DOWNLOAD_RESULT))
 endif
 
 # This target has dependencies to CMSIS-Device so just in case running without OPTIMIZED_KERNEL_DIR=cmsis_nn.
-CMSIS_DEFAULT_DOWNLOAD_PATH := $(MAKEFILE_DIR)/downloads/cmsis
+CMSIS_DEFAULT_DOWNLOAD_PATH := $(DOWNLOADS_DIR)/cmsis
 CMSIS_PATH := $(CMSIS_DEFAULT_DOWNLOAD_PATH)
 ifeq ($(CMSIS_PATH), $(CMSIS_DEFAULT_DOWNLOAD_PATH))
-  DOWNLOAD_RESULT := $(shell $(MAKEFILE_DIR)/ext_libs/cmsis_download.sh ${MAKEFILE_DIR}/downloads)
+  DOWNLOAD_RESULT := $(shell $(MAKEFILE_DIR)/ext_libs/cmsis_download.sh $(DOWNLOADS_DIR) $(TENSORFLOW_ROOT))
   ifneq ($(DOWNLOAD_RESULT), SUCCESS)
     $(error Something went wrong with the CMSIS download: $(DOWNLOAD_RESULT))
   endif
@@ -97,14 +103,13 @@
   FLAGS_ARMC = \
     --target=arm-arm-none-eabi \
     -Wno-unused-private-field \
-    -mcpu=$(MCPU_OPTION) \
-    -ffp-mode=full
+    -mcpu=$(MCPU_OPTION)
 
   # Pass comma separated linker options to armlink
   ARMC6_LDFLAGS += -Wl,--strict,--summary_stderr,--info,summarysizes,--map
   ARMC6_LDFLAGS += -Wl,--load_addr_map_info,--xref,--callgraph,--symbols
   ARMC6_LDFLAGS += -Wl,--info,sizes,--info,totals,--info,unused,--info,veneers
-  ARMC6_LDFLAGS += -Wl,--list=${TENSORFLOW_ROOT}gen/$(TARGET).map
+  ARMC6_LDFLAGS += -Wl,--list=gen/$(TARGET).map
   ARMC6_LDFLAGS += -Wl,--entry=Reset_Handler  --verbose
   ARMC6_LDFLAGS += -Wl,--scatter=$(ETHOS_U_CORE_PLATFORM)/platform.scatter
 
@@ -130,11 +135,16 @@
   # https://developer.arm.com/documentation/100891/0611/troubleshooting/general-troubleshooting-advice
   MICROLITE_LIBS := $(filter-out -lm,$(MICROLITE_LIBS))
 
+  # This does not build with armclang and is anyway not used by this target.
+  EXCLUDED_TESTS := \
+    tensorflow/lite/micro/tools/benchmarking/Makefile.inc
+  MICRO_LITE_BENCHMARKS := $(filter-out $(EXCLUDED_TESTS), $(MICRO_LITE_BENCHMARKS))
+
 else ifeq ($(TOOLCHAIN), gcc)
-  TARGET_DEFAULT_TOOLCHAIN_ROOT := $(MAKEFILE_DIR)/downloads/gcc_embedded/bin/
+  TARGET_DEFAULT_TOOLCHAIN_ROOT := $(DOWNLOADS_DIR)/gcc_embedded/bin/
   TARGET_TOOLCHAIN_ROOT := $(TARGET_DEFAULT_TOOLCHAIN_ROOT)
   ifeq ($(TARGET_TOOLCHAIN_ROOT), $(TARGET_DEFAULT_TOOLCHAIN_ROOT))
-    DOWNLOAD_RESULT := $(shell $(MAKEFILE_DIR)/arm_gcc_download.sh ${MAKEFILE_DIR}/downloads)
+    DOWNLOAD_RESULT := $(shell $(MAKEFILE_DIR)/arm_gcc_download.sh $(DOWNLOADS_DIR) $(TENSORFLOW_ROOT))
     ifneq ($(DOWNLOAD_RESULT), SUCCESS)
       $(error Something went wrong with the GCC download: $(DOWNLOAD_RESULT))
     endif
@@ -148,7 +158,7 @@
   LDFLAGS += \
     --specs=nosys.specs \
     -T $(ETHOS_U_CORE_PLATFORM)/platform_parsed.ld \
-    -Wl,-Map=${TENSORFLOW_ROOT}gen/$(TARGET).map,--cref \
+    -Wl,-Map=gen/$(TARGET).map,--cref \
     -Wl,--gc-sections \
     --entry Reset_Handler
 
@@ -186,7 +196,7 @@
   ETHOSU_ARCH=u55
 endif
 
-CMSIS_DEFAULT_DOWNLOAD_PATH := $(MAKEFILE_DIR)/downloads/cmsis
+CMSIS_DEFAULT_DOWNLOAD_PATH := $(DOWNLOADS_DIR)/cmsis
 CMSIS_PATH := $(CMSIS_DEFAULT_DOWNLOAD_PATH)
 THIRD_PARTY_CC_SRCS += \
   $(CMSIS_PATH)/Device/ARM/$(ARM_CPU)/Source/system_$(ARM_CPU).c \
@@ -197,8 +207,13 @@
 
 # TODO(#274): Examine why some tests fail here.
 EXCLUDED_TESTS := \
-  tensorflow/lite/micro/memory_arena_threshold_test.cc  \
-  tensorflow/lite/micro/recording_micro_allocator_test.cc
+  $(TENSORFLOW_ROOT)tensorflow/lite/micro/memory_arena_threshold_test.cc  \
+  $(TENSORFLOW_ROOT)tensorflow/lite/micro/recording_micro_allocator_test.cc
+ifeq ($(CO_PROCESSOR), ethos_u)
+# This does not work with Ethos-U enabled since then NPU PMU counters are used instead for the sake of the benchmark example.
+EXCLUDED_TESTS += \
+  $(TENSORFLOW_ROOT)tensorflow/lite/micro/micro_time_test.cc
+endif
 MICROLITE_TEST_SRCS := $(filter-out $(EXCLUDED_TESTS), $(MICROLITE_TEST_SRCS))
 
-TEST_SCRIPT := tensorflow/lite/micro/testing/test_with_arm_corstone_300.sh
+TEST_SCRIPT := $(TENSORFLOW_ROOT)tensorflow/lite/micro/testing/test_with_arm_corstone_300.sh
diff --git a/tensorflow/lite/micro/tools/make/targets/cortex_m_generic_makefile.inc b/tensorflow/lite/micro/tools/make/targets/cortex_m_generic_makefile.inc
index 0e034ad..5aa5d25 100644
--- a/tensorflow/lite/micro/tools/make/targets/cortex_m_generic_makefile.inc
+++ b/tensorflow/lite/micro/tools/make/targets/cortex_m_generic_makefile.inc
@@ -1,4 +1,4 @@
-# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+# Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,6 +16,16 @@
 # Generic Makefile target for ARM Cortex M builds.
 # For more info see: tensorflow/lite/micro/cortex_m_generic/README.md
 
+# Needed in case running without OPTIMIZED_KERNEL_DIR=cmsis_nn.
+CMSIS_DEFAULT_DOWNLOAD_PATH := $(DOWNLOADS_DIR)/cmsis
+CMSIS_PATH := $(CMSIS_DEFAULT_DOWNLOAD_PATH)
+ifeq ($(CMSIS_PATH), $(CMSIS_DEFAULT_DOWNLOAD_PATH))
+  DOWNLOAD_RESULT := $(shell $(MAKEFILE_DIR)/ext_libs/cmsis_download.sh $(DOWNLOADS_DIR) $(TENSORFLOW_ROOT))
+  ifneq ($(DOWNLOAD_RESULT), SUCCESS)
+    $(error Something went wrong with the CMSIS download: $(DOWNLOAD_RESULT))
+  endif
+endif
+
 FLOAT := soft
 GCC_TARGET_ARCH := $(TARGET_ARCH)
 
@@ -92,8 +102,6 @@
   CORE=M85
   ARM_LDFLAGS := -Wl,--cpu=8.1-M.Main.mve.fp
   FLOAT=hard
-  # GCC does not yet support cortex-m85 option hence go with cortex-m55 for now.
-  GCC_TARGET_ARCH := cortex-m55
 
 else ifeq ($(TARGET_ARCH), project_generation)
   # No flags needed here as project_generation does not build anything.
@@ -204,5 +212,3 @@
     $(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/Makefile.inc
   MICRO_LITE_EXAMPLE_TESTS := $(filter-out $(EXCLUDED_EXAMPLE_TESTS), $(MICRO_LITE_EXAMPLE_TESTS))
 endif
-
-include $(MAKEFILE_DIR)/ext_libs/eyalroz_printf.inc
diff --git a/tensorflow/lite/micro/tools/make/targets/hexagon_makefile.inc b/tensorflow/lite/micro/tools/make/targets/hexagon_makefile.inc
index 2e7d18c..6fe7be7 100644
--- a/tensorflow/lite/micro/tools/make/targets/hexagon_makefile.inc
+++ b/tensorflow/lite/micro/tools/make/targets/hexagon_makefile.inc
@@ -80,6 +80,10 @@
   -mcpu=$(HEXAGON_CPU_VER) \
   -m$(HEXAGON_CPU_VER)
 
+ifeq ($(HEXAGON_PIC_BUILD), true)
+  PLATFORM_ARGS += -fPIC
+endif
+
 # See http://b/183462077 for more details on why we need -G0 for an LPI build.
 ifeq ($(HEXAGON_LPI_BUILD), true)
   PLATFORM_ARGS += -G0
@@ -112,5 +116,10 @@
 
 MICROLITE_TEST_SRCS := $(filter-out $(EXCLUDED_TESTS), $(MICROLITE_TEST_SRCS))
 
+# TODO(b/302404477): micro_speech_test example does not pass due to misprediction
+EXCLUDED_EXAMPLE_TESTS := \
+  $(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/micro_speech/Makefile.inc
+MICRO_LITE_EXAMPLE_TESTS := $(filter-out $(EXCLUDED_EXAMPLE_TESTS), $(MICRO_LITE_EXAMPLE_TESTS))
+
 TEST_SCRIPT := $(TENSORFLOW_ROOT)tensorflow/lite/micro/testing/test_hexagon_binary.sh
 SIZE_SCRIPT := $(TENSORFLOW_ROOT)tensorflow/lite/micro/testing/size_hexagon_binary.sh
diff --git a/tensorflow/lite/micro/tools/make/targets/mips_makefile.inc b/tensorflow/lite/micro/tools/make/targets/mips_makefile.inc
new file mode 100644
index 0000000..cf0d38e
--- /dev/null
+++ b/tensorflow/lite/micro/tools/make/targets/mips_makefile.inc
@@ -0,0 +1,41 @@
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+TARGET_ARCH := mips
+TARGET_TOOLCHAIN_PREFIX := mips-elf-
+
+
+# Allow additional flags on the command line for debugging.
+MIPS_EXTRA_CFLAGS :=
+
+export PATH := $(TARGET_TOOLCHAIN_ROOT):$(PATH)
+
+PLATFORM_FLAGS = \
+  -fno-builtin-printf \
+  -DTF_LITE_MCU_DEBUG_LOG \
+  -DTF_LITE_USE_GLOBAL_CMATH_FUNCTIONS \
+  -fno-delete-null-pointer-checks \
+  -fomit-frame-pointer
+
+CXXFLAGS += $(PLATFORM_FLAGS) \
+  -fpermissive \
+  -fno-use-cxa-atexit \
+  -DTF_LITE_USE_GLOBAL_MIN \
+  -DTF_LITE_USE_GLOBAL_MAX
+
+CCFLAGS += $(PLATFORM_FLAGS)
+
+CCFLAGS += $(MIPS_EXTRA_CFLAGS)
+CXXFLAGS += $(MIPS_EXTRA_CFLAGS)
diff --git a/tensorflow/lite/micro/tools/make/targets/riscv32_generic_makefile.inc b/tensorflow/lite/micro/tools/make/targets/riscv32_generic_makefile.inc
index ce5f0eb..453e9d0 100644
--- a/tensorflow/lite/micro/tools/make/targets/riscv32_generic_makefile.inc
+++ b/tensorflow/lite/micro/tools/make/targets/riscv32_generic_makefile.inc
@@ -2,6 +2,13 @@
 TARGET_ARCH := riscv32
 TARGET_TOOLCHAIN_PREFIX := riscv64-unknown-elf-
 
+RISCV_ARCH := rv32imc
+RISCV_ABI := ilp32
+RISCV_CODE_MODEL := medany
+
+# Allow additional flags on the command line for debugging.
+RISCV_EXTRA_CFLAGS :=
+
 TARGET_DEFAULT_TOOLCHAIN_ROOT := $(DOWNLOADS_DIR)/riscv_toolchain/bin/
 TARGET_TOOLCHAIN_ROOT := $(TARGET_DEFAULT_TOOLCHAIN_ROOT)
 ifeq ($(TARGET_TOOLCHAIN_ROOT), $(TARGET_DEFAULT_TOOLCHAIN_ROOT))
@@ -11,9 +18,9 @@
 export PATH := $(TARGET_TOOLCHAIN_ROOT):$(PATH)
 
 PLATFORM_FLAGS = \
-  -march=rv32imac \
-  -mabi=ilp32 \
-  -mcmodel=medany \
+  -march=$(RISCV_ARCH) \
+  -mabi=$(RISCV_ABI) \
+  -mcmodel=$(RISCV_CODE_MODEL) \
   -mexplicit-relocs \
   -fno-builtin-printf \
   -DTF_LITE_MCU_DEBUG_LOG \
@@ -40,9 +47,13 @@
 
 MICROLITE_TEST_SRCS := $(filter-out $(EXCLUDED_TESTS), $(MICROLITE_TEST_SRCS))
 
+CCFLAGS += $(RISCV_EXTRA_CFLAGS)
+CXXFLAGS += $(RISCV_EXTRA_CFLAGS)
+
 # This disables the "linker relaxation" optimization, which produced incorrect code.
 # TODO(b/279805615): Check whether this is fixed in newer versions of the toolchain.
 LDFLAGS += -mno-relax
 TEST_SCRIPT := $(TENSORFLOW_ROOT)tensorflow/lite/micro/testing/test_with_qemu.sh riscv32 rv32
 SIZE_SCRIPT := ${TENSORFLOW_ROOT}tensorflow/lite/micro/testing/size_riscv32_binary.sh
 
+include $(MAKEFILE_DIR)/ext_libs/eyalroz_printf.inc
diff --git a/tensorflow/lite/micro/tools/make/targets/xtensa_makefile.inc b/tensorflow/lite/micro/tools/make/targets/xtensa_makefile.inc
index 8d970c7..92527ad 100644
--- a/tensorflow/lite/micro/tools/make/targets/xtensa_makefile.inc
+++ b/tensorflow/lite/micro/tools/make/targets/xtensa_makefile.inc
@@ -35,12 +35,11 @@
 TARGET_ARCH_DEFINES := -D$(shell echo $(TARGET_ARCH) | tr [a-z] [A-Z])
 
 PLATFORM_FLAGS = \
+  -stdlib=libc++ \
   -DTF_LITE_MCU_DEBUG_LOG \
   -DTF_LITE_USE_CTIME \
   --xtensa-core=$(XTENSA_CORE) \
   -mcoproc \
-  -DMAX_RFFT_PWR=9 \
-  -DMIN_RFFT_PWR=MAX_RFFT_PWR \
   $(TARGET_ARCH_DEFINES) \
   -mlongcalls
 
diff --git a/tensorflow/lite/micro/tools/make/third_party_downloads.inc b/tensorflow/lite/micro/tools/make/third_party_downloads.inc
index a8e63e1..902206d 100644
--- a/tensorflow/lite/micro/tools/make/third_party_downloads.inc
+++ b/tensorflow/lite/micro/tools/make/third_party_downloads.inc
@@ -39,5 +39,4 @@
 EMBARC_MLI_PRE_COMPILED_URL := "https://github.com/foss-for-synopsys-dwc-arc-processors/embarc_mli/releases/download/Release_1.1/embARC_MLI_package.zip"
 EMBARC_MLI_PRE_COMPILED_MD5 := "173990c2dde4efef6a2c95b92d1f0244"
 
-ETHOSU_URL := "https://git.mlplatform.org/ml/ethos-u/ethos-u-core-driver.git/snapshot/ethos-u-core-driver-24455eedb9e8939f8a28ca0101a6f2d171e1b2f9.tar.gz"
-ETHOSU_MD5 := "14b5712525d4af612d35217f0bc53fcc"
+# Skip the md5sum check since the ethos-u-core-driver download link is non-deterministic, see https://github.com/google/gitiles/issues/84
diff --git a/tensorflow/lite/micro/tools/model_transforms_utils.py b/tensorflow/lite/micro/tools/model_transforms_utils.py
index c713f01..4a59d88 100644
--- a/tensorflow/lite/micro/tools/model_transforms_utils.py
+++ b/tensorflow/lite/micro/tools/model_transforms_utils.py
@@ -264,7 +264,7 @@
       schema_fb.TensorType.INT32: np.int32,
       schema_fb.TensorType.UINT8: np.uint8,
       schema_fb.TensorType.INT64: np.int64,
-      schema_fb.TensorType.STRING: np.string_,
+      schema_fb.TensorType.STRING: np.bytes_,
       schema_fb.TensorType.BOOL: np.bool_,
       schema_fb.TensorType.INT16: np.int16,
       schema_fb.TensorType.COMPLEX64: np.complex64,
diff --git a/tensorflow/lite/micro/tools/project_generation/Makefile b/tensorflow/lite/micro/tools/project_generation/Makefile
index 7497f07..f73fbdb 100644
--- a/tensorflow/lite/micro/tools/project_generation/Makefile
+++ b/tensorflow/lite/micro/tools/project_generation/Makefile
@@ -72,14 +72,14 @@
 endif
 
 CXXFLAGS := \
-  -std=c++11 \
+  -std=c++17 \
   -fno-rtti \
   -fno-exceptions \
   -fno-threadsafe-statics \
   $(COMMON_FLAGS)
 
 CCFLAGS := \
-  -std=c11 \
+  -std=c17 \
   $(COMMON_FLAGS)
 
 ARFLAGS := -r
@@ -90,6 +90,7 @@
 LIB := $(GENDIR)/libtflm.a
 
 TFLM_CC_SRCS := $(shell find $(TENSORFLOW_ROOT)tensorflow -name "*.cc" -o -name "*.c")
+TFLM_CC_SRCS += $(shell find $(TENSORFLOW_ROOT)signal -name "*.cc" -o -name "*.c")
 OBJS := $(addprefix $(OBJDIR)/, $(patsubst %.c,%.o,$(patsubst %.cc,%.o,$(TFLM_CC_SRCS))))
 
 # if the third party printf library is present, add the include paths
diff --git a/tensorflow/lite/micro/tools/requantize_flatbuffer_test.py b/tensorflow/lite/micro/tools/requantize_flatbuffer_test.py
index 4d80991..885af89 100644
--- a/tensorflow/lite/micro/tools/requantize_flatbuffer_test.py
+++ b/tensorflow/lite/micro/tools/requantize_flatbuffer_test.py
@@ -24,7 +24,7 @@
 from tflite_micro.tensorflow.lite.tools import flatbuffer_utils
 
 
-#TODO(b/248061370): replace the keras model creation process with flatbuffer manipulation to speed up test
+# TODO(b/248061370): replace the keras model creation process with flatbuffer manipulation to speed up test
 def create_simple_fc_model():
   '''Create a simple model with two fully connected(fc) layers'''
   model = tf.keras.models.Sequential([
@@ -60,6 +60,9 @@
         EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
     ]
   converter.representative_dataset = representative_dataset_gen
+  # TODO(b/324385802): Support per-channel quantization for FullyConnected.
+  converter._experimental_disable_per_channel_quantization_for_dense_layers = True
+  converter._experimental_disable_per_channel = True
   return converter.convert()
 
 
diff --git a/tensorflow/lite/micro/tools/requantize_flatbuffer_utils.py b/tensorflow/lite/micro/tools/requantize_flatbuffer_utils.py
index 5709ff2..c4a3b10 100644
--- a/tensorflow/lite/micro/tools/requantize_flatbuffer_utils.py
+++ b/tensorflow/lite/micro/tools/requantize_flatbuffer_utils.py
@@ -24,7 +24,7 @@
     TensorType.INT32: np.int32,
     TensorType.UINT8: np.uint8,
     TensorType.INT64: np.int64,
-    TensorType.STRING: np.string_,
+    TensorType.STRING: np.bytes_,
     TensorType.BOOL: np.bool_,
     TensorType.INT16: np.int16,
     TensorType.COMPLEX64: np.complex64,
diff --git a/tensorflow/lite/micro/tools/tflm_model_transforms_lib.py b/tensorflow/lite/micro/tools/tflm_model_transforms_lib.py
index 60530ff..2bf90cf 100644
--- a/tensorflow/lite/micro/tools/tflm_model_transforms_lib.py
+++ b/tensorflow/lite/micro/tools/tflm_model_transforms_lib.py
@@ -29,7 +29,7 @@
 
 from tflite_micro.tensorflow.lite.tools import flatbuffer_utils
 from tflite_micro.tensorflow.lite.micro.tools import model_transforms_utils
-from tflite_micro.tensorflow.lite.micro.python.interpreter.src import runtime
+from tflite_micro.python.tflite_micro import runtime
 
 
 def _save_and_align_flatbuffer(model, model_path):
diff --git a/tensorflow/lite/portable_type_to_tflitetype.h b/tensorflow/lite/portable_type_to_tflitetype.h
index b600585..03357db 100644
--- a/tensorflow/lite/portable_type_to_tflitetype.h
+++ b/tensorflow/lite/portable_type_to_tflitetype.h
@@ -52,6 +52,10 @@
     return TFLITE_TYPE_ENUM;                                   \
   }                                                            \
   template <>                                                  \
+  constexpr TfLiteType typeToTfLiteType<const CPP_TYPE>() {    \
+    return TFLITE_TYPE_ENUM;                                   \
+  }                                                            \
+  template <>                                                  \
   struct TfLiteTypeToType<TFLITE_TYPE_ENUM> {                  \
     using Type = CPP_TYPE;                                     \
   }
diff --git a/tensorflow/lite/python/BUILD b/tensorflow/lite/python/BUILD
index 3dc7232..7a7ce3c 100644
--- a/tensorflow/lite/python/BUILD
+++ b/tensorflow/lite/python/BUILD
@@ -10,7 +10,7 @@
 
 flatbuffer_py_library(
     name = "schema_py",
-    srcs = ["//tensorflow/lite/schema:schema.fbs"],
+    srcs = ["//tensorflow/compiler/mlir/lite/schema:schema.fbs"],
 )
 
 py_library(
@@ -20,6 +20,6 @@
     visibility = ["//:__subpackages__"],
     deps = [
         requirement("flatbuffers"),
-        requirement("tensorflow-cpu"),
+        requirement("tensorflow"),
     ],
 )
diff --git a/tensorflow/lite/python/schema_py_generated.py b/tensorflow/lite/python/schema_py_generated.py
index 914340e..52999cb 100755
--- a/tensorflow/lite/python/schema_py_generated.py
+++ b/tensorflow/lite/python/schema_py_generated.py
@@ -7,1386 +7,64 @@
 from flatbuffers.compat import import_numpy
 np = import_numpy()
 
-class ATan2Options(object):
-    __slots__ = ['_tab']
+class TensorType(object):
+    FLOAT32 = 0
+    FLOAT16 = 1
+    INT32 = 2
+    UINT8 = 3
+    INT64 = 4
+    STRING = 5
+    BOOL = 6
+    INT16 = 7
+    COMPLEX64 = 8
+    INT8 = 9
+    FLOAT64 = 10
+    COMPLEX128 = 11
+    UINT64 = 12
+    RESOURCE = 13
+    VARIANT = 14
+    UINT32 = 15
+    UINT16 = 16
+    INT4 = 17
+    BFLOAT16 = 18
 
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = ATan2Options()
-        x.Init(buf, n + offset)
-        return x
 
-    @classmethod
-    def GetRootAsATan2Options(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def ATan2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # ATan2Options
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def ATan2OptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return ATan2OptionsStart(builder)
-def ATan2OptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return ATan2OptionsEnd(builder)
-
-class ATan2OptionsT(object):
-
-    # ATan2OptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        atan2options = ATan2Options()
-        atan2options.Init(buf, pos)
-        return cls.InitFromObj(atan2options)
-
-    @classmethod
-    def InitFromObj(cls, atan2options):
-        x = ATan2OptionsT()
-        x._UnPack(atan2options)
-        return x
-
-    # ATan2OptionsT
-    def _UnPack(self, atan2options):
-        if atan2options is None:
-            return
-
-    # ATan2OptionsT
-    def Pack(self, builder):
-        ATan2OptionsStart(builder)
-        atan2options = ATan2OptionsEnd(builder)
-        return atan2options
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class AbsOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = AbsOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsAbsOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def AbsOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # AbsOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def AbsOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return AbsOptionsStart(builder)
-def AbsOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return AbsOptionsEnd(builder)
-
-class AbsOptionsT(object):
-
-    # AbsOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        absOptions = AbsOptions()
-        absOptions.Init(buf, pos)
-        return cls.InitFromObj(absOptions)
-
-    @classmethod
-    def InitFromObj(cls, absOptions):
-        x = AbsOptionsT()
-        x._UnPack(absOptions)
-        return x
-
-    # AbsOptionsT
-    def _UnPack(self, absOptions):
-        if absOptions is None:
-            return
-
-    # AbsOptionsT
-    def Pack(self, builder):
-        AbsOptionsStart(builder)
-        absOptions = AbsOptionsEnd(builder)
-        return absOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-class ActivationFunctionType(object):
+class QuantizationDetails(object):
     NONE = 0
-    RELU = 1
-    RELU_N1_TO_1 = 2
-    RELU6 = 3
-    TANH = 4
-    SIGN_BIT = 5
-# automatically generated by the FlatBuffers compiler, do not modify
+    CustomQuantization = 1
 
-# namespace: tflite
+def QuantizationDetailsCreator(unionType, table):
+    from flatbuffers.table import Table
+    if not isinstance(table, Table):
+        return None
+    if unionType == QuantizationDetails().CustomQuantization:
+        return CustomQuantizationT.InitFromBuf(table.Bytes, table.Pos)
+    return None
 
-from flatbuffers.compat import import_numpy
-np = import_numpy()
 
-class AddNOptions(object):
-    __slots__ = ['_tab']
+class DimensionType(object):
+    DENSE = 0
+    SPARSE_CSR = 1
 
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = AddNOptions()
-        x.Init(buf, n + offset)
-        return x
 
-    @classmethod
-    def GetRootAsAddNOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def AddNOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+class SparseIndexVector(object):
+    NONE = 0
+    Int32Vector = 1
+    Uint16Vector = 2
+    Uint8Vector = 3
 
-    # AddNOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def AddNOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return AddNOptionsStart(builder)
-def AddNOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return AddNOptionsEnd(builder)
-
-class AddNOptionsT(object):
-
-    # AddNOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        addNoptions = AddNOptions()
-        addNoptions.Init(buf, pos)
-        return cls.InitFromObj(addNoptions)
-
-    @classmethod
-    def InitFromObj(cls, addNoptions):
-        x = AddNOptionsT()
-        x._UnPack(addNoptions)
-        return x
-
-    # AddNOptionsT
-    def _UnPack(self, addNoptions):
-        if addNoptions is None:
-            return
-
-    # AddNOptionsT
-    def Pack(self, builder):
-        AddNOptionsStart(builder)
-        addNoptions = AddNOptionsEnd(builder)
-        return addNoptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class AddOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = AddOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsAddOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def AddOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # AddOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # AddOptions
-    def FusedActivationFunction(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # AddOptions
-    def PotScaleInt16(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return True
-
-def AddOptionsStart(builder): builder.StartObject(2)
-def Start(builder):
-    return AddOptionsStart(builder)
-def AddOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0)
-def AddFusedActivationFunction(builder, fusedActivationFunction):
-    return AddOptionsAddFusedActivationFunction(builder, fusedActivationFunction)
-def AddOptionsAddPotScaleInt16(builder, potScaleInt16): builder.PrependBoolSlot(1, potScaleInt16, 1)
-def AddPotScaleInt16(builder, potScaleInt16):
-    return AddOptionsAddPotScaleInt16(builder, potScaleInt16)
-def AddOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return AddOptionsEnd(builder)
-
-class AddOptionsT(object):
-
-    # AddOptionsT
-    def __init__(self):
-        self.fusedActivationFunction = 0  # type: int
-        self.potScaleInt16 = True  # type: bool
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        addOptions = AddOptions()
-        addOptions.Init(buf, pos)
-        return cls.InitFromObj(addOptions)
-
-    @classmethod
-    def InitFromObj(cls, addOptions):
-        x = AddOptionsT()
-        x._UnPack(addOptions)
-        return x
-
-    # AddOptionsT
-    def _UnPack(self, addOptions):
-        if addOptions is None:
-            return
-        self.fusedActivationFunction = addOptions.FusedActivationFunction()
-        self.potScaleInt16 = addOptions.PotScaleInt16()
-
-    # AddOptionsT
-    def Pack(self, builder):
-        AddOptionsStart(builder)
-        AddOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        AddOptionsAddPotScaleInt16(builder, self.potScaleInt16)
-        addOptions = AddOptionsEnd(builder)
-        return addOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class ArgMaxOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = ArgMaxOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsArgMaxOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def ArgMaxOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # ArgMaxOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # ArgMaxOptions
-    def OutputType(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-def ArgMaxOptionsStart(builder): builder.StartObject(1)
-def Start(builder):
-    return ArgMaxOptionsStart(builder)
-def ArgMaxOptionsAddOutputType(builder, outputType): builder.PrependInt8Slot(0, outputType, 0)
-def AddOutputType(builder, outputType):
-    return ArgMaxOptionsAddOutputType(builder, outputType)
-def ArgMaxOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return ArgMaxOptionsEnd(builder)
-
-class ArgMaxOptionsT(object):
-
-    # ArgMaxOptionsT
-    def __init__(self):
-        self.outputType = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        argMaxOptions = ArgMaxOptions()
-        argMaxOptions.Init(buf, pos)
-        return cls.InitFromObj(argMaxOptions)
-
-    @classmethod
-    def InitFromObj(cls, argMaxOptions):
-        x = ArgMaxOptionsT()
-        x._UnPack(argMaxOptions)
-        return x
-
-    # ArgMaxOptionsT
-    def _UnPack(self, argMaxOptions):
-        if argMaxOptions is None:
-            return
-        self.outputType = argMaxOptions.OutputType()
-
-    # ArgMaxOptionsT
-    def Pack(self, builder):
-        ArgMaxOptionsStart(builder)
-        ArgMaxOptionsAddOutputType(builder, self.outputType)
-        argMaxOptions = ArgMaxOptionsEnd(builder)
-        return argMaxOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class ArgMinOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = ArgMinOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsArgMinOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def ArgMinOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # ArgMinOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # ArgMinOptions
-    def OutputType(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-def ArgMinOptionsStart(builder): builder.StartObject(1)
-def Start(builder):
-    return ArgMinOptionsStart(builder)
-def ArgMinOptionsAddOutputType(builder, outputType): builder.PrependInt8Slot(0, outputType, 0)
-def AddOutputType(builder, outputType):
-    return ArgMinOptionsAddOutputType(builder, outputType)
-def ArgMinOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return ArgMinOptionsEnd(builder)
-
-class ArgMinOptionsT(object):
-
-    # ArgMinOptionsT
-    def __init__(self):
-        self.outputType = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        argMinOptions = ArgMinOptions()
-        argMinOptions.Init(buf, pos)
-        return cls.InitFromObj(argMinOptions)
-
-    @classmethod
-    def InitFromObj(cls, argMinOptions):
-        x = ArgMinOptionsT()
-        x._UnPack(argMinOptions)
-        return x
-
-    # ArgMinOptionsT
-    def _UnPack(self, argMinOptions):
-        if argMinOptions is None:
-            return
-        self.outputType = argMinOptions.OutputType()
-
-    # ArgMinOptionsT
-    def Pack(self, builder):
-        ArgMinOptionsStart(builder)
-        ArgMinOptionsAddOutputType(builder, self.outputType)
-        argMinOptions = ArgMinOptionsEnd(builder)
-        return argMinOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class AssignVariableOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = AssignVariableOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsAssignVariableOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def AssignVariableOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # AssignVariableOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def AssignVariableOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return AssignVariableOptionsStart(builder)
-def AssignVariableOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return AssignVariableOptionsEnd(builder)
-
-class AssignVariableOptionsT(object):
-
-    # AssignVariableOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        assignVariableOptions = AssignVariableOptions()
-        assignVariableOptions.Init(buf, pos)
-        return cls.InitFromObj(assignVariableOptions)
-
-    @classmethod
-    def InitFromObj(cls, assignVariableOptions):
-        x = AssignVariableOptionsT()
-        x._UnPack(assignVariableOptions)
-        return x
-
-    # AssignVariableOptionsT
-    def _UnPack(self, assignVariableOptions):
-        if assignVariableOptions is None:
-            return
-
-    # AssignVariableOptionsT
-    def Pack(self, builder):
-        AssignVariableOptionsStart(builder)
-        assignVariableOptions = AssignVariableOptionsEnd(builder)
-        return assignVariableOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class BatchMatMulOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = BatchMatMulOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsBatchMatMulOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def BatchMatMulOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # BatchMatMulOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # BatchMatMulOptions
-    def AdjX(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-    # BatchMatMulOptions
-    def AdjY(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-    # BatchMatMulOptions
-    def AsymmetricQuantizeInputs(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-def BatchMatMulOptionsStart(builder): builder.StartObject(3)
-def Start(builder):
-    return BatchMatMulOptionsStart(builder)
-def BatchMatMulOptionsAddAdjX(builder, adjX): builder.PrependBoolSlot(0, adjX, 0)
-def AddAdjX(builder, adjX):
-    return BatchMatMulOptionsAddAdjX(builder, adjX)
-def BatchMatMulOptionsAddAdjY(builder, adjY): builder.PrependBoolSlot(1, adjY, 0)
-def AddAdjY(builder, adjY):
-    return BatchMatMulOptionsAddAdjY(builder, adjY)
-def BatchMatMulOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): builder.PrependBoolSlot(2, asymmetricQuantizeInputs, 0)
-def AddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs):
-    return BatchMatMulOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs)
-def BatchMatMulOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return BatchMatMulOptionsEnd(builder)
-
-class BatchMatMulOptionsT(object):
-
-    # BatchMatMulOptionsT
-    def __init__(self):
-        self.adjX = False  # type: bool
-        self.adjY = False  # type: bool
-        self.asymmetricQuantizeInputs = False  # type: bool
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        batchMatMulOptions = BatchMatMulOptions()
-        batchMatMulOptions.Init(buf, pos)
-        return cls.InitFromObj(batchMatMulOptions)
-
-    @classmethod
-    def InitFromObj(cls, batchMatMulOptions):
-        x = BatchMatMulOptionsT()
-        x._UnPack(batchMatMulOptions)
-        return x
-
-    # BatchMatMulOptionsT
-    def _UnPack(self, batchMatMulOptions):
-        if batchMatMulOptions is None:
-            return
-        self.adjX = batchMatMulOptions.AdjX()
-        self.adjY = batchMatMulOptions.AdjY()
-        self.asymmetricQuantizeInputs = batchMatMulOptions.AsymmetricQuantizeInputs()
-
-    # BatchMatMulOptionsT
-    def Pack(self, builder):
-        BatchMatMulOptionsStart(builder)
-        BatchMatMulOptionsAddAdjX(builder, self.adjX)
-        BatchMatMulOptionsAddAdjY(builder, self.adjY)
-        BatchMatMulOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs)
-        batchMatMulOptions = BatchMatMulOptionsEnd(builder)
-        return batchMatMulOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class BatchToSpaceNDOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = BatchToSpaceNDOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsBatchToSpaceNDOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def BatchToSpaceNDOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # BatchToSpaceNDOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def BatchToSpaceNDOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return BatchToSpaceNDOptionsStart(builder)
-def BatchToSpaceNDOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return BatchToSpaceNDOptionsEnd(builder)
-
-class BatchToSpaceNDOptionsT(object):
-
-    # BatchToSpaceNDOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        batchToSpaceNdoptions = BatchToSpaceNDOptions()
-        batchToSpaceNdoptions.Init(buf, pos)
-        return cls.InitFromObj(batchToSpaceNdoptions)
-
-    @classmethod
-    def InitFromObj(cls, batchToSpaceNdoptions):
-        x = BatchToSpaceNDOptionsT()
-        x._UnPack(batchToSpaceNdoptions)
-        return x
-
-    # BatchToSpaceNDOptionsT
-    def _UnPack(self, batchToSpaceNdoptions):
-        if batchToSpaceNdoptions is None:
-            return
-
-    # BatchToSpaceNDOptionsT
-    def Pack(self, builder):
-        BatchToSpaceNDOptionsStart(builder)
-        batchToSpaceNdoptions = BatchToSpaceNDOptionsEnd(builder)
-        return batchToSpaceNdoptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class BidirectionalSequenceLSTMOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = BidirectionalSequenceLSTMOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsBidirectionalSequenceLSTMOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def BidirectionalSequenceLSTMOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # BidirectionalSequenceLSTMOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # BidirectionalSequenceLSTMOptions
-    def FusedActivationFunction(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # BidirectionalSequenceLSTMOptions
-    def CellClip(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
-        return 0.0
-
-    # BidirectionalSequenceLSTMOptions
-    def ProjClip(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
-        return 0.0
-
-    # BidirectionalSequenceLSTMOptions
-    def MergeOutputs(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-    # BidirectionalSequenceLSTMOptions
-    def TimeMajor(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return True
-
-    # BidirectionalSequenceLSTMOptions
-    def AsymmetricQuantizeInputs(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-def BidirectionalSequenceLSTMOptionsStart(builder): builder.StartObject(6)
-def Start(builder):
-    return BidirectionalSequenceLSTMOptionsStart(builder)
-def BidirectionalSequenceLSTMOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0)
-def AddFusedActivationFunction(builder, fusedActivationFunction):
-    return BidirectionalSequenceLSTMOptionsAddFusedActivationFunction(builder, fusedActivationFunction)
-def BidirectionalSequenceLSTMOptionsAddCellClip(builder, cellClip): builder.PrependFloat32Slot(1, cellClip, 0.0)
-def AddCellClip(builder, cellClip):
-    return BidirectionalSequenceLSTMOptionsAddCellClip(builder, cellClip)
-def BidirectionalSequenceLSTMOptionsAddProjClip(builder, projClip): builder.PrependFloat32Slot(2, projClip, 0.0)
-def AddProjClip(builder, projClip):
-    return BidirectionalSequenceLSTMOptionsAddProjClip(builder, projClip)
-def BidirectionalSequenceLSTMOptionsAddMergeOutputs(builder, mergeOutputs): builder.PrependBoolSlot(3, mergeOutputs, 0)
-def AddMergeOutputs(builder, mergeOutputs):
-    return BidirectionalSequenceLSTMOptionsAddMergeOutputs(builder, mergeOutputs)
-def BidirectionalSequenceLSTMOptionsAddTimeMajor(builder, timeMajor): builder.PrependBoolSlot(4, timeMajor, 1)
-def AddTimeMajor(builder, timeMajor):
-    return BidirectionalSequenceLSTMOptionsAddTimeMajor(builder, timeMajor)
-def BidirectionalSequenceLSTMOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): builder.PrependBoolSlot(5, asymmetricQuantizeInputs, 0)
-def AddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs):
-    return BidirectionalSequenceLSTMOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs)
-def BidirectionalSequenceLSTMOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return BidirectionalSequenceLSTMOptionsEnd(builder)
-
-class BidirectionalSequenceLSTMOptionsT(object):
-
-    # BidirectionalSequenceLSTMOptionsT
-    def __init__(self):
-        self.fusedActivationFunction = 0  # type: int
-        self.cellClip = 0.0  # type: float
-        self.projClip = 0.0  # type: float
-        self.mergeOutputs = False  # type: bool
-        self.timeMajor = True  # type: bool
-        self.asymmetricQuantizeInputs = False  # type: bool
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        bidirectionalSequenceLstmoptions = BidirectionalSequenceLSTMOptions()
-        bidirectionalSequenceLstmoptions.Init(buf, pos)
-        return cls.InitFromObj(bidirectionalSequenceLstmoptions)
-
-    @classmethod
-    def InitFromObj(cls, bidirectionalSequenceLstmoptions):
-        x = BidirectionalSequenceLSTMOptionsT()
-        x._UnPack(bidirectionalSequenceLstmoptions)
-        return x
-
-    # BidirectionalSequenceLSTMOptionsT
-    def _UnPack(self, bidirectionalSequenceLstmoptions):
-        if bidirectionalSequenceLstmoptions is None:
-            return
-        self.fusedActivationFunction = bidirectionalSequenceLstmoptions.FusedActivationFunction()
-        self.cellClip = bidirectionalSequenceLstmoptions.CellClip()
-        self.projClip = bidirectionalSequenceLstmoptions.ProjClip()
-        self.mergeOutputs = bidirectionalSequenceLstmoptions.MergeOutputs()
-        self.timeMajor = bidirectionalSequenceLstmoptions.TimeMajor()
-        self.asymmetricQuantizeInputs = bidirectionalSequenceLstmoptions.AsymmetricQuantizeInputs()
-
-    # BidirectionalSequenceLSTMOptionsT
-    def Pack(self, builder):
-        BidirectionalSequenceLSTMOptionsStart(builder)
-        BidirectionalSequenceLSTMOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        BidirectionalSequenceLSTMOptionsAddCellClip(builder, self.cellClip)
-        BidirectionalSequenceLSTMOptionsAddProjClip(builder, self.projClip)
-        BidirectionalSequenceLSTMOptionsAddMergeOutputs(builder, self.mergeOutputs)
-        BidirectionalSequenceLSTMOptionsAddTimeMajor(builder, self.timeMajor)
-        BidirectionalSequenceLSTMOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs)
-        bidirectionalSequenceLstmoptions = BidirectionalSequenceLSTMOptionsEnd(builder)
-        return bidirectionalSequenceLstmoptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class BidirectionalSequenceRNNOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = BidirectionalSequenceRNNOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsBidirectionalSequenceRNNOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def BidirectionalSequenceRNNOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # BidirectionalSequenceRNNOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # BidirectionalSequenceRNNOptions
-    def TimeMajor(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-    # BidirectionalSequenceRNNOptions
-    def FusedActivationFunction(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # BidirectionalSequenceRNNOptions
-    def MergeOutputs(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-    # BidirectionalSequenceRNNOptions
-    def AsymmetricQuantizeInputs(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-def BidirectionalSequenceRNNOptionsStart(builder): builder.StartObject(4)
-def Start(builder):
-    return BidirectionalSequenceRNNOptionsStart(builder)
-def BidirectionalSequenceRNNOptionsAddTimeMajor(builder, timeMajor): builder.PrependBoolSlot(0, timeMajor, 0)
-def AddTimeMajor(builder, timeMajor):
-    return BidirectionalSequenceRNNOptionsAddTimeMajor(builder, timeMajor)
-def BidirectionalSequenceRNNOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(1, fusedActivationFunction, 0)
-def AddFusedActivationFunction(builder, fusedActivationFunction):
-    return BidirectionalSequenceRNNOptionsAddFusedActivationFunction(builder, fusedActivationFunction)
-def BidirectionalSequenceRNNOptionsAddMergeOutputs(builder, mergeOutputs): builder.PrependBoolSlot(2, mergeOutputs, 0)
-def AddMergeOutputs(builder, mergeOutputs):
-    return BidirectionalSequenceRNNOptionsAddMergeOutputs(builder, mergeOutputs)
-def BidirectionalSequenceRNNOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): builder.PrependBoolSlot(3, asymmetricQuantizeInputs, 0)
-def AddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs):
-    return BidirectionalSequenceRNNOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs)
-def BidirectionalSequenceRNNOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return BidirectionalSequenceRNNOptionsEnd(builder)
-
-class BidirectionalSequenceRNNOptionsT(object):
-
-    # BidirectionalSequenceRNNOptionsT
-    def __init__(self):
-        self.timeMajor = False  # type: bool
-        self.fusedActivationFunction = 0  # type: int
-        self.mergeOutputs = False  # type: bool
-        self.asymmetricQuantizeInputs = False  # type: bool
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        bidirectionalSequenceRnnoptions = BidirectionalSequenceRNNOptions()
-        bidirectionalSequenceRnnoptions.Init(buf, pos)
-        return cls.InitFromObj(bidirectionalSequenceRnnoptions)
-
-    @classmethod
-    def InitFromObj(cls, bidirectionalSequenceRnnoptions):
-        x = BidirectionalSequenceRNNOptionsT()
-        x._UnPack(bidirectionalSequenceRnnoptions)
-        return x
-
-    # BidirectionalSequenceRNNOptionsT
-    def _UnPack(self, bidirectionalSequenceRnnoptions):
-        if bidirectionalSequenceRnnoptions is None:
-            return
-        self.timeMajor = bidirectionalSequenceRnnoptions.TimeMajor()
-        self.fusedActivationFunction = bidirectionalSequenceRnnoptions.FusedActivationFunction()
-        self.mergeOutputs = bidirectionalSequenceRnnoptions.MergeOutputs()
-        self.asymmetricQuantizeInputs = bidirectionalSequenceRnnoptions.AsymmetricQuantizeInputs()
-
-    # BidirectionalSequenceRNNOptionsT
-    def Pack(self, builder):
-        BidirectionalSequenceRNNOptionsStart(builder)
-        BidirectionalSequenceRNNOptionsAddTimeMajor(builder, self.timeMajor)
-        BidirectionalSequenceRNNOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        BidirectionalSequenceRNNOptionsAddMergeOutputs(builder, self.mergeOutputs)
-        BidirectionalSequenceRNNOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs)
-        bidirectionalSequenceRnnoptions = BidirectionalSequenceRNNOptionsEnd(builder)
-        return bidirectionalSequenceRnnoptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class BitcastOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = BitcastOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsBitcastOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def BitcastOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # BitcastOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def BitcastOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return BitcastOptionsStart(builder)
-def BitcastOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return BitcastOptionsEnd(builder)
-
-class BitcastOptionsT(object):
-
-    # BitcastOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        bitcastOptions = BitcastOptions()
-        bitcastOptions.Init(buf, pos)
-        return cls.InitFromObj(bitcastOptions)
-
-    @classmethod
-    def InitFromObj(cls, bitcastOptions):
-        x = BitcastOptionsT()
-        x._UnPack(bitcastOptions)
-        return x
-
-    # BitcastOptionsT
-    def _UnPack(self, bitcastOptions):
-        if bitcastOptions is None:
-            return
-
-    # BitcastOptionsT
-    def Pack(self, builder):
-        BitcastOptionsStart(builder)
-        bitcastOptions = BitcastOptionsEnd(builder)
-        return bitcastOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class BitwiseXorOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = BitwiseXorOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsBitwiseXorOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def BitwiseXorOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # BitwiseXorOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def BitwiseXorOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return BitwiseXorOptionsStart(builder)
-def BitwiseXorOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return BitwiseXorOptionsEnd(builder)
-
-class BitwiseXorOptionsT(object):
-
-    # BitwiseXorOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        bitwiseXorOptions = BitwiseXorOptions()
-        bitwiseXorOptions.Init(buf, pos)
-        return cls.InitFromObj(bitwiseXorOptions)
-
-    @classmethod
-    def InitFromObj(cls, bitwiseXorOptions):
-        x = BitwiseXorOptionsT()
-        x._UnPack(bitwiseXorOptions)
-        return x
-
-    # BitwiseXorOptionsT
-    def _UnPack(self, bitwiseXorOptions):
-        if bitwiseXorOptions is None:
-            return
-
-    # BitwiseXorOptionsT
-    def Pack(self, builder):
-        BitwiseXorOptionsStart(builder)
-        bitwiseXorOptions = BitwiseXorOptionsEnd(builder)
-        return bitwiseXorOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class BroadcastToOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = BroadcastToOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsBroadcastToOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def BroadcastToOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # BroadcastToOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def BroadcastToOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return BroadcastToOptionsStart(builder)
-def BroadcastToOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return BroadcastToOptionsEnd(builder)
-
-class BroadcastToOptionsT(object):
-
-    # BroadcastToOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        broadcastToOptions = BroadcastToOptions()
-        broadcastToOptions.Init(buf, pos)
-        return cls.InitFromObj(broadcastToOptions)
-
-    @classmethod
-    def InitFromObj(cls, broadcastToOptions):
-        x = BroadcastToOptionsT()
-        x._UnPack(broadcastToOptions)
-        return x
-
-    # BroadcastToOptionsT
-    def _UnPack(self, broadcastToOptions):
-        if broadcastToOptions is None:
-            return
-
-    # BroadcastToOptionsT
-    def Pack(self, builder):
-        BroadcastToOptionsStart(builder)
-        broadcastToOptions = BroadcastToOptionsEnd(builder)
-        return broadcastToOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class BucketizeOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = BucketizeOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsBucketizeOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def BucketizeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # BucketizeOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # BucketizeOptions
-    def Boundaries(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
-        return 0
-
-    # BucketizeOptions
-    def BoundariesAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
-        return 0
-
-    # BucketizeOptions
-    def BoundariesLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # BucketizeOptions
-    def BoundariesIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        return o == 0
-
-def BucketizeOptionsStart(builder): builder.StartObject(1)
-def Start(builder):
-    return BucketizeOptionsStart(builder)
-def BucketizeOptionsAddBoundaries(builder, boundaries): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(boundaries), 0)
-def AddBoundaries(builder, boundaries):
-    return BucketizeOptionsAddBoundaries(builder, boundaries)
-def BucketizeOptionsStartBoundariesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def StartBoundariesVector(builder, numElems):
-    return BucketizeOptionsStartBoundariesVector(builder, numElems)
-def BucketizeOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return BucketizeOptionsEnd(builder)
-try:
-    from typing import List
-except:
-    pass
-
-class BucketizeOptionsT(object):
-
-    # BucketizeOptionsT
-    def __init__(self):
-        self.boundaries = None  # type: List[float]
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        bucketizeOptions = BucketizeOptions()
-        bucketizeOptions.Init(buf, pos)
-        return cls.InitFromObj(bucketizeOptions)
-
-    @classmethod
-    def InitFromObj(cls, bucketizeOptions):
-        x = BucketizeOptionsT()
-        x._UnPack(bucketizeOptions)
-        return x
-
-    # BucketizeOptionsT
-    def _UnPack(self, bucketizeOptions):
-        if bucketizeOptions is None:
-            return
-        if not bucketizeOptions.BoundariesIsNone():
-            if np is None:
-                self.boundaries = []
-                for i in range(bucketizeOptions.BoundariesLength()):
-                    self.boundaries.append(bucketizeOptions.Boundaries(i))
-            else:
-                self.boundaries = bucketizeOptions.BoundariesAsNumpy()
-
-    # BucketizeOptionsT
-    def Pack(self, builder):
-        if self.boundaries is not None:
-            if np is not None and type(self.boundaries) is np.ndarray:
-                boundaries = builder.CreateNumpyVector(self.boundaries)
-            else:
-                BucketizeOptionsStartBoundariesVector(builder, len(self.boundaries))
-                for i in reversed(range(len(self.boundaries))):
-                    builder.PrependFloat32(self.boundaries[i])
-                boundaries = builder.EndVector()
-        BucketizeOptionsStart(builder)
-        if self.boundaries is not None:
-            BucketizeOptionsAddBoundaries(builder, boundaries)
-        bucketizeOptions = BucketizeOptionsEnd(builder)
-        return bucketizeOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class Buffer(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = Buffer()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsBuffer(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def BufferBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # Buffer
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # Buffer
-    def Data(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
-        return 0
-
-    # Buffer
-    def DataAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
-        return 0
-
-    # Buffer
-    def DataLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # Buffer
-    def DataIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        return o == 0
-
-    # Buffer
-    def Offset(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
-        return 0
-
-    # Buffer
-    def Size(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
-        return 0
-
-def BufferStart(builder): builder.StartObject(3)
-def Start(builder):
-    return BufferStart(builder)
-def BufferAddData(builder, data): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(data), 0)
-def AddData(builder, data):
-    return BufferAddData(builder, data)
-def BufferStartDataVector(builder, numElems): return builder.StartVector(1, numElems, 1)
-def StartDataVector(builder, numElems):
-    return BufferStartDataVector(builder, numElems)
-def BufferAddOffset(builder, offset): builder.PrependUint64Slot(1, offset, 0)
-def AddOffset(builder, offset):
-    return BufferAddOffset(builder, offset)
-def BufferAddSize(builder, size): builder.PrependUint64Slot(2, size, 0)
-def AddSize(builder, size):
-    return BufferAddSize(builder, size)
-def BufferEnd(builder): return builder.EndObject()
-def End(builder):
-    return BufferEnd(builder)
-try:
-    from typing import List
-except:
-    pass
-
-class BufferT(object):
-
-    # BufferT
-    def __init__(self):
-        self.data = None  # type: List[int]
-        self.offset = 0  # type: int
-        self.size = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        buffer = Buffer()
-        buffer.Init(buf, pos)
-        return cls.InitFromObj(buffer)
-
-    @classmethod
-    def InitFromObj(cls, buffer):
-        x = BufferT()
-        x._UnPack(buffer)
-        return x
-
-    # BufferT
-    def _UnPack(self, buffer):
-        if buffer is None:
-            return
-        if not buffer.DataIsNone():
-            if np is None:
-                self.data = []
-                for i in range(buffer.DataLength()):
-                    self.data.append(buffer.Data(i))
-            else:
-                self.data = buffer.DataAsNumpy()
-        self.offset = buffer.Offset()
-        self.size = buffer.Size()
-
-    # BufferT
-    def Pack(self, builder):
-        if self.data is not None:
-            if np is not None and type(self.data) is np.ndarray:
-                data = builder.CreateNumpyVector(self.data)
-            else:
-                BufferStartDataVector(builder, len(self.data))
-                for i in reversed(range(len(self.data))):
-                    builder.PrependUint8(self.data[i])
-                data = builder.EndVector()
-        BufferStart(builder)
-        if self.data is not None:
-            BufferAddData(builder, data)
-        BufferAddOffset(builder, self.offset)
-        BufferAddSize(builder, self.size)
-        buffer = BufferEnd(builder)
-        return buffer
-# automatically generated by the FlatBuffers compiler, do not modify
+def SparseIndexVectorCreator(unionType, table):
+    from flatbuffers.table import Table
+    if not isinstance(table, Table):
+        return None
+    if unionType == SparseIndexVector().Int32Vector:
+        return Int32VectorT.InitFromBuf(table.Bytes, table.Pos)
+    if unionType == SparseIndexVector().Uint16Vector:
+        return Uint16VectorT.InitFromBuf(table.Bytes, table.Pos)
+    if unionType == SparseIndexVector().Uint8Vector:
+        return Uint8VectorT.InitFromBuf(table.Bytes, table.Pos)
+    return None
 
-# namespace: tflite
 
 class BuiltinOperator(object):
     ADD = 0
@@ -1551,9 +229,52 @@
     BITCAST = 159
     BITWISE_XOR = 160
     RIGHT_SHIFT = 161
-# automatically generated by the FlatBuffers compiler, do not modify
+    STABLEHLO_LOGISTIC = 162
+    STABLEHLO_ADD = 163
+    STABLEHLO_DIVIDE = 164
+    STABLEHLO_MULTIPLY = 165
+    STABLEHLO_MAXIMUM = 166
+    STABLEHLO_RESHAPE = 167
+    STABLEHLO_CLAMP = 168
+    STABLEHLO_CONCATENATE = 169
+    STABLEHLO_BROADCAST_IN_DIM = 170
+    STABLEHLO_CONVOLUTION = 171
+    STABLEHLO_SLICE = 172
+    STABLEHLO_CUSTOM_CALL = 173
+    STABLEHLO_REDUCE = 174
+    STABLEHLO_ABS = 175
+    STABLEHLO_AND = 176
+    STABLEHLO_COSINE = 177
+    STABLEHLO_EXPONENTIAL = 178
+    STABLEHLO_FLOOR = 179
+    STABLEHLO_LOG = 180
+    STABLEHLO_MINIMUM = 181
+    STABLEHLO_NEGATE = 182
+    STABLEHLO_OR = 183
+    STABLEHLO_POWER = 184
+    STABLEHLO_REMAINDER = 185
+    STABLEHLO_RSQRT = 186
+    STABLEHLO_SELECT = 187
+    STABLEHLO_SUBTRACT = 188
+    STABLEHLO_TANH = 189
+    STABLEHLO_SCATTER = 190
+    STABLEHLO_COMPARE = 191
+    STABLEHLO_CONVERT = 192
+    STABLEHLO_DYNAMIC_SLICE = 193
+    STABLEHLO_DYNAMIC_UPDATE_SLICE = 194
+    STABLEHLO_PAD = 195
+    STABLEHLO_IOTA = 196
+    STABLEHLO_DOT_GENERAL = 197
+    STABLEHLO_REDUCE_WINDOW = 198
+    STABLEHLO_SORT = 199
+    STABLEHLO_WHILE = 200
+    STABLEHLO_GATHER = 201
+    STABLEHLO_TRANSPOSE = 202
+    DILATE = 203
+    STABLEHLO_RNG_BIT_GENERATOR = 204
+    REDUCE_WINDOW = 205
+    STABLEHLO_COMPOSITE = 206
 
-# namespace: tflite
 
 class BuiltinOptions(object):
     NONE = 0
@@ -1941,261 +662,5960 @@
     if unionType == BuiltinOptions().RightShiftOptions:
         return RightShiftOptionsT.InitFromBuf(table.Bytes, table.Pos)
     return None
-# automatically generated by the FlatBuffers compiler, do not modify
 
-# namespace: tflite
 
-from flatbuffers.compat import import_numpy
-np = import_numpy()
+class BuiltinOptions2(object):
+    NONE = 0
+    StablehloConcatenateOptions = 1
+    StablehloBroadcastInDimOptions = 2
+    StablehloSliceOptions = 3
+    StablehloConvolutionOptions = 4
+    StablehloCustomCallOptions = 5
+    StablehloReduceOptions = 6
+    StablehloScatterOptions = 7
+    StablehloCompareOptions = 8
+    StablehloDynamicSliceOptions = 9
+    StablehloPadOptions = 10
+    StablehloIotaOptions = 11
+    StablehloDotGeneralOptions = 12
+    StablehloReduceWindowOptions = 13
+    StablehloSortOptions = 14
+    StablehloWhileOptions = 15
+    StablehloGatherOptions = 16
+    StablehloTransposeOptions = 17
+    DilateOptions = 18
+    StablehloRngBitGeneratorOptions = 19
+    ReduceWindowOptions = 20
+    StableHLOCompositeOptions = 21
 
-class CallOnceOptions(object):
-    __slots__ = ['_tab']
+def BuiltinOptions2Creator(unionType, table):
+    from flatbuffers.table import Table
+    if not isinstance(table, Table):
+        return None
+    if unionType == BuiltinOptions2().StablehloConcatenateOptions:
+        return StablehloConcatenateOptionsT.InitFromBuf(table.Bytes, table.Pos)
+    if unionType == BuiltinOptions2().StablehloBroadcastInDimOptions:
+        return StablehloBroadcastInDimOptionsT.InitFromBuf(table.Bytes, table.Pos)
+    if unionType == BuiltinOptions2().StablehloSliceOptions:
+        return StablehloSliceOptionsT.InitFromBuf(table.Bytes, table.Pos)
+    if unionType == BuiltinOptions2().StablehloConvolutionOptions:
+        return StablehloConvolutionOptionsT.InitFromBuf(table.Bytes, table.Pos)
+    if unionType == BuiltinOptions2().StablehloCustomCallOptions:
+        return StablehloCustomCallOptionsT.InitFromBuf(table.Bytes, table.Pos)
+    if unionType == BuiltinOptions2().StablehloReduceOptions:
+        return StablehloReduceOptionsT.InitFromBuf(table.Bytes, table.Pos)
+    if unionType == BuiltinOptions2().StablehloScatterOptions:
+        return StablehloScatterOptionsT.InitFromBuf(table.Bytes, table.Pos)
+    if unionType == BuiltinOptions2().StablehloCompareOptions:
+        return StablehloCompareOptionsT.InitFromBuf(table.Bytes, table.Pos)
+    if unionType == BuiltinOptions2().StablehloDynamicSliceOptions:
+        return StablehloDynamicSliceOptionsT.InitFromBuf(table.Bytes, table.Pos)
+    if unionType == BuiltinOptions2().StablehloPadOptions:
+        return StablehloPadOptionsT.InitFromBuf(table.Bytes, table.Pos)
+    if unionType == BuiltinOptions2().StablehloIotaOptions:
+        return StablehloIotaOptionsT.InitFromBuf(table.Bytes, table.Pos)
+    if unionType == BuiltinOptions2().StablehloDotGeneralOptions:
+        return StablehloDotGeneralOptionsT.InitFromBuf(table.Bytes, table.Pos)
+    if unionType == BuiltinOptions2().StablehloReduceWindowOptions:
+        return StablehloReduceWindowOptionsT.InitFromBuf(table.Bytes, table.Pos)
+    if unionType == BuiltinOptions2().StablehloSortOptions:
+        return StablehloSortOptionsT.InitFromBuf(table.Bytes, table.Pos)
+    if unionType == BuiltinOptions2().StablehloWhileOptions:
+        return StablehloWhileOptionsT.InitFromBuf(table.Bytes, table.Pos)
+    if unionType == BuiltinOptions2().StablehloGatherOptions:
+        return StablehloGatherOptionsT.InitFromBuf(table.Bytes, table.Pos)
+    if unionType == BuiltinOptions2().StablehloTransposeOptions:
+        return StablehloTransposeOptionsT.InitFromBuf(table.Bytes, table.Pos)
+    if unionType == BuiltinOptions2().DilateOptions:
+        return DilateOptionsT.InitFromBuf(table.Bytes, table.Pos)
+    if unionType == BuiltinOptions2().StablehloRngBitGeneratorOptions:
+        return StablehloRngBitGeneratorOptionsT.InitFromBuf(table.Bytes, table.Pos)
+    if unionType == BuiltinOptions2().ReduceWindowOptions:
+        return ReduceWindowOptionsT.InitFromBuf(table.Bytes, table.Pos)
+    if unionType == BuiltinOptions2().StableHLOCompositeOptions:
+        return StableHLOCompositeOptionsT.InitFromBuf(table.Bytes, table.Pos)
+    return None
 
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = CallOnceOptions()
-        x.Init(buf, n + offset)
-        return x
 
-    @classmethod
-    def GetRootAsCallOnceOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def CallOnceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+class StablehloPrecisionConfig(object):
+    DEFAULT = 0
+    HIGH = 1
+    HIGHEST = 2
 
-    # CallOnceOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
 
-    # CallOnceOptions
-    def InitSubgraphIndex(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
+class StablehloComparisonDirection(object):
+    STABLEHLO_COMPARISON_DIRECTION_EQ = 0
+    STABLEHLO_COMPARISON_DIRECTION_NE = 1
+    STABLEHLO_COMPARISON_DIRECTION_GE = 2
+    STABLEHLO_COMPARISON_DIRECTION_GT = 3
+    STABLEHLO_COMPARISON_DIRECTION_LE = 4
+    STABLEHLO_COMPARISON_DIRECTION_LT = 5
 
-def CallOnceOptionsStart(builder): builder.StartObject(1)
-def Start(builder):
-    return CallOnceOptionsStart(builder)
-def CallOnceOptionsAddInitSubgraphIndex(builder, initSubgraphIndex): builder.PrependInt32Slot(0, initSubgraphIndex, 0)
-def AddInitSubgraphIndex(builder, initSubgraphIndex):
-    return CallOnceOptionsAddInitSubgraphIndex(builder, initSubgraphIndex)
-def CallOnceOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return CallOnceOptionsEnd(builder)
 
-class CallOnceOptionsT(object):
+class StablehloComparisonType(object):
+    STABLEHLO_COMPARISON_TYPE_NOTYPE = 0
+    STABLEHLO_COMPARISON_TYPE_FLOAT = 1
+    STABLEHLO_COMPARISON_TYPE_FLOAT_TOTAL_ORDER = 2
+    STABLEHLO_COMPARISON_TYPE_SIGNED = 3
+    STABLEHLO_COMPARISON_TYPE_UNSIGNED = 4
 
-    # CallOnceOptionsT
-    def __init__(self):
-        self.initSubgraphIndex = 0  # type: int
 
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        callOnceOptions = CallOnceOptions()
-        callOnceOptions.Init(buf, pos)
-        return cls.InitFromObj(callOnceOptions)
+class RngAlgorithm(object):
+    DEFAULT = 0
+    PHILOX = 1
+    THREEFRY = 2
 
-    @classmethod
-    def InitFromObj(cls, callOnceOptions):
-        x = CallOnceOptionsT()
-        x._UnPack(callOnceOptions)
-        return x
 
-    # CallOnceOptionsT
-    def _UnPack(self, callOnceOptions):
-        if callOnceOptions is None:
-            return
-        self.initSubgraphIndex = callOnceOptions.InitSubgraphIndex()
+class Padding(object):
+    SAME = 0
+    VALID = 1
 
-    # CallOnceOptionsT
-    def Pack(self, builder):
-        CallOnceOptionsStart(builder)
-        CallOnceOptionsAddInitSubgraphIndex(builder, self.initSubgraphIndex)
-        callOnceOptions = CallOnceOptionsEnd(builder)
-        return callOnceOptions
-# automatically generated by the FlatBuffers compiler, do not modify
 
-# namespace: tflite
+class ActivationFunctionType(object):
+    NONE = 0
+    RELU = 1
+    RELU_N1_TO_1 = 2
+    RELU6 = 3
+    TANH = 4
+    SIGN_BIT = 5
 
-from flatbuffers.compat import import_numpy
-np = import_numpy()
 
-class CallOptions(object):
-    __slots__ = ['_tab']
+class LSHProjectionType(object):
+    UNKNOWN = 0
+    SPARSE = 1
+    DENSE = 2
 
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = CallOptions()
-        x.Init(buf, n + offset)
-        return x
 
-    @classmethod
-    def GetRootAsCallOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def CallOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+class FullyConnectedOptionsWeightsFormat(object):
+    DEFAULT = 0
+    SHUFFLED4x16INT8 = 1
 
-    # CallOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
 
-    # CallOptions
-    def Subgraph(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
-        return 0
+class LSTMKernelType(object):
+    FULL = 0
+    BASIC = 1
 
-def CallOptionsStart(builder): builder.StartObject(1)
-def Start(builder):
-    return CallOptionsStart(builder)
-def CallOptionsAddSubgraph(builder, subgraph): builder.PrependUint32Slot(0, subgraph, 0)
-def AddSubgraph(builder, subgraph):
-    return CallOptionsAddSubgraph(builder, subgraph)
-def CallOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return CallOptionsEnd(builder)
-
-class CallOptionsT(object):
-
-    # CallOptionsT
-    def __init__(self):
-        self.subgraph = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        callOptions = CallOptions()
-        callOptions.Init(buf, pos)
-        return cls.InitFromObj(callOptions)
-
-    @classmethod
-    def InitFromObj(cls, callOptions):
-        x = CallOptionsT()
-        x._UnPack(callOptions)
-        return x
-
-    # CallOptionsT
-    def _UnPack(self, callOptions):
-        if callOptions is None:
-            return
-        self.subgraph = callOptions.Subgraph()
-
-    # CallOptionsT
-    def Pack(self, builder):
-        CallOptionsStart(builder)
-        CallOptionsAddSubgraph(builder, self.subgraph)
-        callOptions = CallOptionsEnd(builder)
-        return callOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class CastOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = CastOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsCastOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def CastOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # CastOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # CastOptions
-    def InDataType(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # CastOptions
-    def OutDataType(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-def CastOptionsStart(builder): builder.StartObject(2)
-def Start(builder):
-    return CastOptionsStart(builder)
-def CastOptionsAddInDataType(builder, inDataType): builder.PrependInt8Slot(0, inDataType, 0)
-def AddInDataType(builder, inDataType):
-    return CastOptionsAddInDataType(builder, inDataType)
-def CastOptionsAddOutDataType(builder, outDataType): builder.PrependInt8Slot(1, outDataType, 0)
-def AddOutDataType(builder, outDataType):
-    return CastOptionsAddOutDataType(builder, outDataType)
-def CastOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return CastOptionsEnd(builder)
-
-class CastOptionsT(object):
-
-    # CastOptionsT
-    def __init__(self):
-        self.inDataType = 0  # type: int
-        self.outDataType = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        castOptions = CastOptions()
-        castOptions.Init(buf, pos)
-        return cls.InitFromObj(castOptions)
-
-    @classmethod
-    def InitFromObj(cls, castOptions):
-        x = CastOptionsT()
-        x._UnPack(castOptions)
-        return x
-
-    # CastOptionsT
-    def _UnPack(self, castOptions):
-        if castOptions is None:
-            return
-        self.inDataType = castOptions.InDataType()
-        self.outDataType = castOptions.OutDataType()
-
-    # CastOptionsT
-    def Pack(self, builder):
-        CastOptionsStart(builder)
-        CastOptionsAddInDataType(builder, self.inDataType)
-        CastOptionsAddOutDataType(builder, self.outDataType)
-        castOptions = CastOptionsEnd(builder)
-        return castOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
 
 class CombinerType(object):
     SUM = 0
     MEAN = 1
     SQRTN = 2
-# automatically generated by the FlatBuffers compiler, do not modify
 
-# namespace: tflite
 
-from flatbuffers.compat import import_numpy
-np = import_numpy()
+class MirrorPadMode(object):
+    REFLECT = 0
+    SYMMETRIC = 1
+
+
+class ReduceWindowFunction(object):
+    UNSUPPORTED = 0
+    ADD = 1
+    MUL = 2
+    MINIMUM = 3
+    MAXIMUM = 4
+    ALL = 5
+    ANY = 6
+
+
+class CustomOptionsFormat(object):
+    FLEXBUFFERS = 0
+
+
+class CustomQuantization(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = CustomQuantization()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsCustomQuantization(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def CustomQuantizationBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # CustomQuantization
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # CustomQuantization
+    def Custom(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
+        return 0
+
+    # CustomQuantization
+    def CustomAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
+        return 0
+
+    # CustomQuantization
+    def CustomLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # CustomQuantization
+    def CustomIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        return o == 0
+
+def CustomQuantizationStart(builder):
+    builder.StartObject(1)
+
+def CustomQuantizationAddCustom(builder, custom):
+    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(custom), 0)
+
+def CustomQuantizationStartCustomVector(builder, numElems):
+    return builder.StartVector(1, numElems, 1)
+
+def CustomQuantizationEnd(builder):
+    return builder.EndObject()
+
+
+try:
+    from typing import List
+except:
+    pass
+
+class CustomQuantizationT(object):
+
+    # CustomQuantizationT
+    def __init__(self):
+        self.custom = None  # type: List[int]
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        customQuantization = CustomQuantization()
+        customQuantization.Init(buf, pos)
+        return cls.InitFromObj(customQuantization)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, customQuantization):
+        x = CustomQuantizationT()
+        x._UnPack(customQuantization)
+        return x
+
+    # CustomQuantizationT
+    def _UnPack(self, customQuantization):
+        if customQuantization is None:
+            return
+        if not customQuantization.CustomIsNone():
+            if np is None:
+                self.custom = []
+                for i in range(customQuantization.CustomLength()):
+                    self.custom.append(customQuantization.Custom(i))
+            else:
+                self.custom = customQuantization.CustomAsNumpy()
+
+    # CustomQuantizationT
+    def Pack(self, builder):
+        if self.custom is not None:
+            if np is not None and type(self.custom) is np.ndarray:
+                custom = builder.CreateNumpyVector(self.custom)
+            else:
+                CustomQuantizationStartCustomVector(builder, len(self.custom))
+                for i in reversed(range(len(self.custom))):
+                    builder.PrependUint8(self.custom[i])
+                custom = builder.EndVector()
+        CustomQuantizationStart(builder)
+        if self.custom is not None:
+            CustomQuantizationAddCustom(builder, custom)
+        customQuantization = CustomQuantizationEnd(builder)
+        return customQuantization
+
+
+class QuantizationParameters(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = QuantizationParameters()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsQuantizationParameters(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def QuantizationParametersBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # QuantizationParameters
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # QuantizationParameters
+    def Min(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+        return 0
+
+    # QuantizationParameters
+    def MinAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
+        return 0
+
+    # QuantizationParameters
+    def MinLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # QuantizationParameters
+    def MinIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        return o == 0
+
+    # QuantizationParameters
+    def Max(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+        return 0
+
+    # QuantizationParameters
+    def MaxAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
+        return 0
+
+    # QuantizationParameters
+    def MaxLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # QuantizationParameters
+    def MaxIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        return o == 0
+
+    # QuantizationParameters
+    def Scale(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+        return 0
+
+    # QuantizationParameters
+    def ScaleAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
+        return 0
+
+    # QuantizationParameters
+    def ScaleLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # QuantizationParameters
+    def ScaleIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        return o == 0
+
+    # QuantizationParameters
+    def ZeroPoint(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
+        return 0
+
+    # QuantizationParameters
+    def ZeroPointAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
+        return 0
+
+    # QuantizationParameters
+    def ZeroPointLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # QuantizationParameters
+    def ZeroPointIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+        return o == 0
+
+    # QuantizationParameters
+    def DetailsType(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
+        return 0
+
+    # QuantizationParameters
+    def Details(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
+        if o != 0:
+            from flatbuffers.table import Table
+            obj = Table(bytearray(), 0)
+            self._tab.Union(obj, o)
+            return obj
+        return None
+
+    # QuantizationParameters
+    def QuantizedDimension(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+def QuantizationParametersStart(builder):
+    builder.StartObject(7)
+
+def QuantizationParametersAddMin(builder, min):
+    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(min), 0)
+
+def QuantizationParametersStartMinVector(builder, numElems):
+    return builder.StartVector(4, numElems, 4)
+
+def QuantizationParametersAddMax(builder, max):
+    builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(max), 0)
+
+def QuantizationParametersStartMaxVector(builder, numElems):
+    return builder.StartVector(4, numElems, 4)
+
+def QuantizationParametersAddScale(builder, scale):
+    builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(scale), 0)
+
+def QuantizationParametersStartScaleVector(builder, numElems):
+    return builder.StartVector(4, numElems, 4)
+
+def QuantizationParametersAddZeroPoint(builder, zeroPoint):
+    builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(zeroPoint), 0)
+
+def QuantizationParametersStartZeroPointVector(builder, numElems):
+    return builder.StartVector(8, numElems, 8)
+
+def QuantizationParametersAddDetailsType(builder, detailsType):
+    builder.PrependUint8Slot(4, detailsType, 0)
+
+def QuantizationParametersAddDetails(builder, details):
+    builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(details), 0)
+
+def QuantizationParametersAddQuantizedDimension(builder, quantizedDimension):
+    builder.PrependInt32Slot(6, quantizedDimension, 0)
+
+def QuantizationParametersEnd(builder):
+    return builder.EndObject()
+
+
+try:
+    from typing import List, Union
+except:
+    pass
+
+class QuantizationParametersT(object):
+
+    # QuantizationParametersT
+    def __init__(self):
+        self.min = None  # type: List[float]
+        self.max = None  # type: List[float]
+        self.scale = None  # type: List[float]
+        self.zeroPoint = None  # type: List[int]
+        self.detailsType = 0  # type: int
+        self.details = None  # type: Union[None, CustomQuantizationT]
+        self.quantizedDimension = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        quantizationParameters = QuantizationParameters()
+        quantizationParameters.Init(buf, pos)
+        return cls.InitFromObj(quantizationParameters)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, quantizationParameters):
+        x = QuantizationParametersT()
+        x._UnPack(quantizationParameters)
+        return x
+
+    # QuantizationParametersT
+    def _UnPack(self, quantizationParameters):
+        if quantizationParameters is None:
+            return
+        if not quantizationParameters.MinIsNone():
+            if np is None:
+                self.min = []
+                for i in range(quantizationParameters.MinLength()):
+                    self.min.append(quantizationParameters.Min(i))
+            else:
+                self.min = quantizationParameters.MinAsNumpy()
+        if not quantizationParameters.MaxIsNone():
+            if np is None:
+                self.max = []
+                for i in range(quantizationParameters.MaxLength()):
+                    self.max.append(quantizationParameters.Max(i))
+            else:
+                self.max = quantizationParameters.MaxAsNumpy()
+        if not quantizationParameters.ScaleIsNone():
+            if np is None:
+                self.scale = []
+                for i in range(quantizationParameters.ScaleLength()):
+                    self.scale.append(quantizationParameters.Scale(i))
+            else:
+                self.scale = quantizationParameters.ScaleAsNumpy()
+        if not quantizationParameters.ZeroPointIsNone():
+            if np is None:
+                self.zeroPoint = []
+                for i in range(quantizationParameters.ZeroPointLength()):
+                    self.zeroPoint.append(quantizationParameters.ZeroPoint(i))
+            else:
+                self.zeroPoint = quantizationParameters.ZeroPointAsNumpy()
+        self.detailsType = quantizationParameters.DetailsType()
+        self.details = QuantizationDetailsCreator(self.detailsType, quantizationParameters.Details())
+        self.quantizedDimension = quantizationParameters.QuantizedDimension()
+
+    # QuantizationParametersT
+    def Pack(self, builder):
+        if self.min is not None:
+            if np is not None and type(self.min) is np.ndarray:
+                min = builder.CreateNumpyVector(self.min)
+            else:
+                QuantizationParametersStartMinVector(builder, len(self.min))
+                for i in reversed(range(len(self.min))):
+                    builder.PrependFloat32(self.min[i])
+                min = builder.EndVector()
+        if self.max is not None:
+            if np is not None and type(self.max) is np.ndarray:
+                max = builder.CreateNumpyVector(self.max)
+            else:
+                QuantizationParametersStartMaxVector(builder, len(self.max))
+                for i in reversed(range(len(self.max))):
+                    builder.PrependFloat32(self.max[i])
+                max = builder.EndVector()
+        if self.scale is not None:
+            if np is not None and type(self.scale) is np.ndarray:
+                scale = builder.CreateNumpyVector(self.scale)
+            else:
+                QuantizationParametersStartScaleVector(builder, len(self.scale))
+                for i in reversed(range(len(self.scale))):
+                    builder.PrependFloat32(self.scale[i])
+                scale = builder.EndVector()
+        if self.zeroPoint is not None:
+            if np is not None and type(self.zeroPoint) is np.ndarray:
+                zeroPoint = builder.CreateNumpyVector(self.zeroPoint)
+            else:
+                QuantizationParametersStartZeroPointVector(builder, len(self.zeroPoint))
+                for i in reversed(range(len(self.zeroPoint))):
+                    builder.PrependInt64(self.zeroPoint[i])
+                zeroPoint = builder.EndVector()
+        if self.details is not None:
+            details = self.details.Pack(builder)
+        QuantizationParametersStart(builder)
+        if self.min is not None:
+            QuantizationParametersAddMin(builder, min)
+        if self.max is not None:
+            QuantizationParametersAddMax(builder, max)
+        if self.scale is not None:
+            QuantizationParametersAddScale(builder, scale)
+        if self.zeroPoint is not None:
+            QuantizationParametersAddZeroPoint(builder, zeroPoint)
+        QuantizationParametersAddDetailsType(builder, self.detailsType)
+        if self.details is not None:
+            QuantizationParametersAddDetails(builder, details)
+        QuantizationParametersAddQuantizedDimension(builder, self.quantizedDimension)
+        quantizationParameters = QuantizationParametersEnd(builder)
+        return quantizationParameters
+
+
+class Int32Vector(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = Int32Vector()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsInt32Vector(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def Int32VectorBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # Int32Vector
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # Int32Vector
+    def Values(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+        return 0
+
+    # Int32Vector
+    def ValuesAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
+        return 0
+
+    # Int32Vector
+    def ValuesLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # Int32Vector
+    def ValuesIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        return o == 0
+
+def Int32VectorStart(builder):
+    builder.StartObject(1)
+
+def Int32VectorAddValues(builder, values):
+    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(values), 0)
+
+def Int32VectorStartValuesVector(builder, numElems):
+    return builder.StartVector(4, numElems, 4)
+
+def Int32VectorEnd(builder):
+    return builder.EndObject()
+
+
+try:
+    from typing import List
+except:
+    pass
+
+class Int32VectorT(object):
+
+    # Int32VectorT
+    def __init__(self):
+        self.values = None  # type: List[int]
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        int32Vector = Int32Vector()
+        int32Vector.Init(buf, pos)
+        return cls.InitFromObj(int32Vector)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, int32Vector):
+        x = Int32VectorT()
+        x._UnPack(int32Vector)
+        return x
+
+    # Int32VectorT
+    def _UnPack(self, int32Vector):
+        if int32Vector is None:
+            return
+        if not int32Vector.ValuesIsNone():
+            if np is None:
+                self.values = []
+                for i in range(int32Vector.ValuesLength()):
+                    self.values.append(int32Vector.Values(i))
+            else:
+                self.values = int32Vector.ValuesAsNumpy()
+
+    # Int32VectorT
+    def Pack(self, builder):
+        if self.values is not None:
+            if np is not None and type(self.values) is np.ndarray:
+                values = builder.CreateNumpyVector(self.values)
+            else:
+                Int32VectorStartValuesVector(builder, len(self.values))
+                for i in reversed(range(len(self.values))):
+                    builder.PrependInt32(self.values[i])
+                values = builder.EndVector()
+        Int32VectorStart(builder)
+        if self.values is not None:
+            Int32VectorAddValues(builder, values)
+        int32Vector = Int32VectorEnd(builder)
+        return int32Vector
+
+
+class Uint16Vector(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = Uint16Vector()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsUint16Vector(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def Uint16VectorBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # Uint16Vector
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # Uint16Vector
+    def Values(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Uint16Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 2))
+        return 0
+
+    # Uint16Vector
+    def ValuesAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint16Flags, o)
+        return 0
+
+    # Uint16Vector
+    def ValuesLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # Uint16Vector
+    def ValuesIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        return o == 0
+
+def Uint16VectorStart(builder):
+    builder.StartObject(1)
+
+def Uint16VectorAddValues(builder, values):
+    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(values), 0)
+
+def Uint16VectorStartValuesVector(builder, numElems):
+    return builder.StartVector(2, numElems, 2)
+
+def Uint16VectorEnd(builder):
+    return builder.EndObject()
+
+
+try:
+    from typing import List
+except:
+    pass
+
+class Uint16VectorT(object):
+
+    # Uint16VectorT
+    def __init__(self):
+        self.values = None  # type: List[int]
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        uint16Vector = Uint16Vector()
+        uint16Vector.Init(buf, pos)
+        return cls.InitFromObj(uint16Vector)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, uint16Vector):
+        x = Uint16VectorT()
+        x._UnPack(uint16Vector)
+        return x
+
+    # Uint16VectorT
+    def _UnPack(self, uint16Vector):
+        if uint16Vector is None:
+            return
+        if not uint16Vector.ValuesIsNone():
+            if np is None:
+                self.values = []
+                for i in range(uint16Vector.ValuesLength()):
+                    self.values.append(uint16Vector.Values(i))
+            else:
+                self.values = uint16Vector.ValuesAsNumpy()
+
+    # Uint16VectorT
+    def Pack(self, builder):
+        if self.values is not None:
+            if np is not None and type(self.values) is np.ndarray:
+                values = builder.CreateNumpyVector(self.values)
+            else:
+                Uint16VectorStartValuesVector(builder, len(self.values))
+                for i in reversed(range(len(self.values))):
+                    builder.PrependUint16(self.values[i])
+                values = builder.EndVector()
+        Uint16VectorStart(builder)
+        if self.values is not None:
+            Uint16VectorAddValues(builder, values)
+        uint16Vector = Uint16VectorEnd(builder)
+        return uint16Vector
+
+
+class Uint8Vector(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = Uint8Vector()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsUint8Vector(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def Uint8VectorBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # Uint8Vector
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # Uint8Vector
+    def Values(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
+        return 0
+
+    # Uint8Vector
+    def ValuesAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
+        return 0
+
+    # Uint8Vector
+    def ValuesLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # Uint8Vector
+    def ValuesIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        return o == 0
+
+def Uint8VectorStart(builder):
+    builder.StartObject(1)
+
+def Uint8VectorAddValues(builder, values):
+    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(values), 0)
+
+def Uint8VectorStartValuesVector(builder, numElems):
+    return builder.StartVector(1, numElems, 1)
+
+def Uint8VectorEnd(builder):
+    return builder.EndObject()
+
+
+try:
+    from typing import List
+except:
+    pass
+
+class Uint8VectorT(object):
+
+    # Uint8VectorT
+    def __init__(self):
+        self.values = None  # type: List[int]
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        uint8Vector = Uint8Vector()
+        uint8Vector.Init(buf, pos)
+        return cls.InitFromObj(uint8Vector)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, uint8Vector):
+        x = Uint8VectorT()
+        x._UnPack(uint8Vector)
+        return x
+
+    # Uint8VectorT
+    def _UnPack(self, uint8Vector):
+        if uint8Vector is None:
+            return
+        if not uint8Vector.ValuesIsNone():
+            if np is None:
+                self.values = []
+                for i in range(uint8Vector.ValuesLength()):
+                    self.values.append(uint8Vector.Values(i))
+            else:
+                self.values = uint8Vector.ValuesAsNumpy()
+
+    # Uint8VectorT
+    def Pack(self, builder):
+        if self.values is not None:
+            if np is not None and type(self.values) is np.ndarray:
+                values = builder.CreateNumpyVector(self.values)
+            else:
+                Uint8VectorStartValuesVector(builder, len(self.values))
+                for i in reversed(range(len(self.values))):
+                    builder.PrependUint8(self.values[i])
+                values = builder.EndVector()
+        Uint8VectorStart(builder)
+        if self.values is not None:
+            Uint8VectorAddValues(builder, values)
+        uint8Vector = Uint8VectorEnd(builder)
+        return uint8Vector
+
+
+class DimensionMetadata(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = DimensionMetadata()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsDimensionMetadata(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def DimensionMetadataBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # DimensionMetadata
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # DimensionMetadata
+    def Format(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
+
+    # DimensionMetadata
+    def DenseSize(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # DimensionMetadata
+    def ArraySegmentsType(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
+        return 0
+
+    # DimensionMetadata
+    def ArraySegments(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+        if o != 0:
+            from flatbuffers.table import Table
+            obj = Table(bytearray(), 0)
+            self._tab.Union(obj, o)
+            return obj
+        return None
+
+    # DimensionMetadata
+    def ArrayIndicesType(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
+        return 0
+
+    # DimensionMetadata
+    def ArrayIndices(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
+        if o != 0:
+            from flatbuffers.table import Table
+            obj = Table(bytearray(), 0)
+            self._tab.Union(obj, o)
+            return obj
+        return None
+
+def DimensionMetadataStart(builder):
+    builder.StartObject(6)
+
+def DimensionMetadataAddFormat(builder, format):
+    builder.PrependInt8Slot(0, format, 0)
+
+def DimensionMetadataAddDenseSize(builder, denseSize):
+    builder.PrependInt32Slot(1, denseSize, 0)
+
+def DimensionMetadataAddArraySegmentsType(builder, arraySegmentsType):
+    builder.PrependUint8Slot(2, arraySegmentsType, 0)
+
+def DimensionMetadataAddArraySegments(builder, arraySegments):
+    builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(arraySegments), 0)
+
+def DimensionMetadataAddArrayIndicesType(builder, arrayIndicesType):
+    builder.PrependUint8Slot(4, arrayIndicesType, 0)
+
+def DimensionMetadataAddArrayIndices(builder, arrayIndices):
+    builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(arrayIndices), 0)
+
+def DimensionMetadataEnd(builder):
+    return builder.EndObject()
+
+
+try:
+    from typing import Union
+except:
+    pass
+
+class DimensionMetadataT(object):
+
+    # DimensionMetadataT
+    def __init__(self):
+        self.format = 0  # type: int
+        self.denseSize = 0  # type: int
+        self.arraySegmentsType = 0  # type: int
+        self.arraySegments = None  # type: Union[None, Int32VectorT, Uint16VectorT, Uint8VectorT]
+        self.arrayIndicesType = 0  # type: int
+        self.arrayIndices = None  # type: Union[None, Int32VectorT, Uint16VectorT, Uint8VectorT]
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        dimensionMetadata = DimensionMetadata()
+        dimensionMetadata.Init(buf, pos)
+        return cls.InitFromObj(dimensionMetadata)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, dimensionMetadata):
+        x = DimensionMetadataT()
+        x._UnPack(dimensionMetadata)
+        return x
+
+    # DimensionMetadataT
+    def _UnPack(self, dimensionMetadata):
+        if dimensionMetadata is None:
+            return
+        self.format = dimensionMetadata.Format()
+        self.denseSize = dimensionMetadata.DenseSize()
+        self.arraySegmentsType = dimensionMetadata.ArraySegmentsType()
+        self.arraySegments = SparseIndexVectorCreator(self.arraySegmentsType, dimensionMetadata.ArraySegments())
+        self.arrayIndicesType = dimensionMetadata.ArrayIndicesType()
+        self.arrayIndices = SparseIndexVectorCreator(self.arrayIndicesType, dimensionMetadata.ArrayIndices())
+
+    # DimensionMetadataT
+    def Pack(self, builder):
+        if self.arraySegments is not None:
+            arraySegments = self.arraySegments.Pack(builder)
+        if self.arrayIndices is not None:
+            arrayIndices = self.arrayIndices.Pack(builder)
+        DimensionMetadataStart(builder)
+        DimensionMetadataAddFormat(builder, self.format)
+        DimensionMetadataAddDenseSize(builder, self.denseSize)
+        DimensionMetadataAddArraySegmentsType(builder, self.arraySegmentsType)
+        if self.arraySegments is not None:
+            DimensionMetadataAddArraySegments(builder, arraySegments)
+        DimensionMetadataAddArrayIndicesType(builder, self.arrayIndicesType)
+        if self.arrayIndices is not None:
+            DimensionMetadataAddArrayIndices(builder, arrayIndices)
+        dimensionMetadata = DimensionMetadataEnd(builder)
+        return dimensionMetadata
+
+
+class SparsityParameters(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = SparsityParameters()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsSparsityParameters(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def SparsityParametersBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # SparsityParameters
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # SparsityParameters
+    def TraversalOrder(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+        return 0
+
+    # SparsityParameters
+    def TraversalOrderAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
+        return 0
+
+    # SparsityParameters
+    def TraversalOrderLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # SparsityParameters
+    def TraversalOrderIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        return o == 0
+
+    # SparsityParameters
+    def BlockMap(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+        return 0
+
+    # SparsityParameters
+    def BlockMapAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
+        return 0
+
+    # SparsityParameters
+    def BlockMapLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # SparsityParameters
+    def BlockMapIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        return o == 0
+
+    # SparsityParameters
+    def DimMetadata(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            x = self._tab.Vector(o)
+            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
+            x = self._tab.Indirect(x)
+            obj = DimensionMetadata()
+            obj.Init(self._tab.Bytes, x)
+            return obj
+        return None
+
+    # SparsityParameters
+    def DimMetadataLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # SparsityParameters
+    def DimMetadataIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        return o == 0
+
+def SparsityParametersStart(builder):
+    builder.StartObject(3)
+
+def SparsityParametersAddTraversalOrder(builder, traversalOrder):
+    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(traversalOrder), 0)
+
+def SparsityParametersStartTraversalOrderVector(builder, numElems):
+    return builder.StartVector(4, numElems, 4)
+
+def SparsityParametersAddBlockMap(builder, blockMap):
+    builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(blockMap), 0)
+
+def SparsityParametersStartBlockMapVector(builder, numElems):
+    return builder.StartVector(4, numElems, 4)
+
+def SparsityParametersAddDimMetadata(builder, dimMetadata):
+    builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(dimMetadata), 0)
+
+def SparsityParametersStartDimMetadataVector(builder, numElems):
+    return builder.StartVector(4, numElems, 4)
+
+def SparsityParametersEnd(builder):
+    return builder.EndObject()
+
+
+try:
+    from typing import List
+except:
+    pass
+
+class SparsityParametersT(object):
+
+    # SparsityParametersT
+    def __init__(self):
+        self.traversalOrder = None  # type: List[int]
+        self.blockMap = None  # type: List[int]
+        self.dimMetadata = None  # type: List[DimensionMetadataT]
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        sparsityParameters = SparsityParameters()
+        sparsityParameters.Init(buf, pos)
+        return cls.InitFromObj(sparsityParameters)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, sparsityParameters):
+        x = SparsityParametersT()
+        x._UnPack(sparsityParameters)
+        return x
+
+    # SparsityParametersT
+    def _UnPack(self, sparsityParameters):
+        if sparsityParameters is None:
+            return
+        if not sparsityParameters.TraversalOrderIsNone():
+            if np is None:
+                self.traversalOrder = []
+                for i in range(sparsityParameters.TraversalOrderLength()):
+                    self.traversalOrder.append(sparsityParameters.TraversalOrder(i))
+            else:
+                self.traversalOrder = sparsityParameters.TraversalOrderAsNumpy()
+        if not sparsityParameters.BlockMapIsNone():
+            if np is None:
+                self.blockMap = []
+                for i in range(sparsityParameters.BlockMapLength()):
+                    self.blockMap.append(sparsityParameters.BlockMap(i))
+            else:
+                self.blockMap = sparsityParameters.BlockMapAsNumpy()
+        if not sparsityParameters.DimMetadataIsNone():
+            self.dimMetadata = []
+            for i in range(sparsityParameters.DimMetadataLength()):
+                if sparsityParameters.DimMetadata(i) is None:
+                    self.dimMetadata.append(None)
+                else:
+                    dimensionMetadata_ = DimensionMetadataT.InitFromObj(sparsityParameters.DimMetadata(i))
+                    self.dimMetadata.append(dimensionMetadata_)
+
+    # SparsityParametersT
+    def Pack(self, builder):
+        if self.traversalOrder is not None:
+            if np is not None and type(self.traversalOrder) is np.ndarray:
+                traversalOrder = builder.CreateNumpyVector(self.traversalOrder)
+            else:
+                SparsityParametersStartTraversalOrderVector(builder, len(self.traversalOrder))
+                for i in reversed(range(len(self.traversalOrder))):
+                    builder.PrependInt32(self.traversalOrder[i])
+                traversalOrder = builder.EndVector()
+        if self.blockMap is not None:
+            if np is not None and type(self.blockMap) is np.ndarray:
+                blockMap = builder.CreateNumpyVector(self.blockMap)
+            else:
+                SparsityParametersStartBlockMapVector(builder, len(self.blockMap))
+                for i in reversed(range(len(self.blockMap))):
+                    builder.PrependInt32(self.blockMap[i])
+                blockMap = builder.EndVector()
+        if self.dimMetadata is not None:
+            dimMetadatalist = []
+            for i in range(len(self.dimMetadata)):
+                dimMetadatalist.append(self.dimMetadata[i].Pack(builder))
+            SparsityParametersStartDimMetadataVector(builder, len(self.dimMetadata))
+            for i in reversed(range(len(self.dimMetadata))):
+                builder.PrependUOffsetTRelative(dimMetadatalist[i])
+            dimMetadata = builder.EndVector()
+        SparsityParametersStart(builder)
+        if self.traversalOrder is not None:
+            SparsityParametersAddTraversalOrder(builder, traversalOrder)
+        if self.blockMap is not None:
+            SparsityParametersAddBlockMap(builder, blockMap)
+        if self.dimMetadata is not None:
+            SparsityParametersAddDimMetadata(builder, dimMetadata)
+        sparsityParameters = SparsityParametersEnd(builder)
+        return sparsityParameters
+
+
+class VariantSubType(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = VariantSubType()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsVariantSubType(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def VariantSubTypeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # VariantSubType
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # VariantSubType
+    def Shape(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+        return 0
+
+    # VariantSubType
+    def ShapeAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
+        return 0
+
+    # VariantSubType
+    def ShapeLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # VariantSubType
+    def ShapeIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        return o == 0
+
+    # VariantSubType
+    def Type(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
+
+    # VariantSubType
+    def HasRank(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return False
+
+def VariantSubTypeStart(builder):
+    builder.StartObject(3)
+
+def VariantSubTypeAddShape(builder, shape):
+    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(shape), 0)
+
+def VariantSubTypeStartShapeVector(builder, numElems):
+    return builder.StartVector(4, numElems, 4)
+
+def VariantSubTypeAddType(builder, type):
+    builder.PrependInt8Slot(1, type, 0)
+
+def VariantSubTypeAddHasRank(builder, hasRank):
+    builder.PrependBoolSlot(2, hasRank, 0)
+
+def VariantSubTypeEnd(builder):
+    return builder.EndObject()
+
+
+try:
+    from typing import List
+except:
+    pass
+
+class VariantSubTypeT(object):
+
+    # VariantSubTypeT
+    def __init__(self):
+        self.shape = None  # type: List[int]
+        self.type = 0  # type: int
+        self.hasRank = False  # type: bool
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        variantSubType = VariantSubType()
+        variantSubType.Init(buf, pos)
+        return cls.InitFromObj(variantSubType)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, variantSubType):
+        x = VariantSubTypeT()
+        x._UnPack(variantSubType)
+        return x
+
+    # VariantSubTypeT
+    def _UnPack(self, variantSubType):
+        if variantSubType is None:
+            return
+        if not variantSubType.ShapeIsNone():
+            if np is None:
+                self.shape = []
+                for i in range(variantSubType.ShapeLength()):
+                    self.shape.append(variantSubType.Shape(i))
+            else:
+                self.shape = variantSubType.ShapeAsNumpy()
+        self.type = variantSubType.Type()
+        self.hasRank = variantSubType.HasRank()
+
+    # VariantSubTypeT
+    def Pack(self, builder):
+        if self.shape is not None:
+            if np is not None and type(self.shape) is np.ndarray:
+                shape = builder.CreateNumpyVector(self.shape)
+            else:
+                VariantSubTypeStartShapeVector(builder, len(self.shape))
+                for i in reversed(range(len(self.shape))):
+                    builder.PrependInt32(self.shape[i])
+                shape = builder.EndVector()
+        VariantSubTypeStart(builder)
+        if self.shape is not None:
+            VariantSubTypeAddShape(builder, shape)
+        VariantSubTypeAddType(builder, self.type)
+        VariantSubTypeAddHasRank(builder, self.hasRank)
+        variantSubType = VariantSubTypeEnd(builder)
+        return variantSubType
+
+
+class Tensor(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = Tensor()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsTensor(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def TensorBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # Tensor
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # Tensor
+    def Shape(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+        return 0
+
+    # Tensor
+    def ShapeAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
+        return 0
+
+    # Tensor
+    def ShapeLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # Tensor
+    def ShapeIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        return o == 0
+
+    # Tensor
+    def Type(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
+
+    # Tensor
+    def Buffer(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
+        return 0
+
+    # Tensor
+    def Name(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+        if o != 0:
+            return self._tab.String(o + self._tab.Pos)
+        return None
+
+    # Tensor
+    def Quantization(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+        if o != 0:
+            x = self._tab.Indirect(o + self._tab.Pos)
+            obj = QuantizationParameters()
+            obj.Init(self._tab.Bytes, x)
+            return obj
+        return None
+
+    # Tensor
+    def IsVariable(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
+        if o != 0:
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return False
+
+    # Tensor
+    def Sparsity(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
+        if o != 0:
+            x = self._tab.Indirect(o + self._tab.Pos)
+            obj = SparsityParameters()
+            obj.Init(self._tab.Bytes, x)
+            return obj
+        return None
+
+    # Tensor
+    def ShapeSignature(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+        return 0
+
+    # Tensor
+    def ShapeSignatureAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
+        return 0
+
+    # Tensor
+    def ShapeSignatureLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # Tensor
+    def ShapeSignatureIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
+        return o == 0
+
+    # Tensor
+    def HasRank(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
+        if o != 0:
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return False
+
+    # Tensor
+    def VariantTensors(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
+        if o != 0:
+            x = self._tab.Vector(o)
+            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
+            x = self._tab.Indirect(x)
+            obj = VariantSubType()
+            obj.Init(self._tab.Bytes, x)
+            return obj
+        return None
+
+    # Tensor
+    def VariantTensorsLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # Tensor
+    def VariantTensorsIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
+        return o == 0
+
+def TensorStart(builder):
+    builder.StartObject(10)
+
+def TensorAddShape(builder, shape):
+    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(shape), 0)
+
+def TensorStartShapeVector(builder, numElems):
+    return builder.StartVector(4, numElems, 4)
+
+def TensorAddType(builder, type):
+    builder.PrependInt8Slot(1, type, 0)
+
+def TensorAddBuffer(builder, buffer):
+    builder.PrependUint32Slot(2, buffer, 0)
+
+def TensorAddName(builder, name):
+    builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
+
+def TensorAddQuantization(builder, quantization):
+    builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(quantization), 0)
+
+def TensorAddIsVariable(builder, isVariable):
+    builder.PrependBoolSlot(5, isVariable, 0)
+
+def TensorAddSparsity(builder, sparsity):
+    builder.PrependUOffsetTRelativeSlot(6, flatbuffers.number_types.UOffsetTFlags.py_type(sparsity), 0)
+
+def TensorAddShapeSignature(builder, shapeSignature):
+    builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(shapeSignature), 0)
+
+def TensorStartShapeSignatureVector(builder, numElems):
+    return builder.StartVector(4, numElems, 4)
+
+def TensorAddHasRank(builder, hasRank):
+    builder.PrependBoolSlot(8, hasRank, 0)
+
+def TensorAddVariantTensors(builder, variantTensors):
+    builder.PrependUOffsetTRelativeSlot(9, flatbuffers.number_types.UOffsetTFlags.py_type(variantTensors), 0)
+
+def TensorStartVariantTensorsVector(builder, numElems):
+    return builder.StartVector(4, numElems, 4)
+
+def TensorEnd(builder):
+    return builder.EndObject()
+
+
+try:
+    from typing import List, Optional
+except:
+    pass
+
+class TensorT(object):
+
+    # TensorT
+    def __init__(self):
+        self.shape = None  # type: List[int]
+        self.type = 0  # type: int
+        self.buffer = 0  # type: int
+        self.name = None  # type: str
+        self.quantization = None  # type: Optional[QuantizationParametersT]
+        self.isVariable = False  # type: bool
+        self.sparsity = None  # type: Optional[SparsityParametersT]
+        self.shapeSignature = None  # type: List[int]
+        self.hasRank = False  # type: bool
+        self.variantTensors = None  # type: List[VariantSubTypeT]
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        tensor = Tensor()
+        tensor.Init(buf, pos)
+        return cls.InitFromObj(tensor)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, tensor):
+        x = TensorT()
+        x._UnPack(tensor)
+        return x
+
+    # TensorT
+    def _UnPack(self, tensor):
+        if tensor is None:
+            return
+        if not tensor.ShapeIsNone():
+            if np is None:
+                self.shape = []
+                for i in range(tensor.ShapeLength()):
+                    self.shape.append(tensor.Shape(i))
+            else:
+                self.shape = tensor.ShapeAsNumpy()
+        self.type = tensor.Type()
+        self.buffer = tensor.Buffer()
+        self.name = tensor.Name()
+        if tensor.Quantization() is not None:
+            self.quantization = QuantizationParametersT.InitFromObj(tensor.Quantization())
+        self.isVariable = tensor.IsVariable()
+        if tensor.Sparsity() is not None:
+            self.sparsity = SparsityParametersT.InitFromObj(tensor.Sparsity())
+        if not tensor.ShapeSignatureIsNone():
+            if np is None:
+                self.shapeSignature = []
+                for i in range(tensor.ShapeSignatureLength()):
+                    self.shapeSignature.append(tensor.ShapeSignature(i))
+            else:
+                self.shapeSignature = tensor.ShapeSignatureAsNumpy()
+        self.hasRank = tensor.HasRank()
+        if not tensor.VariantTensorsIsNone():
+            self.variantTensors = []
+            for i in range(tensor.VariantTensorsLength()):
+                if tensor.VariantTensors(i) is None:
+                    self.variantTensors.append(None)
+                else:
+                    variantSubType_ = VariantSubTypeT.InitFromObj(tensor.VariantTensors(i))
+                    self.variantTensors.append(variantSubType_)
+
+    # TensorT
+    def Pack(self, builder):
+        if self.shape is not None:
+            if np is not None and type(self.shape) is np.ndarray:
+                shape = builder.CreateNumpyVector(self.shape)
+            else:
+                TensorStartShapeVector(builder, len(self.shape))
+                for i in reversed(range(len(self.shape))):
+                    builder.PrependInt32(self.shape[i])
+                shape = builder.EndVector()
+        if self.name is not None:
+            name = builder.CreateString(self.name)
+        if self.quantization is not None:
+            quantization = self.quantization.Pack(builder)
+        if self.sparsity is not None:
+            sparsity = self.sparsity.Pack(builder)
+        if self.shapeSignature is not None:
+            if np is not None and type(self.shapeSignature) is np.ndarray:
+                shapeSignature = builder.CreateNumpyVector(self.shapeSignature)
+            else:
+                TensorStartShapeSignatureVector(builder, len(self.shapeSignature))
+                for i in reversed(range(len(self.shapeSignature))):
+                    builder.PrependInt32(self.shapeSignature[i])
+                shapeSignature = builder.EndVector()
+        if self.variantTensors is not None:
+            variantTensorslist = []
+            for i in range(len(self.variantTensors)):
+                variantTensorslist.append(self.variantTensors[i].Pack(builder))
+            TensorStartVariantTensorsVector(builder, len(self.variantTensors))
+            for i in reversed(range(len(self.variantTensors))):
+                builder.PrependUOffsetTRelative(variantTensorslist[i])
+            variantTensors = builder.EndVector()
+        TensorStart(builder)
+        if self.shape is not None:
+            TensorAddShape(builder, shape)
+        TensorAddType(builder, self.type)
+        TensorAddBuffer(builder, self.buffer)
+        if self.name is not None:
+            TensorAddName(builder, name)
+        if self.quantization is not None:
+            TensorAddQuantization(builder, quantization)
+        TensorAddIsVariable(builder, self.isVariable)
+        if self.sparsity is not None:
+            TensorAddSparsity(builder, sparsity)
+        if self.shapeSignature is not None:
+            TensorAddShapeSignature(builder, shapeSignature)
+        TensorAddHasRank(builder, self.hasRank)
+        if self.variantTensors is not None:
+            TensorAddVariantTensors(builder, variantTensors)
+        tensor = TensorEnd(builder)
+        return tensor
+
+
+class StablehloGatherOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = StablehloGatherOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsStablehloGatherOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def StablehloGatherOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # StablehloGatherOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # StablehloGatherOptions
+    def OffsetDims(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
+        return 0
+
+    # StablehloGatherOptions
+    def OffsetDimsAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
+        return 0
+
+    # StablehloGatherOptions
+    def OffsetDimsLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # StablehloGatherOptions
+    def OffsetDimsIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        return o == 0
+
+    # StablehloGatherOptions
+    def CollapsedSliceDims(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
+        return 0
+
+    # StablehloGatherOptions
+    def CollapsedSliceDimsAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
+        return 0
+
+    # StablehloGatherOptions
+    def CollapsedSliceDimsLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # StablehloGatherOptions
+    def CollapsedSliceDimsIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        return o == 0
+
+    # StablehloGatherOptions
+    def StartIndexMap(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
+        return 0
+
+    # StablehloGatherOptions
+    def StartIndexMapAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
+        return 0
+
+    # StablehloGatherOptions
+    def StartIndexMapLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # StablehloGatherOptions
+    def StartIndexMapIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        return o == 0
+
+    # StablehloGatherOptions
+    def IndexVectorDim(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
+        return 0
+
+    # StablehloGatherOptions
+    def SliceSizes(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
+        return 0
+
+    # StablehloGatherOptions
+    def SliceSizesAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
+        return 0
+
+    # StablehloGatherOptions
+    def SliceSizesLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # StablehloGatherOptions
+    def SliceSizesIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+        return o == 0
+
+    # StablehloGatherOptions
+    def IndicesAreSorted(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
+        if o != 0:
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return False
+
+def StablehloGatherOptionsStart(builder):
+    builder.StartObject(6)
+
+def StablehloGatherOptionsAddOffsetDims(builder, offsetDims):
+    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(offsetDims), 0)
+
+def StablehloGatherOptionsStartOffsetDimsVector(builder, numElems):
+    return builder.StartVector(8, numElems, 8)
+
+def StablehloGatherOptionsAddCollapsedSliceDims(builder, collapsedSliceDims):
+    builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(collapsedSliceDims), 0)
+
+def StablehloGatherOptionsStartCollapsedSliceDimsVector(builder, numElems):
+    return builder.StartVector(8, numElems, 8)
+
+def StablehloGatherOptionsAddStartIndexMap(builder, startIndexMap):
+    builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(startIndexMap), 0)
+
+def StablehloGatherOptionsStartStartIndexMapVector(builder, numElems):
+    return builder.StartVector(8, numElems, 8)
+
+def StablehloGatherOptionsAddIndexVectorDim(builder, indexVectorDim):
+    builder.PrependInt64Slot(3, indexVectorDim, 0)
+
+def StablehloGatherOptionsAddSliceSizes(builder, sliceSizes):
+    builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(sliceSizes), 0)
+
+def StablehloGatherOptionsStartSliceSizesVector(builder, numElems):
+    return builder.StartVector(8, numElems, 8)
+
+def StablehloGatherOptionsAddIndicesAreSorted(builder, indicesAreSorted):
+    builder.PrependBoolSlot(5, indicesAreSorted, 0)
+
+def StablehloGatherOptionsEnd(builder):
+    return builder.EndObject()
+
+
+try:
+    from typing import List
+except:
+    pass
+
+class StablehloGatherOptionsT(object):
+
+    # StablehloGatherOptionsT
+    def __init__(self):
+        self.offsetDims = None  # type: List[int]
+        self.collapsedSliceDims = None  # type: List[int]
+        self.startIndexMap = None  # type: List[int]
+        self.indexVectorDim = 0  # type: int
+        self.sliceSizes = None  # type: List[int]
+        self.indicesAreSorted = False  # type: bool
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        stablehloGatherOptions = StablehloGatherOptions()
+        stablehloGatherOptions.Init(buf, pos)
+        return cls.InitFromObj(stablehloGatherOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, stablehloGatherOptions):
+        x = StablehloGatherOptionsT()
+        x._UnPack(stablehloGatherOptions)
+        return x
+
+    # StablehloGatherOptionsT
+    def _UnPack(self, stablehloGatherOptions):
+        if stablehloGatherOptions is None:
+            return
+        if not stablehloGatherOptions.OffsetDimsIsNone():
+            if np is None:
+                self.offsetDims = []
+                for i in range(stablehloGatherOptions.OffsetDimsLength()):
+                    self.offsetDims.append(stablehloGatherOptions.OffsetDims(i))
+            else:
+                self.offsetDims = stablehloGatherOptions.OffsetDimsAsNumpy()
+        if not stablehloGatherOptions.CollapsedSliceDimsIsNone():
+            if np is None:
+                self.collapsedSliceDims = []
+                for i in range(stablehloGatherOptions.CollapsedSliceDimsLength()):
+                    self.collapsedSliceDims.append(stablehloGatherOptions.CollapsedSliceDims(i))
+            else:
+                self.collapsedSliceDims = stablehloGatherOptions.CollapsedSliceDimsAsNumpy()
+        if not stablehloGatherOptions.StartIndexMapIsNone():
+            if np is None:
+                self.startIndexMap = []
+                for i in range(stablehloGatherOptions.StartIndexMapLength()):
+                    self.startIndexMap.append(stablehloGatherOptions.StartIndexMap(i))
+            else:
+                self.startIndexMap = stablehloGatherOptions.StartIndexMapAsNumpy()
+        self.indexVectorDim = stablehloGatherOptions.IndexVectorDim()
+        if not stablehloGatherOptions.SliceSizesIsNone():
+            if np is None:
+                self.sliceSizes = []
+                for i in range(stablehloGatherOptions.SliceSizesLength()):
+                    self.sliceSizes.append(stablehloGatherOptions.SliceSizes(i))
+            else:
+                self.sliceSizes = stablehloGatherOptions.SliceSizesAsNumpy()
+        self.indicesAreSorted = stablehloGatherOptions.IndicesAreSorted()
+
+    # StablehloGatherOptionsT
+    def Pack(self, builder):
+        if self.offsetDims is not None:
+            if np is not None and type(self.offsetDims) is np.ndarray:
+                offsetDims = builder.CreateNumpyVector(self.offsetDims)
+            else:
+                StablehloGatherOptionsStartOffsetDimsVector(builder, len(self.offsetDims))
+                for i in reversed(range(len(self.offsetDims))):
+                    builder.PrependInt64(self.offsetDims[i])
+                offsetDims = builder.EndVector()
+        if self.collapsedSliceDims is not None:
+            if np is not None and type(self.collapsedSliceDims) is np.ndarray:
+                collapsedSliceDims = builder.CreateNumpyVector(self.collapsedSliceDims)
+            else:
+                StablehloGatherOptionsStartCollapsedSliceDimsVector(builder, len(self.collapsedSliceDims))
+                for i in reversed(range(len(self.collapsedSliceDims))):
+                    builder.PrependInt64(self.collapsedSliceDims[i])
+                collapsedSliceDims = builder.EndVector()
+        if self.startIndexMap is not None:
+            if np is not None and type(self.startIndexMap) is np.ndarray:
+                startIndexMap = builder.CreateNumpyVector(self.startIndexMap)
+            else:
+                StablehloGatherOptionsStartStartIndexMapVector(builder, len(self.startIndexMap))
+                for i in reversed(range(len(self.startIndexMap))):
+                    builder.PrependInt64(self.startIndexMap[i])
+                startIndexMap = builder.EndVector()
+        if self.sliceSizes is not None:
+            if np is not None and type(self.sliceSizes) is np.ndarray:
+                sliceSizes = builder.CreateNumpyVector(self.sliceSizes)
+            else:
+                StablehloGatherOptionsStartSliceSizesVector(builder, len(self.sliceSizes))
+                for i in reversed(range(len(self.sliceSizes))):
+                    builder.PrependInt64(self.sliceSizes[i])
+                sliceSizes = builder.EndVector()
+        StablehloGatherOptionsStart(builder)
+        if self.offsetDims is not None:
+            StablehloGatherOptionsAddOffsetDims(builder, offsetDims)
+        if self.collapsedSliceDims is not None:
+            StablehloGatherOptionsAddCollapsedSliceDims(builder, collapsedSliceDims)
+        if self.startIndexMap is not None:
+            StablehloGatherOptionsAddStartIndexMap(builder, startIndexMap)
+        StablehloGatherOptionsAddIndexVectorDim(builder, self.indexVectorDim)
+        if self.sliceSizes is not None:
+            StablehloGatherOptionsAddSliceSizes(builder, sliceSizes)
+        StablehloGatherOptionsAddIndicesAreSorted(builder, self.indicesAreSorted)
+        stablehloGatherOptions = StablehloGatherOptionsEnd(builder)
+        return stablehloGatherOptions
+
+
+class StablehloTransposeOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = StablehloTransposeOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsStablehloTransposeOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def StablehloTransposeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # StablehloTransposeOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # StablehloTransposeOptions
+    def Permutation(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
+        return 0
+
+    # StablehloTransposeOptions
+    def PermutationAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
+        return 0
+
+    # StablehloTransposeOptions
+    def PermutationLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # StablehloTransposeOptions
+    def PermutationIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        return o == 0
+
+def StablehloTransposeOptionsStart(builder):
+    builder.StartObject(1)
+
+def StablehloTransposeOptionsAddPermutation(builder, permutation):
+    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(permutation), 0)
+
+def StablehloTransposeOptionsStartPermutationVector(builder, numElems):
+    return builder.StartVector(8, numElems, 8)
+
+def StablehloTransposeOptionsEnd(builder):
+    return builder.EndObject()
+
+
+try:
+    from typing import List
+except:
+    pass
+
+class StablehloTransposeOptionsT(object):
+
+    # StablehloTransposeOptionsT
+    def __init__(self):
+        self.permutation = None  # type: List[int]
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        stablehloTransposeOptions = StablehloTransposeOptions()
+        stablehloTransposeOptions.Init(buf, pos)
+        return cls.InitFromObj(stablehloTransposeOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, stablehloTransposeOptions):
+        x = StablehloTransposeOptionsT()
+        x._UnPack(stablehloTransposeOptions)
+        return x
+
+    # StablehloTransposeOptionsT
+    def _UnPack(self, stablehloTransposeOptions):
+        if stablehloTransposeOptions is None:
+            return
+        if not stablehloTransposeOptions.PermutationIsNone():
+            if np is None:
+                self.permutation = []
+                for i in range(stablehloTransposeOptions.PermutationLength()):
+                    self.permutation.append(stablehloTransposeOptions.Permutation(i))
+            else:
+                self.permutation = stablehloTransposeOptions.PermutationAsNumpy()
+
+    # StablehloTransposeOptionsT
+    def Pack(self, builder):
+        if self.permutation is not None:
+            if np is not None and type(self.permutation) is np.ndarray:
+                permutation = builder.CreateNumpyVector(self.permutation)
+            else:
+                StablehloTransposeOptionsStartPermutationVector(builder, len(self.permutation))
+                for i in reversed(range(len(self.permutation))):
+                    builder.PrependInt64(self.permutation[i])
+                permutation = builder.EndVector()
+        StablehloTransposeOptionsStart(builder)
+        if self.permutation is not None:
+            StablehloTransposeOptionsAddPermutation(builder, permutation)
+        stablehloTransposeOptions = StablehloTransposeOptionsEnd(builder)
+        return stablehloTransposeOptions
+
+
+class StablehloDotGeneralOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = StablehloDotGeneralOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsStablehloDotGeneralOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def StablehloDotGeneralOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # StablehloDotGeneralOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # StablehloDotGeneralOptions
+    def LhsBatchingDimensions(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
+        return 0
+
+    # StablehloDotGeneralOptions
+    def LhsBatchingDimensionsAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
+        return 0
+
+    # StablehloDotGeneralOptions
+    def LhsBatchingDimensionsLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # StablehloDotGeneralOptions
+    def LhsBatchingDimensionsIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        return o == 0
+
+    # StablehloDotGeneralOptions
+    def RhsBatchingDimensions(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
+        return 0
+
+    # StablehloDotGeneralOptions
+    def RhsBatchingDimensionsAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
+        return 0
+
+    # StablehloDotGeneralOptions
+    def RhsBatchingDimensionsLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # StablehloDotGeneralOptions
+    def RhsBatchingDimensionsIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        return o == 0
+
+    # StablehloDotGeneralOptions
+    def LhsContractingDimensions(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
+        return 0
+
+    # StablehloDotGeneralOptions
+    def LhsContractingDimensionsAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
+        return 0
+
+    # StablehloDotGeneralOptions
+    def LhsContractingDimensionsLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # StablehloDotGeneralOptions
+    def LhsContractingDimensionsIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        return o == 0
+
+    # StablehloDotGeneralOptions
+    def RhsContractingDimensions(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
+        return 0
+
+    # StablehloDotGeneralOptions
+    def RhsContractingDimensionsAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
+        return 0
+
+    # StablehloDotGeneralOptions
+    def RhsContractingDimensionsLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # StablehloDotGeneralOptions
+    def RhsContractingDimensionsIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+        return o == 0
+
+    # StablehloDotGeneralOptions
+    def PrecisionConfig(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Uint32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+        return 0
+
+    # StablehloDotGeneralOptions
+    def PrecisionConfigAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint32Flags, o)
+        return 0
+
+    # StablehloDotGeneralOptions
+    def PrecisionConfigLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # StablehloDotGeneralOptions
+    def PrecisionConfigIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+        return o == 0
+
+def StablehloDotGeneralOptionsStart(builder):
+    builder.StartObject(5)
+
+def StablehloDotGeneralOptionsAddLhsBatchingDimensions(builder, lhsBatchingDimensions):
+    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(lhsBatchingDimensions), 0)
+
+def StablehloDotGeneralOptionsStartLhsBatchingDimensionsVector(builder, numElems):
+    return builder.StartVector(8, numElems, 8)
+
+def StablehloDotGeneralOptionsAddRhsBatchingDimensions(builder, rhsBatchingDimensions):
+    builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(rhsBatchingDimensions), 0)
+
+def StablehloDotGeneralOptionsStartRhsBatchingDimensionsVector(builder, numElems):
+    return builder.StartVector(8, numElems, 8)
+
+def StablehloDotGeneralOptionsAddLhsContractingDimensions(builder, lhsContractingDimensions):
+    builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(lhsContractingDimensions), 0)
+
+def StablehloDotGeneralOptionsStartLhsContractingDimensionsVector(builder, numElems):
+    return builder.StartVector(8, numElems, 8)
+
+def StablehloDotGeneralOptionsAddRhsContractingDimensions(builder, rhsContractingDimensions):
+    builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(rhsContractingDimensions), 0)
+
+def StablehloDotGeneralOptionsStartRhsContractingDimensionsVector(builder, numElems):
+    return builder.StartVector(8, numElems, 8)
+
+def StablehloDotGeneralOptionsAddPrecisionConfig(builder, precisionConfig):
+    builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(precisionConfig), 0)
+
+def StablehloDotGeneralOptionsStartPrecisionConfigVector(builder, numElems):
+    return builder.StartVector(4, numElems, 4)
+
+def StablehloDotGeneralOptionsEnd(builder):
+    return builder.EndObject()
+
+
+try:
+    from typing import List
+except:
+    pass
+
+class StablehloDotGeneralOptionsT(object):
+
+    # StablehloDotGeneralOptionsT
+    def __init__(self):
+        self.lhsBatchingDimensions = None  # type: List[int]
+        self.rhsBatchingDimensions = None  # type: List[int]
+        self.lhsContractingDimensions = None  # type: List[int]
+        self.rhsContractingDimensions = None  # type: List[int]
+        self.precisionConfig = None  # type: List[int]
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        stablehloDotGeneralOptions = StablehloDotGeneralOptions()
+        stablehloDotGeneralOptions.Init(buf, pos)
+        return cls.InitFromObj(stablehloDotGeneralOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, stablehloDotGeneralOptions):
+        x = StablehloDotGeneralOptionsT()
+        x._UnPack(stablehloDotGeneralOptions)
+        return x
+
+    # StablehloDotGeneralOptionsT
+    def _UnPack(self, stablehloDotGeneralOptions):
+        if stablehloDotGeneralOptions is None:
+            return
+        if not stablehloDotGeneralOptions.LhsBatchingDimensionsIsNone():
+            if np is None:
+                self.lhsBatchingDimensions = []
+                for i in range(stablehloDotGeneralOptions.LhsBatchingDimensionsLength()):
+                    self.lhsBatchingDimensions.append(stablehloDotGeneralOptions.LhsBatchingDimensions(i))
+            else:
+                self.lhsBatchingDimensions = stablehloDotGeneralOptions.LhsBatchingDimensionsAsNumpy()
+        if not stablehloDotGeneralOptions.RhsBatchingDimensionsIsNone():
+            if np is None:
+                self.rhsBatchingDimensions = []
+                for i in range(stablehloDotGeneralOptions.RhsBatchingDimensionsLength()):
+                    self.rhsBatchingDimensions.append(stablehloDotGeneralOptions.RhsBatchingDimensions(i))
+            else:
+                self.rhsBatchingDimensions = stablehloDotGeneralOptions.RhsBatchingDimensionsAsNumpy()
+        if not stablehloDotGeneralOptions.LhsContractingDimensionsIsNone():
+            if np is None:
+                self.lhsContractingDimensions = []
+                for i in range(stablehloDotGeneralOptions.LhsContractingDimensionsLength()):
+                    self.lhsContractingDimensions.append(stablehloDotGeneralOptions.LhsContractingDimensions(i))
+            else:
+                self.lhsContractingDimensions = stablehloDotGeneralOptions.LhsContractingDimensionsAsNumpy()
+        if not stablehloDotGeneralOptions.RhsContractingDimensionsIsNone():
+            if np is None:
+                self.rhsContractingDimensions = []
+                for i in range(stablehloDotGeneralOptions.RhsContractingDimensionsLength()):
+                    self.rhsContractingDimensions.append(stablehloDotGeneralOptions.RhsContractingDimensions(i))
+            else:
+                self.rhsContractingDimensions = stablehloDotGeneralOptions.RhsContractingDimensionsAsNumpy()
+        if not stablehloDotGeneralOptions.PrecisionConfigIsNone():
+            if np is None:
+                self.precisionConfig = []
+                for i in range(stablehloDotGeneralOptions.PrecisionConfigLength()):
+                    self.precisionConfig.append(stablehloDotGeneralOptions.PrecisionConfig(i))
+            else:
+                self.precisionConfig = stablehloDotGeneralOptions.PrecisionConfigAsNumpy()
+
+    # StablehloDotGeneralOptionsT
+    def Pack(self, builder):
+        if self.lhsBatchingDimensions is not None:
+            if np is not None and type(self.lhsBatchingDimensions) is np.ndarray:
+                lhsBatchingDimensions = builder.CreateNumpyVector(self.lhsBatchingDimensions)
+            else:
+                StablehloDotGeneralOptionsStartLhsBatchingDimensionsVector(builder, len(self.lhsBatchingDimensions))
+                for i in reversed(range(len(self.lhsBatchingDimensions))):
+                    builder.PrependInt64(self.lhsBatchingDimensions[i])
+                lhsBatchingDimensions = builder.EndVector()
+        if self.rhsBatchingDimensions is not None:
+            if np is not None and type(self.rhsBatchingDimensions) is np.ndarray:
+                rhsBatchingDimensions = builder.CreateNumpyVector(self.rhsBatchingDimensions)
+            else:
+                StablehloDotGeneralOptionsStartRhsBatchingDimensionsVector(builder, len(self.rhsBatchingDimensions))
+                for i in reversed(range(len(self.rhsBatchingDimensions))):
+                    builder.PrependInt64(self.rhsBatchingDimensions[i])
+                rhsBatchingDimensions = builder.EndVector()
+        if self.lhsContractingDimensions is not None:
+            if np is not None and type(self.lhsContractingDimensions) is np.ndarray:
+                lhsContractingDimensions = builder.CreateNumpyVector(self.lhsContractingDimensions)
+            else:
+                StablehloDotGeneralOptionsStartLhsContractingDimensionsVector(builder, len(self.lhsContractingDimensions))
+                for i in reversed(range(len(self.lhsContractingDimensions))):
+                    builder.PrependInt64(self.lhsContractingDimensions[i])
+                lhsContractingDimensions = builder.EndVector()
+        if self.rhsContractingDimensions is not None:
+            if np is not None and type(self.rhsContractingDimensions) is np.ndarray:
+                rhsContractingDimensions = builder.CreateNumpyVector(self.rhsContractingDimensions)
+            else:
+                StablehloDotGeneralOptionsStartRhsContractingDimensionsVector(builder, len(self.rhsContractingDimensions))
+                for i in reversed(range(len(self.rhsContractingDimensions))):
+                    builder.PrependInt64(self.rhsContractingDimensions[i])
+                rhsContractingDimensions = builder.EndVector()
+        if self.precisionConfig is not None:
+            if np is not None and type(self.precisionConfig) is np.ndarray:
+                precisionConfig = builder.CreateNumpyVector(self.precisionConfig)
+            else:
+                StablehloDotGeneralOptionsStartPrecisionConfigVector(builder, len(self.precisionConfig))
+                for i in reversed(range(len(self.precisionConfig))):
+                    builder.PrependUint32(self.precisionConfig[i])
+                precisionConfig = builder.EndVector()
+        StablehloDotGeneralOptionsStart(builder)
+        if self.lhsBatchingDimensions is not None:
+            StablehloDotGeneralOptionsAddLhsBatchingDimensions(builder, lhsBatchingDimensions)
+        if self.rhsBatchingDimensions is not None:
+            StablehloDotGeneralOptionsAddRhsBatchingDimensions(builder, rhsBatchingDimensions)
+        if self.lhsContractingDimensions is not None:
+            StablehloDotGeneralOptionsAddLhsContractingDimensions(builder, lhsContractingDimensions)
+        if self.rhsContractingDimensions is not None:
+            StablehloDotGeneralOptionsAddRhsContractingDimensions(builder, rhsContractingDimensions)
+        if self.precisionConfig is not None:
+            StablehloDotGeneralOptionsAddPrecisionConfig(builder, precisionConfig)
+        stablehloDotGeneralOptions = StablehloDotGeneralOptionsEnd(builder)
+        return stablehloDotGeneralOptions
+
+
+class StablehloReduceWindowOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = StablehloReduceWindowOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsStablehloReduceWindowOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def StablehloReduceWindowOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # StablehloReduceWindowOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # StablehloReduceWindowOptions
+    def WindowDimensions(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
+        return 0
+
+    # StablehloReduceWindowOptions
+    def WindowDimensionsAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
+        return 0
+
+    # StablehloReduceWindowOptions
+    def WindowDimensionsLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # StablehloReduceWindowOptions
+    def WindowDimensionsIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        return o == 0
+
+    # StablehloReduceWindowOptions
+    def WindowStrides(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
+        return 0
+
+    # StablehloReduceWindowOptions
+    def WindowStridesAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
+        return 0
+
+    # StablehloReduceWindowOptions
+    def WindowStridesLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # StablehloReduceWindowOptions
+    def WindowStridesIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        return o == 0
+
+    # StablehloReduceWindowOptions
+    def BaseDilations(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
+        return 0
+
+    # StablehloReduceWindowOptions
+    def BaseDilationsAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
+        return 0
+
+    # StablehloReduceWindowOptions
+    def BaseDilationsLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # StablehloReduceWindowOptions
+    def BaseDilationsIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        return o == 0
+
+    # StablehloReduceWindowOptions
+    def WindowDilations(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
+        return 0
+
+    # StablehloReduceWindowOptions
+    def WindowDilationsAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
+        return 0
+
+    # StablehloReduceWindowOptions
+    def WindowDilationsLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # StablehloReduceWindowOptions
+    def WindowDilationsIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+        return o == 0
+
+    # StablehloReduceWindowOptions
+    def Padding(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
+        return 0
+
+    # StablehloReduceWindowOptions
+    def PaddingAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
+        return 0
+
+    # StablehloReduceWindowOptions
+    def PaddingLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # StablehloReduceWindowOptions
+    def PaddingIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+        return o == 0
+
+    # StablehloReduceWindowOptions
+    def BodySubgraphIndex(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+def StablehloReduceWindowOptionsStart(builder):
+    builder.StartObject(6)
+
+def StablehloReduceWindowOptionsAddWindowDimensions(builder, windowDimensions):
+    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(windowDimensions), 0)
+
+def StablehloReduceWindowOptionsStartWindowDimensionsVector(builder, numElems):
+    return builder.StartVector(8, numElems, 8)
+
+def StablehloReduceWindowOptionsAddWindowStrides(builder, windowStrides):
+    builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(windowStrides), 0)
+
+def StablehloReduceWindowOptionsStartWindowStridesVector(builder, numElems):
+    return builder.StartVector(8, numElems, 8)
+
+def StablehloReduceWindowOptionsAddBaseDilations(builder, baseDilations):
+    builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(baseDilations), 0)
+
+def StablehloReduceWindowOptionsStartBaseDilationsVector(builder, numElems):
+    return builder.StartVector(8, numElems, 8)
+
+def StablehloReduceWindowOptionsAddWindowDilations(builder, windowDilations):
+    builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(windowDilations), 0)
+
+def StablehloReduceWindowOptionsStartWindowDilationsVector(builder, numElems):
+    return builder.StartVector(8, numElems, 8)
+
+def StablehloReduceWindowOptionsAddPadding(builder, padding):
+    builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(padding), 0)
+
+def StablehloReduceWindowOptionsStartPaddingVector(builder, numElems):
+    return builder.StartVector(8, numElems, 8)
+
+def StablehloReduceWindowOptionsAddBodySubgraphIndex(builder, bodySubgraphIndex):
+    builder.PrependInt32Slot(5, bodySubgraphIndex, 0)
+
+def StablehloReduceWindowOptionsEnd(builder):
+    return builder.EndObject()
+
+
+try:
+    from typing import List
+except:
+    pass
+
+class StablehloReduceWindowOptionsT(object):
+
+    # StablehloReduceWindowOptionsT
+    def __init__(self):
+        self.windowDimensions = None  # type: List[int]
+        self.windowStrides = None  # type: List[int]
+        self.baseDilations = None  # type: List[int]
+        self.windowDilations = None  # type: List[int]
+        self.padding = None  # type: List[int]
+        self.bodySubgraphIndex = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        stablehloReduceWindowOptions = StablehloReduceWindowOptions()
+        stablehloReduceWindowOptions.Init(buf, pos)
+        return cls.InitFromObj(stablehloReduceWindowOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, stablehloReduceWindowOptions):
+        x = StablehloReduceWindowOptionsT()
+        x._UnPack(stablehloReduceWindowOptions)
+        return x
+
+    # StablehloReduceWindowOptionsT
+    def _UnPack(self, stablehloReduceWindowOptions):
+        if stablehloReduceWindowOptions is None:
+            return
+        if not stablehloReduceWindowOptions.WindowDimensionsIsNone():
+            if np is None:
+                self.windowDimensions = []
+                for i in range(stablehloReduceWindowOptions.WindowDimensionsLength()):
+                    self.windowDimensions.append(stablehloReduceWindowOptions.WindowDimensions(i))
+            else:
+                self.windowDimensions = stablehloReduceWindowOptions.WindowDimensionsAsNumpy()
+        if not stablehloReduceWindowOptions.WindowStridesIsNone():
+            if np is None:
+                self.windowStrides = []
+                for i in range(stablehloReduceWindowOptions.WindowStridesLength()):
+                    self.windowStrides.append(stablehloReduceWindowOptions.WindowStrides(i))
+            else:
+                self.windowStrides = stablehloReduceWindowOptions.WindowStridesAsNumpy()
+        if not stablehloReduceWindowOptions.BaseDilationsIsNone():
+            if np is None:
+                self.baseDilations = []
+                for i in range(stablehloReduceWindowOptions.BaseDilationsLength()):
+                    self.baseDilations.append(stablehloReduceWindowOptions.BaseDilations(i))
+            else:
+                self.baseDilations = stablehloReduceWindowOptions.BaseDilationsAsNumpy()
+        if not stablehloReduceWindowOptions.WindowDilationsIsNone():
+            if np is None:
+                self.windowDilations = []
+                for i in range(stablehloReduceWindowOptions.WindowDilationsLength()):
+                    self.windowDilations.append(stablehloReduceWindowOptions.WindowDilations(i))
+            else:
+                self.windowDilations = stablehloReduceWindowOptions.WindowDilationsAsNumpy()
+        if not stablehloReduceWindowOptions.PaddingIsNone():
+            if np is None:
+                self.padding = []
+                for i in range(stablehloReduceWindowOptions.PaddingLength()):
+                    self.padding.append(stablehloReduceWindowOptions.Padding(i))
+            else:
+                self.padding = stablehloReduceWindowOptions.PaddingAsNumpy()
+        self.bodySubgraphIndex = stablehloReduceWindowOptions.BodySubgraphIndex()
+
+    # StablehloReduceWindowOptionsT
+    def Pack(self, builder):
+        if self.windowDimensions is not None:
+            if np is not None and type(self.windowDimensions) is np.ndarray:
+                windowDimensions = builder.CreateNumpyVector(self.windowDimensions)
+            else:
+                StablehloReduceWindowOptionsStartWindowDimensionsVector(builder, len(self.windowDimensions))
+                for i in reversed(range(len(self.windowDimensions))):
+                    builder.PrependInt64(self.windowDimensions[i])
+                windowDimensions = builder.EndVector()
+        if self.windowStrides is not None:
+            if np is not None and type(self.windowStrides) is np.ndarray:
+                windowStrides = builder.CreateNumpyVector(self.windowStrides)
+            else:
+                StablehloReduceWindowOptionsStartWindowStridesVector(builder, len(self.windowStrides))
+                for i in reversed(range(len(self.windowStrides))):
+                    builder.PrependInt64(self.windowStrides[i])
+                windowStrides = builder.EndVector()
+        if self.baseDilations is not None:
+            if np is not None and type(self.baseDilations) is np.ndarray:
+                baseDilations = builder.CreateNumpyVector(self.baseDilations)
+            else:
+                StablehloReduceWindowOptionsStartBaseDilationsVector(builder, len(self.baseDilations))
+                for i in reversed(range(len(self.baseDilations))):
+                    builder.PrependInt64(self.baseDilations[i])
+                baseDilations = builder.EndVector()
+        if self.windowDilations is not None:
+            if np is not None and type(self.windowDilations) is np.ndarray:
+                windowDilations = builder.CreateNumpyVector(self.windowDilations)
+            else:
+                StablehloReduceWindowOptionsStartWindowDilationsVector(builder, len(self.windowDilations))
+                for i in reversed(range(len(self.windowDilations))):
+                    builder.PrependInt64(self.windowDilations[i])
+                windowDilations = builder.EndVector()
+        if self.padding is not None:
+            if np is not None and type(self.padding) is np.ndarray:
+                padding = builder.CreateNumpyVector(self.padding)
+            else:
+                StablehloReduceWindowOptionsStartPaddingVector(builder, len(self.padding))
+                for i in reversed(range(len(self.padding))):
+                    builder.PrependInt64(self.padding[i])
+                padding = builder.EndVector()
+        StablehloReduceWindowOptionsStart(builder)
+        if self.windowDimensions is not None:
+            StablehloReduceWindowOptionsAddWindowDimensions(builder, windowDimensions)
+        if self.windowStrides is not None:
+            StablehloReduceWindowOptionsAddWindowStrides(builder, windowStrides)
+        if self.baseDilations is not None:
+            StablehloReduceWindowOptionsAddBaseDilations(builder, baseDilations)
+        if self.windowDilations is not None:
+            StablehloReduceWindowOptionsAddWindowDilations(builder, windowDilations)
+        if self.padding is not None:
+            StablehloReduceWindowOptionsAddPadding(builder, padding)
+        StablehloReduceWindowOptionsAddBodySubgraphIndex(builder, self.bodySubgraphIndex)
+        stablehloReduceWindowOptions = StablehloReduceWindowOptionsEnd(builder)
+        return stablehloReduceWindowOptions
+
+
+class StablehloWhileOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = StablehloWhileOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsStablehloWhileOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def StablehloWhileOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # StablehloWhileOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # StablehloWhileOptions
+    def CondSubgraphIndex(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # StablehloWhileOptions
+    def BodySubgraphIndex(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+def StablehloWhileOptionsStart(builder):
+    builder.StartObject(2)
+
+def StablehloWhileOptionsAddCondSubgraphIndex(builder, condSubgraphIndex):
+    builder.PrependInt32Slot(0, condSubgraphIndex, 0)
+
+def StablehloWhileOptionsAddBodySubgraphIndex(builder, bodySubgraphIndex):
+    builder.PrependInt32Slot(1, bodySubgraphIndex, 0)
+
+def StablehloWhileOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class StablehloWhileOptionsT(object):
+
+    # StablehloWhileOptionsT
+    def __init__(self):
+        self.condSubgraphIndex = 0  # type: int
+        self.bodySubgraphIndex = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        stablehloWhileOptions = StablehloWhileOptions()
+        stablehloWhileOptions.Init(buf, pos)
+        return cls.InitFromObj(stablehloWhileOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, stablehloWhileOptions):
+        x = StablehloWhileOptionsT()
+        x._UnPack(stablehloWhileOptions)
+        return x
+
+    # StablehloWhileOptionsT
+    def _UnPack(self, stablehloWhileOptions):
+        if stablehloWhileOptions is None:
+            return
+        self.condSubgraphIndex = stablehloWhileOptions.CondSubgraphIndex()
+        self.bodySubgraphIndex = stablehloWhileOptions.BodySubgraphIndex()
+
+    # StablehloWhileOptionsT
+    def Pack(self, builder):
+        StablehloWhileOptionsStart(builder)
+        StablehloWhileOptionsAddCondSubgraphIndex(builder, self.condSubgraphIndex)
+        StablehloWhileOptionsAddBodySubgraphIndex(builder, self.bodySubgraphIndex)
+        stablehloWhileOptions = StablehloWhileOptionsEnd(builder)
+        return stablehloWhileOptions
+
+
+class StablehloSortOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = StablehloSortOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsStablehloSortOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def StablehloSortOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # StablehloSortOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # StablehloSortOptions
+    def Dimension(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
+        return 0
+
+    # StablehloSortOptions
+    def IsStable(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return False
+
+    # StablehloSortOptions
+    def ComparatorSubgraphIndex(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+def StablehloSortOptionsStart(builder):
+    """Begin building a StablehloSortOptions table (3 fields)."""
+    builder.StartObject(3)
+
+def StablehloSortOptionsAddDimension(builder, dimension):
+    """Write slot 0: int64 `dimension` (default 0)."""
+    builder.PrependInt64Slot(0, dimension, 0)
+
+def StablehloSortOptionsAddIsStable(builder, isStable):
+    """Write slot 1: bool `isStable` (default 0)."""
+    builder.PrependBoolSlot(1, isStable, 0)
+
+def StablehloSortOptionsAddComparatorSubgraphIndex(builder, comparatorSubgraphIndex):
+    """Write slot 2: int32 `comparatorSubgraphIndex` (default 0)."""
+    builder.PrependInt32Slot(2, comparatorSubgraphIndex, 0)
+
+def StablehloSortOptionsEnd(builder):
+    """Finish the table; returns its offset within the builder."""
+    return builder.EndObject()
+
+
+
+class StablehloSortOptionsT(object):
+    """Mutable object API for StablehloSortOptions (unpack from / pack to a buffer)."""
+
+    # StablehloSortOptionsT
+    def __init__(self):
+        self.dimension = 0  # type: int
+        self.isStable = False  # type: bool
+        self.comparatorSubgraphIndex = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        # Wrap the raw table at `pos`, then copy its fields into a new T object.
+        stablehloSortOptions = StablehloSortOptions()
+        stablehloSortOptions.Init(buf, pos)
+        return cls.InitFromObj(stablehloSortOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        # Like InitFromBuf, but first dereference the root uoffset.
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, stablehloSortOptions):
+        x = StablehloSortOptionsT()
+        x._UnPack(stablehloSortOptions)
+        return x
+
+    # StablehloSortOptionsT
+    def _UnPack(self, stablehloSortOptions):
+        # Copy every field from the reader object; no-op on None.
+        if stablehloSortOptions is None:
+            return
+        self.dimension = stablehloSortOptions.Dimension()
+        self.isStable = stablehloSortOptions.IsStable()
+        self.comparatorSubgraphIndex = stablehloSortOptions.ComparatorSubgraphIndex()
+
+    # StablehloSortOptionsT
+    def Pack(self, builder):
+        # Serialize this object into `builder`; returns the table offset.
+        StablehloSortOptionsStart(builder)
+        StablehloSortOptionsAddDimension(builder, self.dimension)
+        StablehloSortOptionsAddIsStable(builder, self.isStable)
+        StablehloSortOptionsAddComparatorSubgraphIndex(builder, self.comparatorSubgraphIndex)
+        stablehloSortOptions = StablehloSortOptionsEnd(builder)
+        return stablehloSortOptions
+
+
+class StablehloConcatenateOptions(object):
+    """FlatBuffers-generated reader for the StablehloConcatenateOptions table.
+
+    Machine-generated -- do not edit by hand.
+    """
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        # Dereference the root uoffset at `offset`, then wrap the table.
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = StablehloConcatenateOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsStablehloConcatenateOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def StablehloConcatenateOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        # b"\x54\x46\x4C\x33" decodes to b"TFL3", the file identifier checked here.
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # StablehloConcatenateOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # StablehloConcatenateOptions
+    def Dimension(self):
+        # int64 scalar at vtable offset 4; returns 0 when the field is absent.
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
+        return 0
+
+def StablehloConcatenateOptionsStart(builder):
+    """Begin building a StablehloConcatenateOptions table (1 field)."""
+    builder.StartObject(1)
+
+def StablehloConcatenateOptionsAddDimension(builder, dimension):
+    """Write slot 0: int64 `dimension` (default 0)."""
+    builder.PrependInt64Slot(0, dimension, 0)
+
+def StablehloConcatenateOptionsEnd(builder):
+    """Finish the table; returns its offset within the builder."""
+    return builder.EndObject()
+
+
+
+class StablehloConcatenateOptionsT(object):
+    """Mutable object API for StablehloConcatenateOptions (unpack/pack helper)."""
+
+    # StablehloConcatenateOptionsT
+    def __init__(self):
+        self.dimension = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        # Wrap the raw table at `pos`, then copy its fields into a new T object.
+        stablehloConcatenateOptions = StablehloConcatenateOptions()
+        stablehloConcatenateOptions.Init(buf, pos)
+        return cls.InitFromObj(stablehloConcatenateOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        # Like InitFromBuf, but first dereference the root uoffset.
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, stablehloConcatenateOptions):
+        x = StablehloConcatenateOptionsT()
+        x._UnPack(stablehloConcatenateOptions)
+        return x
+
+    # StablehloConcatenateOptionsT
+    def _UnPack(self, stablehloConcatenateOptions):
+        # Copy every field from the reader object; no-op on None.
+        if stablehloConcatenateOptions is None:
+            return
+        self.dimension = stablehloConcatenateOptions.Dimension()
+
+    # StablehloConcatenateOptionsT
+    def Pack(self, builder):
+        # Serialize this object into `builder`; returns the table offset.
+        StablehloConcatenateOptionsStart(builder)
+        StablehloConcatenateOptionsAddDimension(builder, self.dimension)
+        stablehloConcatenateOptions = StablehloConcatenateOptionsEnd(builder)
+        return stablehloConcatenateOptions
+
+
+class StablehloBroadcastInDimOptions(object):
+    """FlatBuffers-generated reader for the StablehloBroadcastInDimOptions table.
+
+    Machine-generated -- do not edit by hand.
+    """
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        # Dereference the root uoffset at `offset`, then wrap the table.
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = StablehloBroadcastInDimOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsStablehloBroadcastInDimOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def StablehloBroadcastInDimOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        # b"\x54\x46\x4C\x33" decodes to b"TFL3", the file identifier checked here.
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # StablehloBroadcastInDimOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # StablehloBroadcastInDimOptions
+    def BroadcastDimensions(self, j):
+        # Element j of the int64 vector at vtable offset 4 (8 bytes per element).
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
+        return 0
+
+    # StablehloBroadcastInDimOptions
+    def BroadcastDimensionsAsNumpy(self):
+        # Whole vector as a numpy array; returns 0 when the field is absent.
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
+        return 0
+
+    # StablehloBroadcastInDimOptions
+    def BroadcastDimensionsLength(self):
+        # Vector length; 0 when the field is absent.
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # StablehloBroadcastInDimOptions
+    def BroadcastDimensionsIsNone(self):
+        # True when the vector field is not present in the buffer.
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        return o == 0
+
+def StablehloBroadcastInDimOptionsStart(builder):
+    """Begin building a StablehloBroadcastInDimOptions table (1 field)."""
+    builder.StartObject(1)
+
+def StablehloBroadcastInDimOptionsAddBroadcastDimensions(builder, broadcastDimensions):
+    """Write slot 0: offset to the `broadcastDimensions` vector."""
+    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(broadcastDimensions), 0)
+
+def StablehloBroadcastInDimOptionsStartBroadcastDimensionsVector(builder, numElems):
+    """Start the vector: 8-byte elements, 8-byte alignment."""
+    return builder.StartVector(8, numElems, 8)
+
+def StablehloBroadcastInDimOptionsEnd(builder):
+    """Finish the table; returns its offset within the builder."""
+    return builder.EndObject()
+
+
+try:
+    from typing import List
+except:
+    pass
+
+class StablehloBroadcastInDimOptionsT(object):
+    """Mutable object API for StablehloBroadcastInDimOptions (unpack/pack helper)."""
+
+    # StablehloBroadcastInDimOptionsT
+    def __init__(self):
+        self.broadcastDimensions = None  # type: List[int]
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        # Wrap the raw table at `pos`, then copy its fields into a new T object.
+        stablehloBroadcastInDimOptions = StablehloBroadcastInDimOptions()
+        stablehloBroadcastInDimOptions.Init(buf, pos)
+        return cls.InitFromObj(stablehloBroadcastInDimOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        # Like InitFromBuf, but first dereference the root uoffset.
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, stablehloBroadcastInDimOptions):
+        x = StablehloBroadcastInDimOptionsT()
+        x._UnPack(stablehloBroadcastInDimOptions)
+        return x
+
+    # StablehloBroadcastInDimOptionsT
+    def _UnPack(self, stablehloBroadcastInDimOptions):
+        # Copy the vector field; uses numpy when available, else a python list.
+        if stablehloBroadcastInDimOptions is None:
+            return
+        if not stablehloBroadcastInDimOptions.BroadcastDimensionsIsNone():
+            if np is None:
+                self.broadcastDimensions = []
+                for i in range(stablehloBroadcastInDimOptions.BroadcastDimensionsLength()):
+                    self.broadcastDimensions.append(stablehloBroadcastInDimOptions.BroadcastDimensions(i))
+            else:
+                self.broadcastDimensions = stablehloBroadcastInDimOptions.BroadcastDimensionsAsNumpy()
+
+    # StablehloBroadcastInDimOptionsT
+    def Pack(self, builder):
+        # Serialize this object into `builder`; returns the table offset.
+        # Vectors must be built before StartObject is called.
+        if self.broadcastDimensions is not None:
+            if np is not None and type(self.broadcastDimensions) is np.ndarray:
+                broadcastDimensions = builder.CreateNumpyVector(self.broadcastDimensions)
+            else:
+                StablehloBroadcastInDimOptionsStartBroadcastDimensionsVector(builder, len(self.broadcastDimensions))
+                for i in reversed(range(len(self.broadcastDimensions))):
+                    builder.PrependInt64(self.broadcastDimensions[i])
+                broadcastDimensions = builder.EndVector()
+        StablehloBroadcastInDimOptionsStart(builder)
+        if self.broadcastDimensions is not None:
+            StablehloBroadcastInDimOptionsAddBroadcastDimensions(builder, broadcastDimensions)
+        stablehloBroadcastInDimOptions = StablehloBroadcastInDimOptionsEnd(builder)
+        return stablehloBroadcastInDimOptions
+
+
+class StablehloCompareOptions(object):
+    """FlatBuffers-generated reader for the StablehloCompareOptions table.
+
+    Machine-generated -- do not edit by hand.
+    """
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        # Dereference the root uoffset at `offset`, then wrap the table.
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = StablehloCompareOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsStablehloCompareOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def StablehloCompareOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        # b"\x54\x46\x4C\x33" decodes to b"TFL3", the file identifier checked here.
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # StablehloCompareOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # StablehloCompareOptions
+    def ComparisonDirection(self):
+        # uint32 scalar at vtable offset 4; returns 0 when the field is absent.
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
+        return 0
+
+    # StablehloCompareOptions
+    def CompareType(self):
+        # uint32 scalar at vtable offset 6; returns 0 when the field is absent.
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
+        return 0
+
+def StablehloCompareOptionsStart(builder):
+    """Begin building a StablehloCompareOptions table (2 fields)."""
+    builder.StartObject(2)
+
+def StablehloCompareOptionsAddComparisonDirection(builder, comparisonDirection):
+    """Write slot 0: uint32 `comparisonDirection` (default 0)."""
+    builder.PrependUint32Slot(0, comparisonDirection, 0)
+
+def StablehloCompareOptionsAddCompareType(builder, compareType):
+    """Write slot 1: uint32 `compareType` (default 0)."""
+    builder.PrependUint32Slot(1, compareType, 0)
+
+def StablehloCompareOptionsEnd(builder):
+    """Finish the table; returns its offset within the builder."""
+    return builder.EndObject()
+
+
+
+class StablehloCompareOptionsT(object):
+    """Mutable object API for StablehloCompareOptions (unpack/pack helper)."""
+
+    # StablehloCompareOptionsT
+    def __init__(self):
+        self.comparisonDirection = 0  # type: int
+        self.compareType = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        # Wrap the raw table at `pos`, then copy its fields into a new T object.
+        stablehloCompareOptions = StablehloCompareOptions()
+        stablehloCompareOptions.Init(buf, pos)
+        return cls.InitFromObj(stablehloCompareOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        # Like InitFromBuf, but first dereference the root uoffset.
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, stablehloCompareOptions):
+        x = StablehloCompareOptionsT()
+        x._UnPack(stablehloCompareOptions)
+        return x
+
+    # StablehloCompareOptionsT
+    def _UnPack(self, stablehloCompareOptions):
+        # Copy every field from the reader object; no-op on None.
+        if stablehloCompareOptions is None:
+            return
+        self.comparisonDirection = stablehloCompareOptions.ComparisonDirection()
+        self.compareType = stablehloCompareOptions.CompareType()
+
+    # StablehloCompareOptionsT
+    def Pack(self, builder):
+        # Serialize this object into `builder`; returns the table offset.
+        StablehloCompareOptionsStart(builder)
+        StablehloCompareOptionsAddComparisonDirection(builder, self.comparisonDirection)
+        StablehloCompareOptionsAddCompareType(builder, self.compareType)
+        stablehloCompareOptions = StablehloCompareOptionsEnd(builder)
+        return stablehloCompareOptions
+
+
+class StablehloDynamicSliceOptions(object):
+    """FlatBuffers-generated reader for the StablehloDynamicSliceOptions table.
+
+    Machine-generated -- do not edit by hand.
+    """
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        # Dereference the root uoffset at `offset`, then wrap the table.
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = StablehloDynamicSliceOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsStablehloDynamicSliceOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def StablehloDynamicSliceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        # b"\x54\x46\x4C\x33" decodes to b"TFL3", the file identifier checked here.
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # StablehloDynamicSliceOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # StablehloDynamicSliceOptions
+    def SliceSizes(self, j):
+        # Element j of the int64 vector at vtable offset 4 (8 bytes per element).
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
+        return 0
+
+    # StablehloDynamicSliceOptions
+    def SliceSizesAsNumpy(self):
+        # Whole vector as a numpy array; returns 0 when the field is absent.
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
+        return 0
+
+    # StablehloDynamicSliceOptions
+    def SliceSizesLength(self):
+        # Vector length; 0 when the field is absent.
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # StablehloDynamicSliceOptions
+    def SliceSizesIsNone(self):
+        # True when the vector field is not present in the buffer.
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        return o == 0
+
+def StablehloDynamicSliceOptionsStart(builder):
+    """Begin building a StablehloDynamicSliceOptions table (1 field)."""
+    builder.StartObject(1)
+
+def StablehloDynamicSliceOptionsAddSliceSizes(builder, sliceSizes):
+    """Write slot 0: offset to the `sliceSizes` vector."""
+    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(sliceSizes), 0)
+
+def StablehloDynamicSliceOptionsStartSliceSizesVector(builder, numElems):
+    """Start the vector: 8-byte elements, 8-byte alignment."""
+    return builder.StartVector(8, numElems, 8)
+
+def StablehloDynamicSliceOptionsEnd(builder):
+    """Finish the table; returns its offset within the builder."""
+    return builder.EndObject()
+
+
+try:
+    from typing import List
+except:
+    pass
+
+class StablehloDynamicSliceOptionsT(object):
+    """Mutable object API for StablehloDynamicSliceOptions (unpack/pack helper)."""
+
+    # StablehloDynamicSliceOptionsT
+    def __init__(self):
+        self.sliceSizes = None  # type: List[int]
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        # Wrap the raw table at `pos`, then copy its fields into a new T object.
+        stablehloDynamicSliceOptions = StablehloDynamicSliceOptions()
+        stablehloDynamicSliceOptions.Init(buf, pos)
+        return cls.InitFromObj(stablehloDynamicSliceOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        # Like InitFromBuf, but first dereference the root uoffset.
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, stablehloDynamicSliceOptions):
+        x = StablehloDynamicSliceOptionsT()
+        x._UnPack(stablehloDynamicSliceOptions)
+        return x
+
+    # StablehloDynamicSliceOptionsT
+    def _UnPack(self, stablehloDynamicSliceOptions):
+        # Copy the vector field; uses numpy when available, else a python list.
+        if stablehloDynamicSliceOptions is None:
+            return
+        if not stablehloDynamicSliceOptions.SliceSizesIsNone():
+            if np is None:
+                self.sliceSizes = []
+                for i in range(stablehloDynamicSliceOptions.SliceSizesLength()):
+                    self.sliceSizes.append(stablehloDynamicSliceOptions.SliceSizes(i))
+            else:
+                self.sliceSizes = stablehloDynamicSliceOptions.SliceSizesAsNumpy()
+
+    # StablehloDynamicSliceOptionsT
+    def Pack(self, builder):
+        # Serialize this object into `builder`; returns the table offset.
+        # Vectors must be built before StartObject is called.
+        if self.sliceSizes is not None:
+            if np is not None and type(self.sliceSizes) is np.ndarray:
+                sliceSizes = builder.CreateNumpyVector(self.sliceSizes)
+            else:
+                StablehloDynamicSliceOptionsStartSliceSizesVector(builder, len(self.sliceSizes))
+                for i in reversed(range(len(self.sliceSizes))):
+                    builder.PrependInt64(self.sliceSizes[i])
+                sliceSizes = builder.EndVector()
+        StablehloDynamicSliceOptionsStart(builder)
+        if self.sliceSizes is not None:
+            StablehloDynamicSliceOptionsAddSliceSizes(builder, sliceSizes)
+        stablehloDynamicSliceOptions = StablehloDynamicSliceOptionsEnd(builder)
+        return stablehloDynamicSliceOptions
+
+
+class StablehloPadOptions(object):
+    """FlatBuffers-generated reader for the StablehloPadOptions table.
+
+    Three int64 vectors at vtable offsets 4, 6 and 8. Machine-generated --
+    do not edit by hand.
+    """
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        # Dereference the root uoffset at `offset`, then wrap the table.
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = StablehloPadOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsStablehloPadOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def StablehloPadOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        # b"\x54\x46\x4C\x33" decodes to b"TFL3", the file identifier checked here.
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # StablehloPadOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # StablehloPadOptions
+    def EdgePaddingLow(self, j):
+        # Element j of the int64 vector at vtable offset 4.
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
+        return 0
+
+    # StablehloPadOptions
+    def EdgePaddingLowAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
+        return 0
+
+    # StablehloPadOptions
+    def EdgePaddingLowLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # StablehloPadOptions
+    def EdgePaddingLowIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        return o == 0
+
+    # StablehloPadOptions
+    def EdgePaddingHigh(self, j):
+        # Element j of the int64 vector at vtable offset 6.
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
+        return 0
+
+    # StablehloPadOptions
+    def EdgePaddingHighAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
+        return 0
+
+    # StablehloPadOptions
+    def EdgePaddingHighLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # StablehloPadOptions
+    def EdgePaddingHighIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        return o == 0
+
+    # StablehloPadOptions
+    def InteriorPadding(self, j):
+        # Element j of the int64 vector at vtable offset 8.
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
+        return 0
+
+    # StablehloPadOptions
+    def InteriorPaddingAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
+        return 0
+
+    # StablehloPadOptions
+    def InteriorPaddingLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # StablehloPadOptions
+    def InteriorPaddingIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        return o == 0
+
+def StablehloPadOptionsStart(builder):
+    """Begin building a StablehloPadOptions table (3 fields)."""
+    builder.StartObject(3)
+
+def StablehloPadOptionsAddEdgePaddingLow(builder, edgePaddingLow):
+    """Write slot 0: offset to the `edgePaddingLow` vector."""
+    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(edgePaddingLow), 0)
+
+def StablehloPadOptionsStartEdgePaddingLowVector(builder, numElems):
+    """Start the vector: 8-byte elements, 8-byte alignment."""
+    return builder.StartVector(8, numElems, 8)
+
+def StablehloPadOptionsAddEdgePaddingHigh(builder, edgePaddingHigh):
+    """Write slot 1: offset to the `edgePaddingHigh` vector."""
+    builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(edgePaddingHigh), 0)
+
+def StablehloPadOptionsStartEdgePaddingHighVector(builder, numElems):
+    """Start the vector: 8-byte elements, 8-byte alignment."""
+    return builder.StartVector(8, numElems, 8)
+
+def StablehloPadOptionsAddInteriorPadding(builder, interiorPadding):
+    """Write slot 2: offset to the `interiorPadding` vector."""
+    builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(interiorPadding), 0)
+
+def StablehloPadOptionsStartInteriorPaddingVector(builder, numElems):
+    """Start the vector: 8-byte elements, 8-byte alignment."""
+    return builder.StartVector(8, numElems, 8)
+
+def StablehloPadOptionsEnd(builder):
+    """Finish the table; returns its offset within the builder."""
+    return builder.EndObject()
+
+
+try:
+    from typing import List
+except:
+    pass
+
+class StablehloPadOptionsT(object):
+    """Mutable object API for StablehloPadOptions (unpack/pack helper)."""
+
+    # StablehloPadOptionsT
+    def __init__(self):
+        self.edgePaddingLow = None  # type: List[int]
+        self.edgePaddingHigh = None  # type: List[int]
+        self.interiorPadding = None  # type: List[int]
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        # Wrap the raw table at `pos`, then copy its fields into a new T object.
+        stablehloPadOptions = StablehloPadOptions()
+        stablehloPadOptions.Init(buf, pos)
+        return cls.InitFromObj(stablehloPadOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        # Like InitFromBuf, but first dereference the root uoffset.
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, stablehloPadOptions):
+        x = StablehloPadOptionsT()
+        x._UnPack(stablehloPadOptions)
+        return x
+
+    # StablehloPadOptionsT
+    def _UnPack(self, stablehloPadOptions):
+        # Copy each vector field; uses numpy when available, else a python list.
+        if stablehloPadOptions is None:
+            return
+        if not stablehloPadOptions.EdgePaddingLowIsNone():
+            if np is None:
+                self.edgePaddingLow = []
+                for i in range(stablehloPadOptions.EdgePaddingLowLength()):
+                    self.edgePaddingLow.append(stablehloPadOptions.EdgePaddingLow(i))
+            else:
+                self.edgePaddingLow = stablehloPadOptions.EdgePaddingLowAsNumpy()
+        if not stablehloPadOptions.EdgePaddingHighIsNone():
+            if np is None:
+                self.edgePaddingHigh = []
+                for i in range(stablehloPadOptions.EdgePaddingHighLength()):
+                    self.edgePaddingHigh.append(stablehloPadOptions.EdgePaddingHigh(i))
+            else:
+                self.edgePaddingHigh = stablehloPadOptions.EdgePaddingHighAsNumpy()
+        if not stablehloPadOptions.InteriorPaddingIsNone():
+            if np is None:
+                self.interiorPadding = []
+                for i in range(stablehloPadOptions.InteriorPaddingLength()):
+                    self.interiorPadding.append(stablehloPadOptions.InteriorPadding(i))
+            else:
+                self.interiorPadding = stablehloPadOptions.InteriorPaddingAsNumpy()
+
+    # StablehloPadOptionsT
+    def Pack(self, builder):
+        # Serialize this object into `builder`; returns the table offset.
+        # All three vectors must be built before StartObject is called.
+        if self.edgePaddingLow is not None:
+            if np is not None and type(self.edgePaddingLow) is np.ndarray:
+                edgePaddingLow = builder.CreateNumpyVector(self.edgePaddingLow)
+            else:
+                StablehloPadOptionsStartEdgePaddingLowVector(builder, len(self.edgePaddingLow))
+                for i in reversed(range(len(self.edgePaddingLow))):
+                    builder.PrependInt64(self.edgePaddingLow[i])
+                edgePaddingLow = builder.EndVector()
+        if self.edgePaddingHigh is not None:
+            if np is not None and type(self.edgePaddingHigh) is np.ndarray:
+                edgePaddingHigh = builder.CreateNumpyVector(self.edgePaddingHigh)
+            else:
+                StablehloPadOptionsStartEdgePaddingHighVector(builder, len(self.edgePaddingHigh))
+                for i in reversed(range(len(self.edgePaddingHigh))):
+                    builder.PrependInt64(self.edgePaddingHigh[i])
+                edgePaddingHigh = builder.EndVector()
+        if self.interiorPadding is not None:
+            if np is not None and type(self.interiorPadding) is np.ndarray:
+                interiorPadding = builder.CreateNumpyVector(self.interiorPadding)
+            else:
+                StablehloPadOptionsStartInteriorPaddingVector(builder, len(self.interiorPadding))
+                for i in reversed(range(len(self.interiorPadding))):
+                    builder.PrependInt64(self.interiorPadding[i])
+                interiorPadding = builder.EndVector()
+        StablehloPadOptionsStart(builder)
+        if self.edgePaddingLow is not None:
+            StablehloPadOptionsAddEdgePaddingLow(builder, edgePaddingLow)
+        if self.edgePaddingHigh is not None:
+            StablehloPadOptionsAddEdgePaddingHigh(builder, edgePaddingHigh)
+        if self.interiorPadding is not None:
+            StablehloPadOptionsAddInteriorPadding(builder, interiorPadding)
+        stablehloPadOptions = StablehloPadOptionsEnd(builder)
+        return stablehloPadOptions
+
+
+class StablehloIotaOptions(object):
+    """FlatBuffers-generated reader for the StablehloIotaOptions table.
+
+    Machine-generated -- do not edit by hand.
+    """
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        # Dereference the root uoffset at `offset`, then wrap the table.
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = StablehloIotaOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsStablehloIotaOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def StablehloIotaOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        # b"\x54\x46\x4C\x33" decodes to b"TFL3", the file identifier checked here.
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # StablehloIotaOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # StablehloIotaOptions
+    def IotaDimension(self):
+        # int64 scalar at vtable offset 4; returns 0 when the field is absent.
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
+        return 0
+
+def StablehloIotaOptionsStart(builder):
+    """Begin building a StablehloIotaOptions table (1 field)."""
+    builder.StartObject(1)
+
+def StablehloIotaOptionsAddIotaDimension(builder, iotaDimension):
+    """Write slot 0: int64 `iotaDimension` (default 0)."""
+    builder.PrependInt64Slot(0, iotaDimension, 0)
+
+def StablehloIotaOptionsEnd(builder):
+    """Finish the table; returns its offset within the builder."""
+    return builder.EndObject()
+
+
+
+class StablehloIotaOptionsT(object):
+    """Mutable object API for StablehloIotaOptions (unpack/pack helper)."""
+
+    # StablehloIotaOptionsT
+    def __init__(self):
+        self.iotaDimension = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        # Wrap the raw table at `pos`, then copy its fields into a new T object.
+        stablehloIotaOptions = StablehloIotaOptions()
+        stablehloIotaOptions.Init(buf, pos)
+        return cls.InitFromObj(stablehloIotaOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        # Like InitFromBuf, but first dereference the root uoffset.
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, stablehloIotaOptions):
+        x = StablehloIotaOptionsT()
+        x._UnPack(stablehloIotaOptions)
+        return x
+
+    # StablehloIotaOptionsT
+    def _UnPack(self, stablehloIotaOptions):
+        # Copy every field from the reader object; no-op on None.
+        if stablehloIotaOptions is None:
+            return
+        self.iotaDimension = stablehloIotaOptions.IotaDimension()
+
+    # StablehloIotaOptionsT
+    def Pack(self, builder):
+        # Serialize this object into `builder`; returns the table offset.
+        StablehloIotaOptionsStart(builder)
+        StablehloIotaOptionsAddIotaDimension(builder, self.iotaDimension)
+        stablehloIotaOptions = StablehloIotaOptionsEnd(builder)
+        return stablehloIotaOptions
+
+
+class StablehloCustomCallOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = StablehloCustomCallOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsStablehloCustomCallOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def StablehloCustomCallOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # StablehloCustomCallOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # StablehloCustomCallOptions
+    def CallTargetName(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.String(o + self._tab.Pos)
+        return None
+
+    # StablehloCustomCallOptions
+    def HasSideEffect(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return False
+
+    # StablehloCustomCallOptions
+    def BackendConfig(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return self._tab.String(o + self._tab.Pos)
+        return None
+
+    # StablehloCustomCallOptions
+    def ApiVersion(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
    # StablehloCustomCallOptions
    def CalledComputations(self, j):
        # j-th element of the int32 `called_computations` vector (slot 12);
        # element stride is 4 bytes. Returns 0 when the vector is absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # StablehloCustomCallOptions
    def CalledComputationsAsNumpy(self):
        # Whole vector as a numpy array; note it returns 0 (not an empty
        # array) when the field is absent — callers should check IsNone first.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
        return 0

    # StablehloCustomCallOptions
    def CalledComputationsLength(self):
        # Number of elements in the vector; 0 when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StablehloCustomCallOptions
    def CalledComputationsIsNone(self):
        # True when the vector field is not present in the buffer.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        return o == 0
+
    # StablehloCustomCallOptions
    def CustomAttributes(self, j):
        # j-th byte of the uint8 `custom_attributes` vector (slot 14);
        # element stride is 1 byte. Returns 0 when the vector is absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
        return 0

    # StablehloCustomCallOptions
    def CustomAttributesAsNumpy(self):
        # Whole vector as a numpy array; returns 0 (not an empty array)
        # when the field is absent — check IsNone first.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
        return 0

    # StablehloCustomCallOptions
    def CustomAttributesLength(self):
        # Number of bytes in the vector; 0 when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StablehloCustomCallOptions
    def CustomAttributesIsNone(self):
        # True when the vector field is not present in the buffer.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        return o == 0
+
# Builder-side helpers for serializing a StablehloCustomCallOptions table.
# Slot numbers here (0..5) correspond to the reader's vtable offsets 4..14.
def StablehloCustomCallOptionsStart(builder):
    # Begin a table with 6 vtable slots.
    builder.StartObject(6)

def StablehloCustomCallOptionsAddCallTargetName(builder, callTargetName):
    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(callTargetName), 0)

def StablehloCustomCallOptionsAddHasSideEffect(builder, hasSideEffect):
    builder.PrependBoolSlot(1, hasSideEffect, 0)

def StablehloCustomCallOptionsAddBackendConfig(builder, backendConfig):
    builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(backendConfig), 0)

def StablehloCustomCallOptionsAddApiVersion(builder, apiVersion):
    builder.PrependInt32Slot(3, apiVersion, 0)

def StablehloCustomCallOptionsAddCalledComputations(builder, calledComputations):
    builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(calledComputations), 0)

def StablehloCustomCallOptionsStartCalledComputationsVector(builder, numElems):
    # int32 elements: 4-byte size, 4-byte alignment.
    return builder.StartVector(4, numElems, 4)

def StablehloCustomCallOptionsAddCustomAttributes(builder, customAttributes):
    builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(customAttributes), 0)

def StablehloCustomCallOptionsStartCustomAttributesVector(builder, numElems):
    # uint8 elements: 1-byte size, 1-byte alignment.
    return builder.StartVector(1, numElems, 1)

def StablehloCustomCallOptionsEnd(builder):
    return builder.EndObject()
+
+
# Optional typing import: `List` is only referenced in type comments below,
# so its absence must not prevent this module from loading. Narrowed from a
# bare `except:` so unrelated errors (e.g. KeyboardInterrupt) still propagate.
try:
    from typing import List
except ImportError:
    pass
+
class StablehloCustomCallOptionsT(object):
    """Mutable object-API counterpart of the StablehloCustomCallOptions table.

    Supports round-tripping: `InitFromBuf`/`InitFromPackedBuf` deserialize a
    buffer into plain Python attributes, and `Pack` serializes the attributes
    back through a flatbuffers Builder.
    """

    # StablehloCustomCallOptionsT
    def __init__(self):
        self.callTargetName = None  # type: str
        self.hasSideEffect = False  # type: bool
        self.backendConfig = None  # type: str
        self.apiVersion = 0  # type: int
        self.calledComputations = None  # type: List[int]
        self.customAttributes = None  # type: List[int]

    @classmethod
    def InitFromBuf(cls, buf, pos):
        # Wrap the raw table at `pos` and copy its fields into a new T object.
        stablehloCustomCallOptions = StablehloCustomCallOptions()
        stablehloCustomCallOptions.Init(buf, pos)
        return cls.InitFromObj(stablehloCustomCallOptions)

    @classmethod
    def InitFromPackedBuf(cls, buf, pos=0):
        # `buf` starts with a root uoffset; resolve it before initializing.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
        return cls.InitFromBuf(buf, pos+n)

    @classmethod
    def InitFromObj(cls, stablehloCustomCallOptions):
        x = StablehloCustomCallOptionsT()
        x._UnPack(stablehloCustomCallOptions)
        return x

    # StablehloCustomCallOptionsT
    def _UnPack(self, stablehloCustomCallOptions):
        # Copy every field from the accessor object. `np` is a module-level
        # alias (presumably numpy-or-None; its import is not visible in this
        # chunk — confirm against the file header): when numpy is available,
        # vectors are kept as numpy arrays instead of Python lists.
        if stablehloCustomCallOptions is None:
            return
        self.callTargetName = stablehloCustomCallOptions.CallTargetName()
        self.hasSideEffect = stablehloCustomCallOptions.HasSideEffect()
        self.backendConfig = stablehloCustomCallOptions.BackendConfig()
        self.apiVersion = stablehloCustomCallOptions.ApiVersion()
        if not stablehloCustomCallOptions.CalledComputationsIsNone():
            if np is None:
                self.calledComputations = []
                for i in range(stablehloCustomCallOptions.CalledComputationsLength()):
                    self.calledComputations.append(stablehloCustomCallOptions.CalledComputations(i))
            else:
                self.calledComputations = stablehloCustomCallOptions.CalledComputationsAsNumpy()
        if not stablehloCustomCallOptions.CustomAttributesIsNone():
            if np is None:
                self.customAttributes = []
                for i in range(stablehloCustomCallOptions.CustomAttributesLength()):
                    self.customAttributes.append(stablehloCustomCallOptions.CustomAttributes(i))
            else:
                self.customAttributes = stablehloCustomCallOptions.CustomAttributesAsNumpy()

    # StablehloCustomCallOptionsT
    def Pack(self, builder):
        # Strings and vectors must be created before StablehloCustomCallOptionsStart
        # (flatbuffers forbids nested object construction). Vector elements are
        # prepended in reverse so they end up in forward order.
        if self.callTargetName is not None:
            callTargetName = builder.CreateString(self.callTargetName)
        if self.backendConfig is not None:
            backendConfig = builder.CreateString(self.backendConfig)
        if self.calledComputations is not None:
            if np is not None and type(self.calledComputations) is np.ndarray:
                calledComputations = builder.CreateNumpyVector(self.calledComputations)
            else:
                StablehloCustomCallOptionsStartCalledComputationsVector(builder, len(self.calledComputations))
                for i in reversed(range(len(self.calledComputations))):
                    builder.PrependInt32(self.calledComputations[i])
                calledComputations = builder.EndVector()
        if self.customAttributes is not None:
            if np is not None and type(self.customAttributes) is np.ndarray:
                customAttributes = builder.CreateNumpyVector(self.customAttributes)
            else:
                StablehloCustomCallOptionsStartCustomAttributesVector(builder, len(self.customAttributes))
                for i in reversed(range(len(self.customAttributes))):
                    builder.PrependUint8(self.customAttributes[i])
                customAttributes = builder.EndVector()
        StablehloCustomCallOptionsStart(builder)
        if self.callTargetName is not None:
            StablehloCustomCallOptionsAddCallTargetName(builder, callTargetName)
        StablehloCustomCallOptionsAddHasSideEffect(builder, self.hasSideEffect)
        if self.backendConfig is not None:
            StablehloCustomCallOptionsAddBackendConfig(builder, backendConfig)
        StablehloCustomCallOptionsAddApiVersion(builder, self.apiVersion)
        if self.calledComputations is not None:
            StablehloCustomCallOptionsAddCalledComputations(builder, calledComputations)
        if self.customAttributes is not None:
            StablehloCustomCallOptionsAddCustomAttributes(builder, customAttributes)
        stablehloCustomCallOptions = StablehloCustomCallOptionsEnd(builder)
        return stablehloCustomCallOptions
+
+
class StablehloReduceOptions(object):
    """Read-only accessor over a StablehloReduceOptions flatbuffer table."""
    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        # Resolve the root-table uoffset stored at `offset`, then wrap it.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = StablehloReduceOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsStablehloReduceOptions(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)
    @classmethod
    def StablehloReduceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # b"\x54\x46\x4C\x33" is the ASCII file identifier "TFL3".
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # StablehloReduceOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # StablehloReduceOptions
    def Dimensions(self, j):
        # j-th element of the int64 `dimensions` vector (slot 4); stride 8 bytes.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
        return 0

    # StablehloReduceOptions
    def DimensionsAsNumpy(self):
        # Whole vector as a numpy array; returns 0 (not []) when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
        return 0

    # StablehloReduceOptions
    def DimensionsLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StablehloReduceOptions
    def DimensionsIsNone(self):
        # True when the `dimensions` field is not present in the buffer.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        return o == 0

    # StablehloReduceOptions
    def BodySubgraphIndex(self):
        # Int32 field at vtable slot 6; defaults to 0 when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0
+
# Builder-side helpers for serializing a StablehloReduceOptions table.
def StablehloReduceOptionsStart(builder):
    # Begin a table with 2 vtable slots.
    builder.StartObject(2)

def StablehloReduceOptionsAddDimensions(builder, dimensions):
    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(dimensions), 0)

def StablehloReduceOptionsStartDimensionsVector(builder, numElems):
    # int64 elements: 8-byte size, 8-byte alignment.
    return builder.StartVector(8, numElems, 8)

def StablehloReduceOptionsAddBodySubgraphIndex(builder, bodySubgraphIndex):
    builder.PrependInt32Slot(1, bodySubgraphIndex, 0)

def StablehloReduceOptionsEnd(builder):
    return builder.EndObject()
+
+
# Optional typing import (used only in type comments); narrowed from a bare
# `except:` so only a missing `typing` module is silenced.
try:
    from typing import List
except ImportError:
    pass
+
class StablehloReduceOptionsT(object):
    """Mutable object-API counterpart of the StablehloReduceOptions table."""

    # StablehloReduceOptionsT
    def __init__(self):
        self.dimensions = None  # type: List[int]
        self.bodySubgraphIndex = 0  # type: int

    @classmethod
    def InitFromBuf(cls, buf, pos):
        # Wrap the raw table at `pos` and copy its fields into a new T object.
        stablehloReduceOptions = StablehloReduceOptions()
        stablehloReduceOptions.Init(buf, pos)
        return cls.InitFromObj(stablehloReduceOptions)

    @classmethod
    def InitFromPackedBuf(cls, buf, pos=0):
        # `buf` starts with a root uoffset; resolve it before initializing.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
        return cls.InitFromBuf(buf, pos+n)

    @classmethod
    def InitFromObj(cls, stablehloReduceOptions):
        x = StablehloReduceOptionsT()
        x._UnPack(stablehloReduceOptions)
        return x

    # StablehloReduceOptionsT
    def _UnPack(self, stablehloReduceOptions):
        # `np` is a module-level alias (numpy or None; import not visible in
        # this chunk). With numpy, vectors are kept as numpy arrays.
        if stablehloReduceOptions is None:
            return
        if not stablehloReduceOptions.DimensionsIsNone():
            if np is None:
                self.dimensions = []
                for i in range(stablehloReduceOptions.DimensionsLength()):
                    self.dimensions.append(stablehloReduceOptions.Dimensions(i))
            else:
                self.dimensions = stablehloReduceOptions.DimensionsAsNumpy()
        self.bodySubgraphIndex = stablehloReduceOptions.BodySubgraphIndex()

    # StablehloReduceOptionsT
    def Pack(self, builder):
        # Vectors must be built before the table is started; elements are
        # prepended in reverse so they serialize in forward order.
        if self.dimensions is not None:
            if np is not None and type(self.dimensions) is np.ndarray:
                dimensions = builder.CreateNumpyVector(self.dimensions)
            else:
                StablehloReduceOptionsStartDimensionsVector(builder, len(self.dimensions))
                for i in reversed(range(len(self.dimensions))):
                    builder.PrependInt64(self.dimensions[i])
                dimensions = builder.EndVector()
        StablehloReduceOptionsStart(builder)
        if self.dimensions is not None:
            StablehloReduceOptionsAddDimensions(builder, dimensions)
        StablehloReduceOptionsAddBodySubgraphIndex(builder, self.bodySubgraphIndex)
        stablehloReduceOptions = StablehloReduceOptionsEnd(builder)
        return stablehloReduceOptions
+
+
class StablehloSliceOptions(object):
    """Read-only accessor over a StablehloSliceOptions flatbuffer table.

    Holds three parallel int64 vectors: start_indices (slot 4),
    limit_indices (slot 6) and strides (slot 8).
    """
    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        # Resolve the root-table uoffset stored at `offset`, then wrap it.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = StablehloSliceOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsStablehloSliceOptions(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)
    @classmethod
    def StablehloSliceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # b"\x54\x46\x4C\x33" is the ASCII file identifier "TFL3".
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # StablehloSliceOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # StablehloSliceOptions
    def StartIndices(self, j):
        # j-th element of the int64 `start_indices` vector; stride 8 bytes.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
        return 0

    # StablehloSliceOptions
    def StartIndicesAsNumpy(self):
        # Whole vector as a numpy array; returns 0 (not []) when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
        return 0

    # StablehloSliceOptions
    def StartIndicesLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StablehloSliceOptions
    def StartIndicesIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        return o == 0

    # StablehloSliceOptions
    def LimitIndices(self, j):
        # j-th element of the int64 `limit_indices` vector (slot 6).
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
        return 0

    # StablehloSliceOptions
    def LimitIndicesAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
        return 0

    # StablehloSliceOptions
    def LimitIndicesLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StablehloSliceOptions
    def LimitIndicesIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        return o == 0

    # StablehloSliceOptions
    def Strides(self, j):
        # j-th element of the int64 `strides` vector (slot 8).
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
        return 0

    # StablehloSliceOptions
    def StridesAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
        return 0

    # StablehloSliceOptions
    def StridesLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StablehloSliceOptions
    def StridesIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        return o == 0
+
# Builder-side helpers for serializing a StablehloSliceOptions table.
def StablehloSliceOptionsStart(builder):
    # Begin a table with 3 vtable slots.
    builder.StartObject(3)

def StablehloSliceOptionsAddStartIndices(builder, startIndices):
    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(startIndices), 0)

def StablehloSliceOptionsStartStartIndicesVector(builder, numElems):
    # int64 elements: 8-byte size, 8-byte alignment.
    return builder.StartVector(8, numElems, 8)

def StablehloSliceOptionsAddLimitIndices(builder, limitIndices):
    builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(limitIndices), 0)

def StablehloSliceOptionsStartLimitIndicesVector(builder, numElems):
    return builder.StartVector(8, numElems, 8)

def StablehloSliceOptionsAddStrides(builder, strides):
    builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(strides), 0)

def StablehloSliceOptionsStartStridesVector(builder, numElems):
    return builder.StartVector(8, numElems, 8)

def StablehloSliceOptionsEnd(builder):
    return builder.EndObject()
+
+
# Optional typing import (used only in type comments); narrowed from a bare
# `except:` so only a missing `typing` module is silenced.
try:
    from typing import List
except ImportError:
    pass
+
class StablehloSliceOptionsT(object):
    """Mutable object-API counterpart of the StablehloSliceOptions table."""

    # StablehloSliceOptionsT
    def __init__(self):
        self.startIndices = None  # type: List[int]
        self.limitIndices = None  # type: List[int]
        self.strides = None  # type: List[int]

    @classmethod
    def InitFromBuf(cls, buf, pos):
        # Wrap the raw table at `pos` and copy its fields into a new T object.
        stablehloSliceOptions = StablehloSliceOptions()
        stablehloSliceOptions.Init(buf, pos)
        return cls.InitFromObj(stablehloSliceOptions)

    @classmethod
    def InitFromPackedBuf(cls, buf, pos=0):
        # `buf` starts with a root uoffset; resolve it before initializing.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
        return cls.InitFromBuf(buf, pos+n)

    @classmethod
    def InitFromObj(cls, stablehloSliceOptions):
        x = StablehloSliceOptionsT()
        x._UnPack(stablehloSliceOptions)
        return x

    # StablehloSliceOptionsT
    def _UnPack(self, stablehloSliceOptions):
        # `np` is a module-level alias (numpy or None; import not visible in
        # this chunk). With numpy, vectors are kept as numpy arrays.
        if stablehloSliceOptions is None:
            return
        if not stablehloSliceOptions.StartIndicesIsNone():
            if np is None:
                self.startIndices = []
                for i in range(stablehloSliceOptions.StartIndicesLength()):
                    self.startIndices.append(stablehloSliceOptions.StartIndices(i))
            else:
                self.startIndices = stablehloSliceOptions.StartIndicesAsNumpy()
        if not stablehloSliceOptions.LimitIndicesIsNone():
            if np is None:
                self.limitIndices = []
                for i in range(stablehloSliceOptions.LimitIndicesLength()):
                    self.limitIndices.append(stablehloSliceOptions.LimitIndices(i))
            else:
                self.limitIndices = stablehloSliceOptions.LimitIndicesAsNumpy()
        if not stablehloSliceOptions.StridesIsNone():
            if np is None:
                self.strides = []
                for i in range(stablehloSliceOptions.StridesLength()):
                    self.strides.append(stablehloSliceOptions.Strides(i))
            else:
                self.strides = stablehloSliceOptions.StridesAsNumpy()

    # StablehloSliceOptionsT
    def Pack(self, builder):
        # Vectors must be built before the table is started; elements are
        # prepended in reverse so they serialize in forward order.
        if self.startIndices is not None:
            if np is not None and type(self.startIndices) is np.ndarray:
                startIndices = builder.CreateNumpyVector(self.startIndices)
            else:
                StablehloSliceOptionsStartStartIndicesVector(builder, len(self.startIndices))
                for i in reversed(range(len(self.startIndices))):
                    builder.PrependInt64(self.startIndices[i])
                startIndices = builder.EndVector()
        if self.limitIndices is not None:
            if np is not None and type(self.limitIndices) is np.ndarray:
                limitIndices = builder.CreateNumpyVector(self.limitIndices)
            else:
                StablehloSliceOptionsStartLimitIndicesVector(builder, len(self.limitIndices))
                for i in reversed(range(len(self.limitIndices))):
                    builder.PrependInt64(self.limitIndices[i])
                limitIndices = builder.EndVector()
        if self.strides is not None:
            if np is not None and type(self.strides) is np.ndarray:
                strides = builder.CreateNumpyVector(self.strides)
            else:
                StablehloSliceOptionsStartStridesVector(builder, len(self.strides))
                for i in reversed(range(len(self.strides))):
                    builder.PrependInt64(self.strides[i])
                strides = builder.EndVector()
        StablehloSliceOptionsStart(builder)
        if self.startIndices is not None:
            StablehloSliceOptionsAddStartIndices(builder, startIndices)
        if self.limitIndices is not None:
            StablehloSliceOptionsAddLimitIndices(builder, limitIndices)
        if self.strides is not None:
            StablehloSliceOptionsAddStrides(builder, strides)
        stablehloSliceOptions = StablehloSliceOptionsEnd(builder)
        return stablehloSliceOptions
+
+
class StablehloConvolutionOptions(object):
    """Read-only accessor over a StablehloConvolutionOptions flatbuffer table.

    Fields (vtable offsets in parentheses): int64 vectors window_strides (4),
    padding (6), lhs_dilation (8), rhs_dilation (10); bool vector
    window_reversal (12); scalar int64 dimension numbers (14-16, 20-22,
    26-28) with int64 spatial-dimension vectors (18, 24, 30); int64
    feature_group_count (32) and batch_group_count (34); uint32 vector
    precision_config (36). Scalar getters default to 0 and vector getters
    return 0 (not an empty sequence) when a field is absent.
    """
    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        # Resolve the root-table uoffset stored at `offset`, then wrap it.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = StablehloConvolutionOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsStablehloConvolutionOptions(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)
    @classmethod
    def StablehloConvolutionOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # b"\x54\x46\x4C\x33" is the ASCII file identifier "TFL3".
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # StablehloConvolutionOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # StablehloConvolutionOptions
    def WindowStrides(self, j):
        # j-th element of the int64 vector at slot 4; stride 8 bytes.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
        return 0

    # StablehloConvolutionOptions
    def WindowStridesAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
        return 0

    # StablehloConvolutionOptions
    def WindowStridesLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StablehloConvolutionOptions
    def WindowStridesIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        return o == 0

    # StablehloConvolutionOptions
    def Padding(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
        return 0

    # StablehloConvolutionOptions
    def PaddingAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
        return 0

    # StablehloConvolutionOptions
    def PaddingLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StablehloConvolutionOptions
    def PaddingIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        return o == 0

    # StablehloConvolutionOptions
    def LhsDilation(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
        return 0

    # StablehloConvolutionOptions
    def LhsDilationAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
        return 0

    # StablehloConvolutionOptions
    def LhsDilationLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StablehloConvolutionOptions
    def LhsDilationIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        return o == 0

    # StablehloConvolutionOptions
    def RhsDilation(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
        return 0

    # StablehloConvolutionOptions
    def RhsDilationAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
        return 0

    # StablehloConvolutionOptions
    def RhsDilationLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StablehloConvolutionOptions
    def RhsDilationIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        return o == 0

    # StablehloConvolutionOptions
    def WindowReversal(self, j):
        # j-th element of the bool vector at slot 12; stride 1 byte. Note the
        # raw byte is returned un-wrapped (0/1), unlike scalar bool fields.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.BoolFlags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
        return 0

    # StablehloConvolutionOptions
    def WindowReversalAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.BoolFlags, o)
        return 0

    # StablehloConvolutionOptions
    def WindowReversalLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StablehloConvolutionOptions
    def WindowReversalIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        return o == 0

    # StablehloConvolutionOptions
    def InputBatchDimension(self):
        # Scalar int64 at slot 14; defaults to 0 when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
        return 0

    # StablehloConvolutionOptions
    def InputFeatureDimension(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
        return 0

    # StablehloConvolutionOptions
    def InputSpatialDimensions(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
        return 0

    # StablehloConvolutionOptions
    def InputSpatialDimensionsAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
        return 0

    # StablehloConvolutionOptions
    def InputSpatialDimensionsLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StablehloConvolutionOptions
    def InputSpatialDimensionsIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
        return o == 0

    # StablehloConvolutionOptions
    def KernelInputFeatureDimension(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
        return 0

    # StablehloConvolutionOptions
    def KernelOutputFeatureDimension(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
        return 0

    # StablehloConvolutionOptions
    def KernelSpatialDimensions(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
        return 0

    # StablehloConvolutionOptions
    def KernelSpatialDimensionsAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
        return 0

    # StablehloConvolutionOptions
    def KernelSpatialDimensionsLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StablehloConvolutionOptions
    def KernelSpatialDimensionsIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
        return o == 0

    # StablehloConvolutionOptions
    def OutputBatchDimension(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
        return 0

    # StablehloConvolutionOptions
    def OutputFeatureDimension(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(28))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
        return 0

    # StablehloConvolutionOptions
    def OutputSpatialDimensions(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
        return 0

    # StablehloConvolutionOptions
    def OutputSpatialDimensionsAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
        return 0

    # StablehloConvolutionOptions
    def OutputSpatialDimensionsLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StablehloConvolutionOptions
    def OutputSpatialDimensionsIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30))
        return o == 0

    # StablehloConvolutionOptions
    def FeatureGroupCount(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(32))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
        return 0

    # StablehloConvolutionOptions
    def BatchGroupCount(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(34))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
        return 0

    # StablehloConvolutionOptions
    def PrecisionConfig(self, j):
        # j-th element of the uint32 vector at slot 36; stride 4 bytes.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(36))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Uint32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # StablehloConvolutionOptions
    def PrecisionConfigAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(36))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint32Flags, o)
        return 0

    # StablehloConvolutionOptions
    def PrecisionConfigLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(36))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StablehloConvolutionOptions
    def PrecisionConfigIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(36))
        return o == 0
+
# Module-level builder helpers for the StablehloConvolutionOptions table
# (generated flatbuffers writer API; slot numbers correspond to the reader's
# vtable offsets: slot k <-> offset 4 + 2*k).
def StablehloConvolutionOptionsStart(builder):
    # The table has 17 fields (slots 0-16).
    builder.StartObject(17)

def StablehloConvolutionOptionsAddWindowStrides(builder, windowStrides):
    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(windowStrides), 0)

def StablehloConvolutionOptionsStartWindowStridesVector(builder, numElems):
    # int64 elements: 8-byte size, 8-byte alignment.
    return builder.StartVector(8, numElems, 8)

def StablehloConvolutionOptionsAddPadding(builder, padding):
    builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(padding), 0)

def StablehloConvolutionOptionsStartPaddingVector(builder, numElems):
    return builder.StartVector(8, numElems, 8)

def StablehloConvolutionOptionsAddLhsDilation(builder, lhsDilation):
    builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(lhsDilation), 0)

def StablehloConvolutionOptionsStartLhsDilationVector(builder, numElems):
    return builder.StartVector(8, numElems, 8)

def StablehloConvolutionOptionsAddRhsDilation(builder, rhsDilation):
    builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(rhsDilation), 0)

def StablehloConvolutionOptionsStartRhsDilationVector(builder, numElems):
    return builder.StartVector(8, numElems, 8)

def StablehloConvolutionOptionsAddWindowReversal(builder, windowReversal):
    builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(windowReversal), 0)

def StablehloConvolutionOptionsStartWindowReversalVector(builder, numElems):
    # bool elements: 1-byte size, 1-byte alignment.
    return builder.StartVector(1, numElems, 1)

def StablehloConvolutionOptionsAddInputBatchDimension(builder, inputBatchDimension):
    builder.PrependInt64Slot(5, inputBatchDimension, 0)

def StablehloConvolutionOptionsAddInputFeatureDimension(builder, inputFeatureDimension):
    builder.PrependInt64Slot(6, inputFeatureDimension, 0)

def StablehloConvolutionOptionsAddInputSpatialDimensions(builder, inputSpatialDimensions):
    builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(inputSpatialDimensions), 0)

def StablehloConvolutionOptionsStartInputSpatialDimensionsVector(builder, numElems):
    return builder.StartVector(8, numElems, 8)

def StablehloConvolutionOptionsAddKernelInputFeatureDimension(builder, kernelInputFeatureDimension):
    builder.PrependInt64Slot(8, kernelInputFeatureDimension, 0)

def StablehloConvolutionOptionsAddKernelOutputFeatureDimension(builder, kernelOutputFeatureDimension):
    builder.PrependInt64Slot(9, kernelOutputFeatureDimension, 0)

def StablehloConvolutionOptionsAddKernelSpatialDimensions(builder, kernelSpatialDimensions):
    builder.PrependUOffsetTRelativeSlot(10, flatbuffers.number_types.UOffsetTFlags.py_type(kernelSpatialDimensions), 0)

def StablehloConvolutionOptionsStartKernelSpatialDimensionsVector(builder, numElems):
    return builder.StartVector(8, numElems, 8)

def StablehloConvolutionOptionsAddOutputBatchDimension(builder, outputBatchDimension):
    builder.PrependInt64Slot(11, outputBatchDimension, 0)

def StablehloConvolutionOptionsAddOutputFeatureDimension(builder, outputFeatureDimension):
    builder.PrependInt64Slot(12, outputFeatureDimension, 0)

def StablehloConvolutionOptionsAddOutputSpatialDimensions(builder, outputSpatialDimensions):
    builder.PrependUOffsetTRelativeSlot(13, flatbuffers.number_types.UOffsetTFlags.py_type(outputSpatialDimensions), 0)

def StablehloConvolutionOptionsStartOutputSpatialDimensionsVector(builder, numElems):
    return builder.StartVector(8, numElems, 8)

def StablehloConvolutionOptionsAddFeatureGroupCount(builder, featureGroupCount):
    builder.PrependInt64Slot(14, featureGroupCount, 0)

def StablehloConvolutionOptionsAddBatchGroupCount(builder, batchGroupCount):
    builder.PrependInt64Slot(15, batchGroupCount, 0)

def StablehloConvolutionOptionsAddPrecisionConfig(builder, precisionConfig):
    builder.PrependUOffsetTRelativeSlot(16, flatbuffers.number_types.UOffsetTFlags.py_type(precisionConfig), 0)

def StablehloConvolutionOptionsStartPrecisionConfigVector(builder, numElems):
    # uint32 elements: 4-byte size, 4-byte alignment.
    return builder.StartVector(4, numElems, 4)

def StablehloConvolutionOptionsEnd(builder):
    return builder.EndObject()
+
+
+try:
+    from typing import List
+except:
+    pass
+
class StablehloConvolutionOptionsT(object):
    """Mutable object-API companion to the StablehloConvolutionOptions accessor.

    _UnPack copies every field out of a flatbuffers accessor (vector fields
    become Python lists, or numpy arrays when numpy is available); Pack
    serializes the object back through a flatbuffers Builder and returns the
    finished table offset.
    """

    # StablehloConvolutionOptionsT
    def __init__(self):
        self.windowStrides = None  # type: List[int]
        self.padding = None  # type: List[int]
        self.lhsDilation = None  # type: List[int]
        self.rhsDilation = None  # type: List[int]
        self.windowReversal = None  # type: List[bool]
        self.inputBatchDimension = 0  # type: int
        self.inputFeatureDimension = 0  # type: int
        self.inputSpatialDimensions = None  # type: List[int]
        self.kernelInputFeatureDimension = 0  # type: int
        self.kernelOutputFeatureDimension = 0  # type: int
        self.kernelSpatialDimensions = None  # type: List[int]
        self.outputBatchDimension = 0  # type: int
        self.outputFeatureDimension = 0  # type: int
        self.outputSpatialDimensions = None  # type: List[int]
        self.featureGroupCount = 0  # type: int
        self.batchGroupCount = 0  # type: int
        self.precisionConfig = None  # type: List[int]

    @classmethod
    def InitFromBuf(cls, buf, pos):
        """Unpack the table located at buf[pos] into a new object."""
        stablehloConvolutionOptions = StablehloConvolutionOptions()
        stablehloConvolutionOptions.Init(buf, pos)
        return cls.InitFromObj(stablehloConvolutionOptions)

    @classmethod
    def InitFromPackedBuf(cls, buf, pos=0):
        """Like InitFromBuf, but first follows the root uoffset stored at buf[pos]."""
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
        return cls.InitFromBuf(buf, pos+n)

    @classmethod
    def InitFromObj(cls, stablehloConvolutionOptions):
        """Build a new object from an existing StablehloConvolutionOptions accessor."""
        x = StablehloConvolutionOptionsT()
        x._UnPack(stablehloConvolutionOptions)
        return x

    # StablehloConvolutionOptionsT
    def _UnPack(self, stablehloConvolutionOptions):
        """Copy all fields from the accessor into this object; no-op when given None."""
        if stablehloConvolutionOptions is None:
            return
        if not stablehloConvolutionOptions.WindowStridesIsNone():
            if np is None:
                self.windowStrides = []
                for i in range(stablehloConvolutionOptions.WindowStridesLength()):
                    self.windowStrides.append(stablehloConvolutionOptions.WindowStrides(i))
            else:
                self.windowStrides = stablehloConvolutionOptions.WindowStridesAsNumpy()
        if not stablehloConvolutionOptions.PaddingIsNone():
            if np is None:
                self.padding = []
                for i in range(stablehloConvolutionOptions.PaddingLength()):
                    self.padding.append(stablehloConvolutionOptions.Padding(i))
            else:
                self.padding = stablehloConvolutionOptions.PaddingAsNumpy()
        if not stablehloConvolutionOptions.LhsDilationIsNone():
            if np is None:
                self.lhsDilation = []
                for i in range(stablehloConvolutionOptions.LhsDilationLength()):
                    self.lhsDilation.append(stablehloConvolutionOptions.LhsDilation(i))
            else:
                self.lhsDilation = stablehloConvolutionOptions.LhsDilationAsNumpy()
        if not stablehloConvolutionOptions.RhsDilationIsNone():
            if np is None:
                self.rhsDilation = []
                for i in range(stablehloConvolutionOptions.RhsDilationLength()):
                    self.rhsDilation.append(stablehloConvolutionOptions.RhsDilation(i))
            else:
                self.rhsDilation = stablehloConvolutionOptions.RhsDilationAsNumpy()
        if not stablehloConvolutionOptions.WindowReversalIsNone():
            if np is None:
                self.windowReversal = []
                for i in range(stablehloConvolutionOptions.WindowReversalLength()):
                    self.windowReversal.append(stablehloConvolutionOptions.WindowReversal(i))
            else:
                self.windowReversal = stablehloConvolutionOptions.WindowReversalAsNumpy()
        self.inputBatchDimension = stablehloConvolutionOptions.InputBatchDimension()
        self.inputFeatureDimension = stablehloConvolutionOptions.InputFeatureDimension()
        if not stablehloConvolutionOptions.InputSpatialDimensionsIsNone():
            if np is None:
                self.inputSpatialDimensions = []
                for i in range(stablehloConvolutionOptions.InputSpatialDimensionsLength()):
                    self.inputSpatialDimensions.append(stablehloConvolutionOptions.InputSpatialDimensions(i))
            else:
                self.inputSpatialDimensions = stablehloConvolutionOptions.InputSpatialDimensionsAsNumpy()
        self.kernelInputFeatureDimension = stablehloConvolutionOptions.KernelInputFeatureDimension()
        self.kernelOutputFeatureDimension = stablehloConvolutionOptions.KernelOutputFeatureDimension()
        if not stablehloConvolutionOptions.KernelSpatialDimensionsIsNone():
            if np is None:
                self.kernelSpatialDimensions = []
                for i in range(stablehloConvolutionOptions.KernelSpatialDimensionsLength()):
                    self.kernelSpatialDimensions.append(stablehloConvolutionOptions.KernelSpatialDimensions(i))
            else:
                self.kernelSpatialDimensions = stablehloConvolutionOptions.KernelSpatialDimensionsAsNumpy()
        self.outputBatchDimension = stablehloConvolutionOptions.OutputBatchDimension()
        self.outputFeatureDimension = stablehloConvolutionOptions.OutputFeatureDimension()
        if not stablehloConvolutionOptions.OutputSpatialDimensionsIsNone():
            if np is None:
                self.outputSpatialDimensions = []
                for i in range(stablehloConvolutionOptions.OutputSpatialDimensionsLength()):
                    self.outputSpatialDimensions.append(stablehloConvolutionOptions.OutputSpatialDimensions(i))
            else:
                self.outputSpatialDimensions = stablehloConvolutionOptions.OutputSpatialDimensionsAsNumpy()
        self.featureGroupCount = stablehloConvolutionOptions.FeatureGroupCount()
        self.batchGroupCount = stablehloConvolutionOptions.BatchGroupCount()
        if not stablehloConvolutionOptions.PrecisionConfigIsNone():
            if np is None:
                self.precisionConfig = []
                for i in range(stablehloConvolutionOptions.PrecisionConfigLength()):
                    self.precisionConfig.append(stablehloConvolutionOptions.PrecisionConfig(i))
            else:
                self.precisionConfig = stablehloConvolutionOptions.PrecisionConfigAsNumpy()

    # StablehloConvolutionOptionsT
    def Pack(self, builder):
        """Serialize this object into builder; returns the table offset.

        Vectors are written first (builders require nested objects to be
        finished before the enclosing table starts), then the table itself.
        Elements are prepended in reverse order, per flatbuffers convention.
        """
        if self.windowStrides is not None:
            if np is not None and type(self.windowStrides) is np.ndarray:
                windowStrides = builder.CreateNumpyVector(self.windowStrides)
            else:
                StablehloConvolutionOptionsStartWindowStridesVector(builder, len(self.windowStrides))
                for i in reversed(range(len(self.windowStrides))):
                    builder.PrependInt64(self.windowStrides[i])
                windowStrides = builder.EndVector()
        if self.padding is not None:
            if np is not None and type(self.padding) is np.ndarray:
                padding = builder.CreateNumpyVector(self.padding)
            else:
                StablehloConvolutionOptionsStartPaddingVector(builder, len(self.padding))
                for i in reversed(range(len(self.padding))):
                    builder.PrependInt64(self.padding[i])
                padding = builder.EndVector()
        if self.lhsDilation is not None:
            if np is not None and type(self.lhsDilation) is np.ndarray:
                lhsDilation = builder.CreateNumpyVector(self.lhsDilation)
            else:
                StablehloConvolutionOptionsStartLhsDilationVector(builder, len(self.lhsDilation))
                for i in reversed(range(len(self.lhsDilation))):
                    builder.PrependInt64(self.lhsDilation[i])
                lhsDilation = builder.EndVector()
        if self.rhsDilation is not None:
            if np is not None and type(self.rhsDilation) is np.ndarray:
                rhsDilation = builder.CreateNumpyVector(self.rhsDilation)
            else:
                StablehloConvolutionOptionsStartRhsDilationVector(builder, len(self.rhsDilation))
                for i in reversed(range(len(self.rhsDilation))):
                    builder.PrependInt64(self.rhsDilation[i])
                rhsDilation = builder.EndVector()
        if self.windowReversal is not None:
            if np is not None and type(self.windowReversal) is np.ndarray:
                windowReversal = builder.CreateNumpyVector(self.windowReversal)
            else:
                StablehloConvolutionOptionsStartWindowReversalVector(builder, len(self.windowReversal))
                for i in reversed(range(len(self.windowReversal))):
                    builder.PrependBool(self.windowReversal[i])
                windowReversal = builder.EndVector()
        if self.inputSpatialDimensions is not None:
            if np is not None and type(self.inputSpatialDimensions) is np.ndarray:
                inputSpatialDimensions = builder.CreateNumpyVector(self.inputSpatialDimensions)
            else:
                StablehloConvolutionOptionsStartInputSpatialDimensionsVector(builder, len(self.inputSpatialDimensions))
                for i in reversed(range(len(self.inputSpatialDimensions))):
                    builder.PrependInt64(self.inputSpatialDimensions[i])
                inputSpatialDimensions = builder.EndVector()
        if self.kernelSpatialDimensions is not None:
            if np is not None and type(self.kernelSpatialDimensions) is np.ndarray:
                kernelSpatialDimensions = builder.CreateNumpyVector(self.kernelSpatialDimensions)
            else:
                StablehloConvolutionOptionsStartKernelSpatialDimensionsVector(builder, len(self.kernelSpatialDimensions))
                for i in reversed(range(len(self.kernelSpatialDimensions))):
                    builder.PrependInt64(self.kernelSpatialDimensions[i])
                kernelSpatialDimensions = builder.EndVector()
        if self.outputSpatialDimensions is not None:
            if np is not None and type(self.outputSpatialDimensions) is np.ndarray:
                outputSpatialDimensions = builder.CreateNumpyVector(self.outputSpatialDimensions)
            else:
                StablehloConvolutionOptionsStartOutputSpatialDimensionsVector(builder, len(self.outputSpatialDimensions))
                for i in reversed(range(len(self.outputSpatialDimensions))):
                    builder.PrependInt64(self.outputSpatialDimensions[i])
                outputSpatialDimensions = builder.EndVector()
        if self.precisionConfig is not None:
            if np is not None and type(self.precisionConfig) is np.ndarray:
                precisionConfig = builder.CreateNumpyVector(self.precisionConfig)
            else:
                StablehloConvolutionOptionsStartPrecisionConfigVector(builder, len(self.precisionConfig))
                for i in reversed(range(len(self.precisionConfig))):
                    builder.PrependUint32(self.precisionConfig[i])
                precisionConfig = builder.EndVector()
        StablehloConvolutionOptionsStart(builder)
        if self.windowStrides is not None:
            StablehloConvolutionOptionsAddWindowStrides(builder, windowStrides)
        if self.padding is not None:
            StablehloConvolutionOptionsAddPadding(builder, padding)
        if self.lhsDilation is not None:
            StablehloConvolutionOptionsAddLhsDilation(builder, lhsDilation)
        if self.rhsDilation is not None:
            StablehloConvolutionOptionsAddRhsDilation(builder, rhsDilation)
        if self.windowReversal is not None:
            StablehloConvolutionOptionsAddWindowReversal(builder, windowReversal)
        StablehloConvolutionOptionsAddInputBatchDimension(builder, self.inputBatchDimension)
        StablehloConvolutionOptionsAddInputFeatureDimension(builder, self.inputFeatureDimension)
        if self.inputSpatialDimensions is not None:
            StablehloConvolutionOptionsAddInputSpatialDimensions(builder, inputSpatialDimensions)
        StablehloConvolutionOptionsAddKernelInputFeatureDimension(builder, self.kernelInputFeatureDimension)
        StablehloConvolutionOptionsAddKernelOutputFeatureDimension(builder, self.kernelOutputFeatureDimension)
        if self.kernelSpatialDimensions is not None:
            StablehloConvolutionOptionsAddKernelSpatialDimensions(builder, kernelSpatialDimensions)
        StablehloConvolutionOptionsAddOutputBatchDimension(builder, self.outputBatchDimension)
        StablehloConvolutionOptionsAddOutputFeatureDimension(builder, self.outputFeatureDimension)
        if self.outputSpatialDimensions is not None:
            StablehloConvolutionOptionsAddOutputSpatialDimensions(builder, outputSpatialDimensions)
        StablehloConvolutionOptionsAddFeatureGroupCount(builder, self.featureGroupCount)
        StablehloConvolutionOptionsAddBatchGroupCount(builder, self.batchGroupCount)
        if self.precisionConfig is not None:
            StablehloConvolutionOptionsAddPrecisionConfig(builder, precisionConfig)
        stablehloConvolutionOptions = StablehloConvolutionOptionsEnd(builder)
        return stablehloConvolutionOptions
+
+
class StablehloScatterOptions(object):
    """Read-only flatbuffers accessor for the StablehloScatterOptions table.

    Scalar getters return the stored value or the field default; each vector
    field exposes element / AsNumpy / Length / IsNone getters keyed on the
    same vtable offset.
    """
    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        """Create an accessor rooted at the table referenced by the uoffset at buf[offset]."""
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = StablehloScatterOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsStablehloScatterOptions(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)
    @classmethod
    def StablehloScatterOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # b"\x54\x46\x4C\x33" is the "TFL3" file identifier.
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # StablehloScatterOptions
    def Init(self, buf, pos):
        """Attach this accessor to the table at buf[pos]."""
        self._tab = flatbuffers.table.Table(buf, pos)

    # StablehloScatterOptions
    def IndicesAreSorted(self):
        # bool field at vtable offset 4; default False.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    # StablehloScatterOptions
    def UpdateWindowDims(self, j):
        # int64 vector at vtable offset 6; element j, or 0 when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
        return 0

    # StablehloScatterOptions
    def UpdateWindowDimsAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
        return 0

    # StablehloScatterOptions
    def UpdateWindowDimsLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StablehloScatterOptions
    def UpdateWindowDimsIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        return o == 0

    # StablehloScatterOptions
    def InsertedWindowDims(self, j):
        # int64 vector at vtable offset 8; element j, or 0 when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
        return 0

    # StablehloScatterOptions
    def InsertedWindowDimsAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
        return 0

    # StablehloScatterOptions
    def InsertedWindowDimsLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StablehloScatterOptions
    def InsertedWindowDimsIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        return o == 0

    # StablehloScatterOptions
    def ScatterDimsToOperandDims(self, j):
        # int64 vector at vtable offset 10; element j, or 0 when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
        return 0

    # StablehloScatterOptions
    def ScatterDimsToOperandDimsAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
        return 0

    # StablehloScatterOptions
    def ScatterDimsToOperandDimsLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StablehloScatterOptions
    def ScatterDimsToOperandDimsIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        return o == 0

    # StablehloScatterOptions
    def IndexVectorDim(self):
        # int64 field at vtable offset 12; default 0.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
        return 0

    # StablehloScatterOptions
    def UniqueIndices(self):
        # bool field at vtable offset 14; default False.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    # StablehloScatterOptions
    def UpdateComputationSubgraphIndex(self):
        # int32 field at vtable offset 16; default 0.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0
+
# Module-level builder helpers for the StablehloScatterOptions table
# (generated flatbuffers writer API; slot k <-> reader vtable offset 4 + 2*k).
def StablehloScatterOptionsStart(builder):
    # The table has 7 fields (slots 0-6).
    builder.StartObject(7)

def StablehloScatterOptionsAddIndicesAreSorted(builder, indicesAreSorted):
    builder.PrependBoolSlot(0, indicesAreSorted, 0)

def StablehloScatterOptionsAddUpdateWindowDims(builder, updateWindowDims):
    builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(updateWindowDims), 0)

def StablehloScatterOptionsStartUpdateWindowDimsVector(builder, numElems):
    # int64 elements: 8-byte size, 8-byte alignment.
    return builder.StartVector(8, numElems, 8)

def StablehloScatterOptionsAddInsertedWindowDims(builder, insertedWindowDims):
    builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(insertedWindowDims), 0)

def StablehloScatterOptionsStartInsertedWindowDimsVector(builder, numElems):
    return builder.StartVector(8, numElems, 8)

def StablehloScatterOptionsAddScatterDimsToOperandDims(builder, scatterDimsToOperandDims):
    builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(scatterDimsToOperandDims), 0)

def StablehloScatterOptionsStartScatterDimsToOperandDimsVector(builder, numElems):
    return builder.StartVector(8, numElems, 8)

def StablehloScatterOptionsAddIndexVectorDim(builder, indexVectorDim):
    builder.PrependInt64Slot(4, indexVectorDim, 0)

def StablehloScatterOptionsAddUniqueIndices(builder, uniqueIndices):
    builder.PrependBoolSlot(5, uniqueIndices, 0)

def StablehloScatterOptionsAddUpdateComputationSubgraphIndex(builder, updateComputationSubgraphIndex):
    builder.PrependInt32Slot(6, updateComputationSubgraphIndex, 0)

def StablehloScatterOptionsEnd(builder):
    return builder.EndObject()
+
+
+try:
+    from typing import List
+except:
+    pass
+
class StablehloScatterOptionsT(object):
    """Mutable object-API companion to the StablehloScatterOptions accessor.

    _UnPack copies every field out of a flatbuffers accessor (vector fields
    become lists, or numpy arrays when numpy is available); Pack serializes
    the object back through a flatbuffers Builder and returns the table offset.
    """

    # StablehloScatterOptionsT
    def __init__(self):
        self.indicesAreSorted = False  # type: bool
        self.updateWindowDims = None  # type: List[int]
        self.insertedWindowDims = None  # type: List[int]
        self.scatterDimsToOperandDims = None  # type: List[int]
        self.indexVectorDim = 0  # type: int
        self.uniqueIndices = False  # type: bool
        self.updateComputationSubgraphIndex = 0  # type: int

    @classmethod
    def InitFromBuf(cls, buf, pos):
        """Unpack the table located at buf[pos] into a new object."""
        stablehloScatterOptions = StablehloScatterOptions()
        stablehloScatterOptions.Init(buf, pos)
        return cls.InitFromObj(stablehloScatterOptions)

    @classmethod
    def InitFromPackedBuf(cls, buf, pos=0):
        """Like InitFromBuf, but first follows the root uoffset stored at buf[pos]."""
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
        return cls.InitFromBuf(buf, pos+n)

    @classmethod
    def InitFromObj(cls, stablehloScatterOptions):
        """Build a new object from an existing StablehloScatterOptions accessor."""
        x = StablehloScatterOptionsT()
        x._UnPack(stablehloScatterOptions)
        return x

    # StablehloScatterOptionsT
    def _UnPack(self, stablehloScatterOptions):
        """Copy all fields from the accessor into this object; no-op when given None."""
        if stablehloScatterOptions is None:
            return
        self.indicesAreSorted = stablehloScatterOptions.IndicesAreSorted()
        if not stablehloScatterOptions.UpdateWindowDimsIsNone():
            if np is None:
                self.updateWindowDims = []
                for i in range(stablehloScatterOptions.UpdateWindowDimsLength()):
                    self.updateWindowDims.append(stablehloScatterOptions.UpdateWindowDims(i))
            else:
                self.updateWindowDims = stablehloScatterOptions.UpdateWindowDimsAsNumpy()
        if not stablehloScatterOptions.InsertedWindowDimsIsNone():
            if np is None:
                self.insertedWindowDims = []
                for i in range(stablehloScatterOptions.InsertedWindowDimsLength()):
                    self.insertedWindowDims.append(stablehloScatterOptions.InsertedWindowDims(i))
            else:
                self.insertedWindowDims = stablehloScatterOptions.InsertedWindowDimsAsNumpy()
        if not stablehloScatterOptions.ScatterDimsToOperandDimsIsNone():
            if np is None:
                self.scatterDimsToOperandDims = []
                for i in range(stablehloScatterOptions.ScatterDimsToOperandDimsLength()):
                    self.scatterDimsToOperandDims.append(stablehloScatterOptions.ScatterDimsToOperandDims(i))
            else:
                self.scatterDimsToOperandDims = stablehloScatterOptions.ScatterDimsToOperandDimsAsNumpy()
        self.indexVectorDim = stablehloScatterOptions.IndexVectorDim()
        self.uniqueIndices = stablehloScatterOptions.UniqueIndices()
        self.updateComputationSubgraphIndex = stablehloScatterOptions.UpdateComputationSubgraphIndex()

    # StablehloScatterOptionsT
    def Pack(self, builder):
        """Serialize this object into builder; returns the table offset.

        Vectors are written before the table starts; elements are prepended
        in reverse order, per flatbuffers convention.
        """
        if self.updateWindowDims is not None:
            if np is not None and type(self.updateWindowDims) is np.ndarray:
                updateWindowDims = builder.CreateNumpyVector(self.updateWindowDims)
            else:
                StablehloScatterOptionsStartUpdateWindowDimsVector(builder, len(self.updateWindowDims))
                for i in reversed(range(len(self.updateWindowDims))):
                    builder.PrependInt64(self.updateWindowDims[i])
                updateWindowDims = builder.EndVector()
        if self.insertedWindowDims is not None:
            if np is not None and type(self.insertedWindowDims) is np.ndarray:
                insertedWindowDims = builder.CreateNumpyVector(self.insertedWindowDims)
            else:
                StablehloScatterOptionsStartInsertedWindowDimsVector(builder, len(self.insertedWindowDims))
                for i in reversed(range(len(self.insertedWindowDims))):
                    builder.PrependInt64(self.insertedWindowDims[i])
                insertedWindowDims = builder.EndVector()
        if self.scatterDimsToOperandDims is not None:
            if np is not None and type(self.scatterDimsToOperandDims) is np.ndarray:
                scatterDimsToOperandDims = builder.CreateNumpyVector(self.scatterDimsToOperandDims)
            else:
                StablehloScatterOptionsStartScatterDimsToOperandDimsVector(builder, len(self.scatterDimsToOperandDims))
                for i in reversed(range(len(self.scatterDimsToOperandDims))):
                    builder.PrependInt64(self.scatterDimsToOperandDims[i])
                scatterDimsToOperandDims = builder.EndVector()
        StablehloScatterOptionsStart(builder)
        StablehloScatterOptionsAddIndicesAreSorted(builder, self.indicesAreSorted)
        if self.updateWindowDims is not None:
            StablehloScatterOptionsAddUpdateWindowDims(builder, updateWindowDims)
        if self.insertedWindowDims is not None:
            StablehloScatterOptionsAddInsertedWindowDims(builder, insertedWindowDims)
        if self.scatterDimsToOperandDims is not None:
            StablehloScatterOptionsAddScatterDimsToOperandDims(builder, scatterDimsToOperandDims)
        StablehloScatterOptionsAddIndexVectorDim(builder, self.indexVectorDim)
        StablehloScatterOptionsAddUniqueIndices(builder, self.uniqueIndices)
        StablehloScatterOptionsAddUpdateComputationSubgraphIndex(builder, self.updateComputationSubgraphIndex)
        stablehloScatterOptions = StablehloScatterOptionsEnd(builder)
        return stablehloScatterOptions
+
+
+# NOTE(review): flatc-generated FlatBuffers binding — regenerate from the
+# schema rather than hand-editing (comments here will be lost on regen).
+# Read-only accessor over a serialized StablehloRngBitGeneratorOptions table.
+class StablehloRngBitGeneratorOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        # Dereference the root uoffset at `offset` to locate the table.
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = StablehloRngBitGeneratorOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsStablehloRngBitGeneratorOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def StablehloRngBitGeneratorOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        # b"\x54\x46\x4C\x33" is "TFL3", the TFLite file identifier.
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # StablehloRngBitGeneratorOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # StablehloRngBitGeneratorOptions
+    def Algorithm(self):
+        # Slot 0 (vtable offset 4), int8; returns 0 when the field is absent.
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
+
+# Procedural builder helpers for writing a StablehloRngBitGeneratorOptions table.
+def StablehloRngBitGeneratorOptionsStart(builder):
+    builder.StartObject(1)
+
+def StablehloRngBitGeneratorOptionsAddAlgorithm(builder, algorithm):
+    builder.PrependInt8Slot(0, algorithm, 0)
+
+def StablehloRngBitGeneratorOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+# Mutable "object API" companion: unpacks the table into plain attributes and
+# re-packs via the builder helpers above.
+class StablehloRngBitGeneratorOptionsT(object):
+
+    # StablehloRngBitGeneratorOptionsT
+    def __init__(self):
+        self.algorithm = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        stablehloRngBitGeneratorOptions = StablehloRngBitGeneratorOptions()
+        stablehloRngBitGeneratorOptions.Init(buf, pos)
+        return cls.InitFromObj(stablehloRngBitGeneratorOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        # Dereference the root uoffset before delegating to InitFromBuf.
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, stablehloRngBitGeneratorOptions):
+        x = StablehloRngBitGeneratorOptionsT()
+        x._UnPack(stablehloRngBitGeneratorOptions)
+        return x
+
+    # StablehloRngBitGeneratorOptionsT
+    def _UnPack(self, stablehloRngBitGeneratorOptions):
+        if stablehloRngBitGeneratorOptions is None:
+            return
+        self.algorithm = stablehloRngBitGeneratorOptions.Algorithm()
+
+    # StablehloRngBitGeneratorOptionsT
+    def Pack(self, builder):
+        StablehloRngBitGeneratorOptionsStart(builder)
+        StablehloRngBitGeneratorOptionsAddAlgorithm(builder, self.algorithm)
+        stablehloRngBitGeneratorOptions = StablehloRngBitGeneratorOptionsEnd(builder)
+        return stablehloRngBitGeneratorOptions
+
+
+# NOTE(review): flatc-generated FlatBuffers binding — regenerate from the
+# schema rather than hand-editing.
+# Read-only accessor over a serialized Conv2DOptions table: padding, strides,
+# fused activation, dilation factors, and quantized bias type.
+class Conv2DOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        # Dereference the root uoffset at `offset` to locate the table.
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = Conv2DOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsConv2DOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def Conv2DOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        # b"\x54\x46\x4C\x33" is "TFL3", the TFLite file identifier.
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # Conv2DOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # Conv2DOptions
+    def Padding(self):
+        # Slot 0 (vtable offset 4), int8 enum; 0 when absent.
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
+
+    # Conv2DOptions
+    def StrideW(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # Conv2DOptions
+    def StrideH(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # Conv2DOptions
+    def FusedActivationFunction(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
+
+    # Conv2DOptions
+    def DilationWFactor(self):
+        # Default is 1 (no dilation) when the field is absent.
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 1
+
+    # Conv2DOptions
+    def DilationHFactor(self):
+        # Default is 1 (no dilation) when the field is absent.
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 1
+
+    # Conv2DOptions
+    def QuantizedBiasType(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
+
+# Procedural builder helpers for writing a Conv2DOptions table (7 slots).
+def Conv2DOptionsStart(builder):
+    builder.StartObject(7)
+
+def Conv2DOptionsAddPadding(builder, padding):
+    builder.PrependInt8Slot(0, padding, 0)
+
+def Conv2DOptionsAddStrideW(builder, strideW):
+    builder.PrependInt32Slot(1, strideW, 0)
+
+def Conv2DOptionsAddStrideH(builder, strideH):
+    builder.PrependInt32Slot(2, strideH, 0)
+
+def Conv2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+    builder.PrependInt8Slot(3, fusedActivationFunction, 0)
+
+def Conv2DOptionsAddDilationWFactor(builder, dilationWFactor):
+    builder.PrependInt32Slot(4, dilationWFactor, 1)
+
+def Conv2DOptionsAddDilationHFactor(builder, dilationHFactor):
+    builder.PrependInt32Slot(5, dilationHFactor, 1)
+
+def Conv2DOptionsAddQuantizedBiasType(builder, quantizedBiasType):
+    builder.PrependInt8Slot(6, quantizedBiasType, 0)
+
+def Conv2DOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+# Mutable "object API" companion for Conv2DOptions.
+class Conv2DOptionsT(object):
+
+    # Conv2DOptionsT
+    def __init__(self):
+        self.padding = 0  # type: int
+        self.strideW = 0  # type: int
+        self.strideH = 0  # type: int
+        self.fusedActivationFunction = 0  # type: int
+        self.dilationWFactor = 1  # type: int
+        self.dilationHFactor = 1  # type: int
+        self.quantizedBiasType = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        conv2Doptions = Conv2DOptions()
+        conv2Doptions.Init(buf, pos)
+        return cls.InitFromObj(conv2Doptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        # Dereference the root uoffset before delegating to InitFromBuf.
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, conv2Doptions):
+        x = Conv2DOptionsT()
+        x._UnPack(conv2Doptions)
+        return x
+
+    # Conv2DOptionsT
+    def _UnPack(self, conv2Doptions):
+        if conv2Doptions is None:
+            return
+        self.padding = conv2Doptions.Padding()
+        self.strideW = conv2Doptions.StrideW()
+        self.strideH = conv2Doptions.StrideH()
+        self.fusedActivationFunction = conv2Doptions.FusedActivationFunction()
+        self.dilationWFactor = conv2Doptions.DilationWFactor()
+        self.dilationHFactor = conv2Doptions.DilationHFactor()
+        self.quantizedBiasType = conv2Doptions.QuantizedBiasType()
+
+    # Conv2DOptionsT
+    def Pack(self, builder):
+        Conv2DOptionsStart(builder)
+        Conv2DOptionsAddPadding(builder, self.padding)
+        Conv2DOptionsAddStrideW(builder, self.strideW)
+        Conv2DOptionsAddStrideH(builder, self.strideH)
+        Conv2DOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
+        Conv2DOptionsAddDilationWFactor(builder, self.dilationWFactor)
+        Conv2DOptionsAddDilationHFactor(builder, self.dilationHFactor)
+        Conv2DOptionsAddQuantizedBiasType(builder, self.quantizedBiasType)
+        conv2Doptions = Conv2DOptionsEnd(builder)
+        return conv2Doptions
+
+
+# NOTE(review): flatc-generated FlatBuffers binding — regenerate from the
+# schema rather than hand-editing.
+# Read-only accessor over a serialized Conv3DOptions table: padding, D/W/H
+# strides, fused activation, and D/W/H dilation factors.
+class Conv3DOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        # Dereference the root uoffset at `offset` to locate the table.
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = Conv3DOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsConv3DOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def Conv3DOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        # b"\x54\x46\x4C\x33" is "TFL3", the TFLite file identifier.
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # Conv3DOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # Conv3DOptions
+    def Padding(self):
+        # Slot 0 (vtable offset 4), int8 enum; 0 when absent.
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
+
+    # Conv3DOptions
+    def StrideD(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # Conv3DOptions
+    def StrideW(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # Conv3DOptions
+    def StrideH(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # Conv3DOptions
+    def FusedActivationFunction(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
+
+    # Conv3DOptions
+    def DilationDFactor(self):
+        # Default is 1 (no dilation) when the field is absent.
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 1
+
+    # Conv3DOptions
+    def DilationWFactor(self):
+        # Default is 1 (no dilation) when the field is absent.
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 1
+
+    # Conv3DOptions
+    def DilationHFactor(self):
+        # Default is 1 (no dilation) when the field is absent.
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 1
+
+# Procedural builder helpers for writing a Conv3DOptions table (8 slots).
+def Conv3DOptionsStart(builder):
+    builder.StartObject(8)
+
+def Conv3DOptionsAddPadding(builder, padding):
+    builder.PrependInt8Slot(0, padding, 0)
+
+def Conv3DOptionsAddStrideD(builder, strideD):
+    builder.PrependInt32Slot(1, strideD, 0)
+
+def Conv3DOptionsAddStrideW(builder, strideW):
+    builder.PrependInt32Slot(2, strideW, 0)
+
+def Conv3DOptionsAddStrideH(builder, strideH):
+    builder.PrependInt32Slot(3, strideH, 0)
+
+def Conv3DOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+    builder.PrependInt8Slot(4, fusedActivationFunction, 0)
+
+def Conv3DOptionsAddDilationDFactor(builder, dilationDFactor):
+    builder.PrependInt32Slot(5, dilationDFactor, 1)
+
+def Conv3DOptionsAddDilationWFactor(builder, dilationWFactor):
+    builder.PrependInt32Slot(6, dilationWFactor, 1)
+
+def Conv3DOptionsAddDilationHFactor(builder, dilationHFactor):
+    builder.PrependInt32Slot(7, dilationHFactor, 1)
+
+def Conv3DOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+# Mutable "object API" companion for Conv3DOptions.
+class Conv3DOptionsT(object):
+
+    # Conv3DOptionsT
+    def __init__(self):
+        self.padding = 0  # type: int
+        self.strideD = 0  # type: int
+        self.strideW = 0  # type: int
+        self.strideH = 0  # type: int
+        self.fusedActivationFunction = 0  # type: int
+        self.dilationDFactor = 1  # type: int
+        self.dilationWFactor = 1  # type: int
+        self.dilationHFactor = 1  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        conv3Doptions = Conv3DOptions()
+        conv3Doptions.Init(buf, pos)
+        return cls.InitFromObj(conv3Doptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        # Dereference the root uoffset before delegating to InitFromBuf.
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, conv3Doptions):
+        x = Conv3DOptionsT()
+        x._UnPack(conv3Doptions)
+        return x
+
+    # Conv3DOptionsT
+    def _UnPack(self, conv3Doptions):
+        if conv3Doptions is None:
+            return
+        self.padding = conv3Doptions.Padding()
+        self.strideD = conv3Doptions.StrideD()
+        self.strideW = conv3Doptions.StrideW()
+        self.strideH = conv3Doptions.StrideH()
+        self.fusedActivationFunction = conv3Doptions.FusedActivationFunction()
+        self.dilationDFactor = conv3Doptions.DilationDFactor()
+        self.dilationWFactor = conv3Doptions.DilationWFactor()
+        self.dilationHFactor = conv3Doptions.DilationHFactor()
+
+    # Conv3DOptionsT
+    def Pack(self, builder):
+        Conv3DOptionsStart(builder)
+        Conv3DOptionsAddPadding(builder, self.padding)
+        Conv3DOptionsAddStrideD(builder, self.strideD)
+        Conv3DOptionsAddStrideW(builder, self.strideW)
+        Conv3DOptionsAddStrideH(builder, self.strideH)
+        Conv3DOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
+        Conv3DOptionsAddDilationDFactor(builder, self.dilationDFactor)
+        Conv3DOptionsAddDilationWFactor(builder, self.dilationWFactor)
+        Conv3DOptionsAddDilationHFactor(builder, self.dilationHFactor)
+        conv3Doptions = Conv3DOptionsEnd(builder)
+        return conv3Doptions
+
+
+# NOTE(review): flatc-generated FlatBuffers binding — regenerate from the
+# schema rather than hand-editing.
+# Read-only accessor over a serialized Pool2DOptions table: padding, strides,
+# filter dimensions, and fused activation.
+class Pool2DOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        # Dereference the root uoffset at `offset` to locate the table.
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = Pool2DOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsPool2DOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def Pool2DOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        # b"\x54\x46\x4C\x33" is "TFL3", the TFLite file identifier.
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # Pool2DOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # Pool2DOptions
+    def Padding(self):
+        # Slot 0 (vtable offset 4), int8 enum; 0 when absent.
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
+
+    # Pool2DOptions
+    def StrideW(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # Pool2DOptions
+    def StrideH(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # Pool2DOptions
+    def FilterWidth(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # Pool2DOptions
+    def FilterHeight(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # Pool2DOptions
+    def FusedActivationFunction(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
+
+# Procedural builder helpers for writing a Pool2DOptions table (6 slots).
+def Pool2DOptionsStart(builder):
+    builder.StartObject(6)
+
+def Pool2DOptionsAddPadding(builder, padding):
+    builder.PrependInt8Slot(0, padding, 0)
+
+def Pool2DOptionsAddStrideW(builder, strideW):
+    builder.PrependInt32Slot(1, strideW, 0)
+
+def Pool2DOptionsAddStrideH(builder, strideH):
+    builder.PrependInt32Slot(2, strideH, 0)
+
+def Pool2DOptionsAddFilterWidth(builder, filterWidth):
+    builder.PrependInt32Slot(3, filterWidth, 0)
+
+def Pool2DOptionsAddFilterHeight(builder, filterHeight):
+    builder.PrependInt32Slot(4, filterHeight, 0)
+
+def Pool2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+    builder.PrependInt8Slot(5, fusedActivationFunction, 0)
+
+def Pool2DOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+# Mutable "object API" companion for Pool2DOptions.
+class Pool2DOptionsT(object):
+
+    # Pool2DOptionsT
+    def __init__(self):
+        self.padding = 0  # type: int
+        self.strideW = 0  # type: int
+        self.strideH = 0  # type: int
+        self.filterWidth = 0  # type: int
+        self.filterHeight = 0  # type: int
+        self.fusedActivationFunction = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        pool2Doptions = Pool2DOptions()
+        pool2Doptions.Init(buf, pos)
+        return cls.InitFromObj(pool2Doptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        # Dereference the root uoffset before delegating to InitFromBuf.
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, pool2Doptions):
+        x = Pool2DOptionsT()
+        x._UnPack(pool2Doptions)
+        return x
+
+    # Pool2DOptionsT
+    def _UnPack(self, pool2Doptions):
+        if pool2Doptions is None:
+            return
+        self.padding = pool2Doptions.Padding()
+        self.strideW = pool2Doptions.StrideW()
+        self.strideH = pool2Doptions.StrideH()
+        self.filterWidth = pool2Doptions.FilterWidth()
+        self.filterHeight = pool2Doptions.FilterHeight()
+        self.fusedActivationFunction = pool2Doptions.FusedActivationFunction()
+
+    # Pool2DOptionsT
+    def Pack(self, builder):
+        Pool2DOptionsStart(builder)
+        Pool2DOptionsAddPadding(builder, self.padding)
+        Pool2DOptionsAddStrideW(builder, self.strideW)
+        Pool2DOptionsAddStrideH(builder, self.strideH)
+        Pool2DOptionsAddFilterWidth(builder, self.filterWidth)
+        Pool2DOptionsAddFilterHeight(builder, self.filterHeight)
+        Pool2DOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
+        pool2Doptions = Pool2DOptionsEnd(builder)
+        return pool2Doptions
+
+
+# NOTE(review): flatc-generated FlatBuffers binding — regenerate from the
+# schema rather than hand-editing.
+# Read-only accessor over a serialized DepthwiseConv2DOptions table: padding,
+# strides, depth multiplier, fused activation, and dilation factors.
+class DepthwiseConv2DOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        # Dereference the root uoffset at `offset` to locate the table.
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = DepthwiseConv2DOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsDepthwiseConv2DOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def DepthwiseConv2DOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        # b"\x54\x46\x4C\x33" is "TFL3", the TFLite file identifier.
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # DepthwiseConv2DOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # DepthwiseConv2DOptions
+    def Padding(self):
+        # Slot 0 (vtable offset 4), int8 enum; 0 when absent.
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
+
+    # DepthwiseConv2DOptions
+    def StrideW(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # DepthwiseConv2DOptions
+    def StrideH(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # DepthwiseConv2DOptions
+    def DepthMultiplier(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # DepthwiseConv2DOptions
+    def FusedActivationFunction(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
+
+    # DepthwiseConv2DOptions
+    def DilationWFactor(self):
+        # Default is 1 (no dilation) when the field is absent.
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 1
+
+    # DepthwiseConv2DOptions
+    def DilationHFactor(self):
+        # Default is 1 (no dilation) when the field is absent.
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 1
+
+# Procedural builder helpers for writing a DepthwiseConv2DOptions table (7 slots).
+def DepthwiseConv2DOptionsStart(builder):
+    builder.StartObject(7)
+
+def DepthwiseConv2DOptionsAddPadding(builder, padding):
+    builder.PrependInt8Slot(0, padding, 0)
+
+def DepthwiseConv2DOptionsAddStrideW(builder, strideW):
+    builder.PrependInt32Slot(1, strideW, 0)
+
+def DepthwiseConv2DOptionsAddStrideH(builder, strideH):
+    builder.PrependInt32Slot(2, strideH, 0)
+
+def DepthwiseConv2DOptionsAddDepthMultiplier(builder, depthMultiplier):
+    builder.PrependInt32Slot(3, depthMultiplier, 0)
+
+def DepthwiseConv2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+    builder.PrependInt8Slot(4, fusedActivationFunction, 0)
+
+def DepthwiseConv2DOptionsAddDilationWFactor(builder, dilationWFactor):
+    builder.PrependInt32Slot(5, dilationWFactor, 1)
+
+def DepthwiseConv2DOptionsAddDilationHFactor(builder, dilationHFactor):
+    builder.PrependInt32Slot(6, dilationHFactor, 1)
+
+def DepthwiseConv2DOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+# Mutable "object API" companion for DepthwiseConv2DOptions.
+class DepthwiseConv2DOptionsT(object):
+
+    # DepthwiseConv2DOptionsT
+    def __init__(self):
+        self.padding = 0  # type: int
+        self.strideW = 0  # type: int
+        self.strideH = 0  # type: int
+        self.depthMultiplier = 0  # type: int
+        self.fusedActivationFunction = 0  # type: int
+        self.dilationWFactor = 1  # type: int
+        self.dilationHFactor = 1  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        depthwiseConv2Doptions = DepthwiseConv2DOptions()
+        depthwiseConv2Doptions.Init(buf, pos)
+        return cls.InitFromObj(depthwiseConv2Doptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        # Dereference the root uoffset before delegating to InitFromBuf.
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, depthwiseConv2Doptions):
+        x = DepthwiseConv2DOptionsT()
+        x._UnPack(depthwiseConv2Doptions)
+        return x
+
+    # DepthwiseConv2DOptionsT
+    def _UnPack(self, depthwiseConv2Doptions):
+        if depthwiseConv2Doptions is None:
+            return
+        self.padding = depthwiseConv2Doptions.Padding()
+        self.strideW = depthwiseConv2Doptions.StrideW()
+        self.strideH = depthwiseConv2Doptions.StrideH()
+        self.depthMultiplier = depthwiseConv2Doptions.DepthMultiplier()
+        self.fusedActivationFunction = depthwiseConv2Doptions.FusedActivationFunction()
+        self.dilationWFactor = depthwiseConv2Doptions.DilationWFactor()
+        self.dilationHFactor = depthwiseConv2Doptions.DilationHFactor()
+
+    # DepthwiseConv2DOptionsT
+    def Pack(self, builder):
+        DepthwiseConv2DOptionsStart(builder)
+        DepthwiseConv2DOptionsAddPadding(builder, self.padding)
+        DepthwiseConv2DOptionsAddStrideW(builder, self.strideW)
+        DepthwiseConv2DOptionsAddStrideH(builder, self.strideH)
+        DepthwiseConv2DOptionsAddDepthMultiplier(builder, self.depthMultiplier)
+        DepthwiseConv2DOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
+        DepthwiseConv2DOptionsAddDilationWFactor(builder, self.dilationWFactor)
+        DepthwiseConv2DOptionsAddDilationHFactor(builder, self.dilationHFactor)
+        depthwiseConv2Doptions = DepthwiseConv2DOptionsEnd(builder)
+        return depthwiseConv2Doptions
+
 
 class ConcatEmbeddingsOptions(object):
     __slots__ = ['_tab']
@@ -2280,27 +6700,28 @@
         o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
         return o == 0
 
-def ConcatEmbeddingsOptionsStart(builder): builder.StartObject(3)
-def Start(builder):
-    return ConcatEmbeddingsOptionsStart(builder)
-def ConcatEmbeddingsOptionsAddNumChannels(builder, numChannels): builder.PrependInt32Slot(0, numChannels, 0)
-def AddNumChannels(builder, numChannels):
-    return ConcatEmbeddingsOptionsAddNumChannels(builder, numChannels)
-def ConcatEmbeddingsOptionsAddNumColumnsPerChannel(builder, numColumnsPerChannel): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(numColumnsPerChannel), 0)
-def AddNumColumnsPerChannel(builder, numColumnsPerChannel):
-    return ConcatEmbeddingsOptionsAddNumColumnsPerChannel(builder, numColumnsPerChannel)
-def ConcatEmbeddingsOptionsStartNumColumnsPerChannelVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def StartNumColumnsPerChannelVector(builder, numElems):
-    return ConcatEmbeddingsOptionsStartNumColumnsPerChannelVector(builder, numElems)
-def ConcatEmbeddingsOptionsAddEmbeddingDimPerChannel(builder, embeddingDimPerChannel): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(embeddingDimPerChannel), 0)
-def AddEmbeddingDimPerChannel(builder, embeddingDimPerChannel):
-    return ConcatEmbeddingsOptionsAddEmbeddingDimPerChannel(builder, embeddingDimPerChannel)
-def ConcatEmbeddingsOptionsStartEmbeddingDimPerChannelVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def StartEmbeddingDimPerChannelVector(builder, numElems):
-    return ConcatEmbeddingsOptionsStartEmbeddingDimPerChannelVector(builder, numElems)
-def ConcatEmbeddingsOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return ConcatEmbeddingsOptionsEnd(builder)
+# Procedural builder helpers for writing a ConcatEmbeddingsOptions table
+# (3 slots; the per-channel fields are int32 vectors).
+def ConcatEmbeddingsOptionsStart(builder):
+    builder.StartObject(3)
+
+def ConcatEmbeddingsOptionsAddNumChannels(builder, numChannels):
+    builder.PrependInt32Slot(0, numChannels, 0)
+
+def ConcatEmbeddingsOptionsAddNumColumnsPerChannel(builder, numColumnsPerChannel):
+    builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(numColumnsPerChannel), 0)
+
+def ConcatEmbeddingsOptionsStartNumColumnsPerChannelVector(builder, numElems):
+    # 4-byte elements, 4-byte alignment (int32 vector).
+    return builder.StartVector(4, numElems, 4)
+
+def ConcatEmbeddingsOptionsAddEmbeddingDimPerChannel(builder, embeddingDimPerChannel):
+    builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(embeddingDimPerChannel), 0)
+
+def ConcatEmbeddingsOptionsStartEmbeddingDimPerChannelVector(builder, numElems):
+    # 4-byte elements, 4-byte alignment (int32 vector).
+    return builder.StartVector(4, numElems, 4)
+
+def ConcatEmbeddingsOptionsEnd(builder):
+    return builder.EndObject()
+
+
 try:
     from typing import List
 except:
@@ -2321,6 +6742,11 @@
         return cls.InitFromObj(concatEmbeddingsOptions)
 
    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        # Dereference the root uoffset before delegating to InitFromBuf.
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
     def InitFromObj(cls, concatEmbeddingsOptions):
         x = ConcatEmbeddingsOptionsT()
         x._UnPack(concatEmbeddingsOptions)
@@ -2372,1923 +6798,496 @@
             ConcatEmbeddingsOptionsAddEmbeddingDimPerChannel(builder, embeddingDimPerChannel)
         concatEmbeddingsOptions = ConcatEmbeddingsOptionsEnd(builder)
         return concatEmbeddingsOptions
-# automatically generated by the FlatBuffers compiler, do not modify
 
-# namespace: tflite
 
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class ConcatenationOptions(object):
+class LSHProjectionOptions(object):
     __slots__ = ['_tab']
 
     @classmethod
     def GetRootAs(cls, buf, offset=0):
         n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = ConcatenationOptions()
+        x = LSHProjectionOptions()
         x.Init(buf, n + offset)
         return x
 
     @classmethod
-    def GetRootAsConcatenationOptions(cls, buf, offset=0):
+    def GetRootAsLSHProjectionOptions(cls, buf, offset=0):
         """This method is deprecated. Please switch to GetRootAs."""
         return cls.GetRootAs(buf, offset)
     @classmethod
-    def ConcatenationOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+    def LSHProjectionOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
         return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
 
-    # ConcatenationOptions
+    # LSHProjectionOptions
     def Init(self, buf, pos):
         self._tab = flatbuffers.table.Table(buf, pos)
 
-    # ConcatenationOptions
-    def Axis(self):
+    # LSHProjectionOptions
+    def Type(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
+
+def LSHProjectionOptionsStart(builder):
+    builder.StartObject(1)
+
+def LSHProjectionOptionsAddType(builder, type):
+    builder.PrependInt8Slot(0, type, 0)
+
+def LSHProjectionOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class LSHProjectionOptionsT(object):
+
+    # LSHProjectionOptionsT
+    def __init__(self):
+        self.type = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        lshprojectionOptions = LSHProjectionOptions()
+        lshprojectionOptions.Init(buf, pos)
+        return cls.InitFromObj(lshprojectionOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, lshprojectionOptions):
+        x = LSHProjectionOptionsT()
+        x._UnPack(lshprojectionOptions)
+        return x
+
+    # LSHProjectionOptionsT
+    def _UnPack(self, lshprojectionOptions):
+        if lshprojectionOptions is None:
+            return
+        self.type = lshprojectionOptions.Type()
+
+    # LSHProjectionOptionsT
+    def Pack(self, builder):
+        LSHProjectionOptionsStart(builder)
+        LSHProjectionOptionsAddType(builder, self.type)
+        lshprojectionOptions = LSHProjectionOptionsEnd(builder)
+        return lshprojectionOptions
+
+
+class SVDFOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = SVDFOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsSVDFOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def SVDFOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # SVDFOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # SVDFOptions
+    def Rank(self):
         o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
         if o != 0:
             return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
         return 0
 
-    # ConcatenationOptions
+    # SVDFOptions
     def FusedActivationFunction(self):
         o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
         if o != 0:
             return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
         return 0
 
-def ConcatenationOptionsStart(builder): builder.StartObject(2)
-def Start(builder):
-    return ConcatenationOptionsStart(builder)
-def ConcatenationOptionsAddAxis(builder, axis): builder.PrependInt32Slot(0, axis, 0)
-def AddAxis(builder, axis):
-    return ConcatenationOptionsAddAxis(builder, axis)
-def ConcatenationOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(1, fusedActivationFunction, 0)
-def AddFusedActivationFunction(builder, fusedActivationFunction):
-    return ConcatenationOptionsAddFusedActivationFunction(builder, fusedActivationFunction)
-def ConcatenationOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return ConcatenationOptionsEnd(builder)
-
-class ConcatenationOptionsT(object):
-
-    # ConcatenationOptionsT
-    def __init__(self):
-        self.axis = 0  # type: int
-        self.fusedActivationFunction = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        concatenationOptions = ConcatenationOptions()
-        concatenationOptions.Init(buf, pos)
-        return cls.InitFromObj(concatenationOptions)
-
-    @classmethod
-    def InitFromObj(cls, concatenationOptions):
-        x = ConcatenationOptionsT()
-        x._UnPack(concatenationOptions)
-        return x
-
-    # ConcatenationOptionsT
-    def _UnPack(self, concatenationOptions):
-        if concatenationOptions is None:
-            return
-        self.axis = concatenationOptions.Axis()
-        self.fusedActivationFunction = concatenationOptions.FusedActivationFunction()
-
-    # ConcatenationOptionsT
-    def Pack(self, builder):
-        ConcatenationOptionsStart(builder)
-        ConcatenationOptionsAddAxis(builder, self.axis)
-        ConcatenationOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        concatenationOptions = ConcatenationOptionsEnd(builder)
-        return concatenationOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class Conv2DOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = Conv2DOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsConv2DOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def Conv2DOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # Conv2DOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # Conv2DOptions
-    def Padding(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # Conv2DOptions
-    def StrideW(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # Conv2DOptions
-    def StrideH(self):
+    # SVDFOptions
+    def AsymmetricQuantizeInputs(self):
         o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
         if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # Conv2DOptions
-    def FusedActivationFunction(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # Conv2DOptions
-    def DilationWFactor(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 1
-
-    # Conv2DOptions
-    def DilationHFactor(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 1
-
-def Conv2DOptionsStart(builder): builder.StartObject(6)
-def Start(builder):
-    return Conv2DOptionsStart(builder)
-def Conv2DOptionsAddPadding(builder, padding): builder.PrependInt8Slot(0, padding, 0)
-def AddPadding(builder, padding):
-    return Conv2DOptionsAddPadding(builder, padding)
-def Conv2DOptionsAddStrideW(builder, strideW): builder.PrependInt32Slot(1, strideW, 0)
-def AddStrideW(builder, strideW):
-    return Conv2DOptionsAddStrideW(builder, strideW)
-def Conv2DOptionsAddStrideH(builder, strideH): builder.PrependInt32Slot(2, strideH, 0)
-def AddStrideH(builder, strideH):
-    return Conv2DOptionsAddStrideH(builder, strideH)
-def Conv2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(3, fusedActivationFunction, 0)
-def AddFusedActivationFunction(builder, fusedActivationFunction):
-    return Conv2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction)
-def Conv2DOptionsAddDilationWFactor(builder, dilationWFactor): builder.PrependInt32Slot(4, dilationWFactor, 1)
-def AddDilationWFactor(builder, dilationWFactor):
-    return Conv2DOptionsAddDilationWFactor(builder, dilationWFactor)
-def Conv2DOptionsAddDilationHFactor(builder, dilationHFactor): builder.PrependInt32Slot(5, dilationHFactor, 1)
-def AddDilationHFactor(builder, dilationHFactor):
-    return Conv2DOptionsAddDilationHFactor(builder, dilationHFactor)
-def Conv2DOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return Conv2DOptionsEnd(builder)
-
-class Conv2DOptionsT(object):
-
-    # Conv2DOptionsT
-    def __init__(self):
-        self.padding = 0  # type: int
-        self.strideW = 0  # type: int
-        self.strideH = 0  # type: int
-        self.fusedActivationFunction = 0  # type: int
-        self.dilationWFactor = 1  # type: int
-        self.dilationHFactor = 1  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        conv2doptions = Conv2DOptions()
-        conv2doptions.Init(buf, pos)
-        return cls.InitFromObj(conv2doptions)
-
-    @classmethod
-    def InitFromObj(cls, conv2doptions):
-        x = Conv2DOptionsT()
-        x._UnPack(conv2doptions)
-        return x
-
-    # Conv2DOptionsT
-    def _UnPack(self, conv2doptions):
-        if conv2doptions is None:
-            return
-        self.padding = conv2doptions.Padding()
-        self.strideW = conv2doptions.StrideW()
-        self.strideH = conv2doptions.StrideH()
-        self.fusedActivationFunction = conv2doptions.FusedActivationFunction()
-        self.dilationWFactor = conv2doptions.DilationWFactor()
-        self.dilationHFactor = conv2doptions.DilationHFactor()
-
-    # Conv2DOptionsT
-    def Pack(self, builder):
-        Conv2DOptionsStart(builder)
-        Conv2DOptionsAddPadding(builder, self.padding)
-        Conv2DOptionsAddStrideW(builder, self.strideW)
-        Conv2DOptionsAddStrideH(builder, self.strideH)
-        Conv2DOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        Conv2DOptionsAddDilationWFactor(builder, self.dilationWFactor)
-        Conv2DOptionsAddDilationHFactor(builder, self.dilationHFactor)
-        conv2doptions = Conv2DOptionsEnd(builder)
-        return conv2doptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class Conv3DOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = Conv3DOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsConv3DOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def Conv3DOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # Conv3DOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # Conv3DOptions
-    def Padding(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # Conv3DOptions
-    def StrideD(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # Conv3DOptions
-    def StrideW(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # Conv3DOptions
-    def StrideH(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # Conv3DOptions
-    def FusedActivationFunction(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # Conv3DOptions
-    def DilationDFactor(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 1
-
-    # Conv3DOptions
-    def DilationWFactor(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 1
-
-    # Conv3DOptions
-    def DilationHFactor(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 1
-
-def Conv3DOptionsStart(builder): builder.StartObject(8)
-def Start(builder):
-    return Conv3DOptionsStart(builder)
-def Conv3DOptionsAddPadding(builder, padding): builder.PrependInt8Slot(0, padding, 0)
-def AddPadding(builder, padding):
-    return Conv3DOptionsAddPadding(builder, padding)
-def Conv3DOptionsAddStrideD(builder, strideD): builder.PrependInt32Slot(1, strideD, 0)
-def AddStrideD(builder, strideD):
-    return Conv3DOptionsAddStrideD(builder, strideD)
-def Conv3DOptionsAddStrideW(builder, strideW): builder.PrependInt32Slot(2, strideW, 0)
-def AddStrideW(builder, strideW):
-    return Conv3DOptionsAddStrideW(builder, strideW)
-def Conv3DOptionsAddStrideH(builder, strideH): builder.PrependInt32Slot(3, strideH, 0)
-def AddStrideH(builder, strideH):
-    return Conv3DOptionsAddStrideH(builder, strideH)
-def Conv3DOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(4, fusedActivationFunction, 0)
-def AddFusedActivationFunction(builder, fusedActivationFunction):
-    return Conv3DOptionsAddFusedActivationFunction(builder, fusedActivationFunction)
-def Conv3DOptionsAddDilationDFactor(builder, dilationDFactor): builder.PrependInt32Slot(5, dilationDFactor, 1)
-def AddDilationDFactor(builder, dilationDFactor):
-    return Conv3DOptionsAddDilationDFactor(builder, dilationDFactor)
-def Conv3DOptionsAddDilationWFactor(builder, dilationWFactor): builder.PrependInt32Slot(6, dilationWFactor, 1)
-def AddDilationWFactor(builder, dilationWFactor):
-    return Conv3DOptionsAddDilationWFactor(builder, dilationWFactor)
-def Conv3DOptionsAddDilationHFactor(builder, dilationHFactor): builder.PrependInt32Slot(7, dilationHFactor, 1)
-def AddDilationHFactor(builder, dilationHFactor):
-    return Conv3DOptionsAddDilationHFactor(builder, dilationHFactor)
-def Conv3DOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return Conv3DOptionsEnd(builder)
-
-class Conv3DOptionsT(object):
-
-    # Conv3DOptionsT
-    def __init__(self):
-        self.padding = 0  # type: int
-        self.strideD = 0  # type: int
-        self.strideW = 0  # type: int
-        self.strideH = 0  # type: int
-        self.fusedActivationFunction = 0  # type: int
-        self.dilationDFactor = 1  # type: int
-        self.dilationWFactor = 1  # type: int
-        self.dilationHFactor = 1  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        conv3doptions = Conv3DOptions()
-        conv3doptions.Init(buf, pos)
-        return cls.InitFromObj(conv3doptions)
-
-    @classmethod
-    def InitFromObj(cls, conv3doptions):
-        x = Conv3DOptionsT()
-        x._UnPack(conv3doptions)
-        return x
-
-    # Conv3DOptionsT
-    def _UnPack(self, conv3doptions):
-        if conv3doptions is None:
-            return
-        self.padding = conv3doptions.Padding()
-        self.strideD = conv3doptions.StrideD()
-        self.strideW = conv3doptions.StrideW()
-        self.strideH = conv3doptions.StrideH()
-        self.fusedActivationFunction = conv3doptions.FusedActivationFunction()
-        self.dilationDFactor = conv3doptions.DilationDFactor()
-        self.dilationWFactor = conv3doptions.DilationWFactor()
-        self.dilationHFactor = conv3doptions.DilationHFactor()
-
-    # Conv3DOptionsT
-    def Pack(self, builder):
-        Conv3DOptionsStart(builder)
-        Conv3DOptionsAddPadding(builder, self.padding)
-        Conv3DOptionsAddStrideD(builder, self.strideD)
-        Conv3DOptionsAddStrideW(builder, self.strideW)
-        Conv3DOptionsAddStrideH(builder, self.strideH)
-        Conv3DOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        Conv3DOptionsAddDilationDFactor(builder, self.dilationDFactor)
-        Conv3DOptionsAddDilationWFactor(builder, self.dilationWFactor)
-        Conv3DOptionsAddDilationHFactor(builder, self.dilationHFactor)
-        conv3doptions = Conv3DOptionsEnd(builder)
-        return conv3doptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class CosOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = CosOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsCosOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def CosOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # CosOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def CosOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return CosOptionsStart(builder)
-def CosOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return CosOptionsEnd(builder)
-
-class CosOptionsT(object):
-
-    # CosOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        cosOptions = CosOptions()
-        cosOptions.Init(buf, pos)
-        return cls.InitFromObj(cosOptions)
-
-    @classmethod
-    def InitFromObj(cls, cosOptions):
-        x = CosOptionsT()
-        x._UnPack(cosOptions)
-        return x
-
-    # CosOptionsT
-    def _UnPack(self, cosOptions):
-        if cosOptions is None:
-            return
-
-    # CosOptionsT
-    def Pack(self, builder):
-        CosOptionsStart(builder)
-        cosOptions = CosOptionsEnd(builder)
-        return cosOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class CumsumOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = CumsumOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsCumsumOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def CumsumOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # CumsumOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # CumsumOptions
-    def Exclusive(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
             return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
         return False
 
-    # CumsumOptions
-    def Reverse(self):
+def SVDFOptionsStart(builder):
+    builder.StartObject(3)
+
+def SVDFOptionsAddRank(builder, rank):
+    builder.PrependInt32Slot(0, rank, 0)
+
+def SVDFOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+    builder.PrependInt8Slot(1, fusedActivationFunction, 0)
+
+def SVDFOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs):
+    builder.PrependBoolSlot(2, asymmetricQuantizeInputs, 0)
+
+def SVDFOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class SVDFOptionsT(object):
+
+    # SVDFOptionsT
+    def __init__(self):
+        self.rank = 0  # type: int
+        self.fusedActivationFunction = 0  # type: int
+        self.asymmetricQuantizeInputs = False  # type: bool
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        svdfoptions = SVDFOptions()
+        svdfoptions.Init(buf, pos)
+        return cls.InitFromObj(svdfoptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, svdfoptions):
+        x = SVDFOptionsT()
+        x._UnPack(svdfoptions)
+        return x
+
+    # SVDFOptionsT
+    def _UnPack(self, svdfoptions):
+        if svdfoptions is None:
+            return
+        self.rank = svdfoptions.Rank()
+        self.fusedActivationFunction = svdfoptions.FusedActivationFunction()
+        self.asymmetricQuantizeInputs = svdfoptions.AsymmetricQuantizeInputs()
+
+    # SVDFOptionsT
+    def Pack(self, builder):
+        SVDFOptionsStart(builder)
+        SVDFOptionsAddRank(builder, self.rank)
+        SVDFOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
+        SVDFOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs)
+        svdfoptions = SVDFOptionsEnd(builder)
+        return svdfoptions
+
+
+class RNNOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = RNNOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsRNNOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def RNNOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # RNNOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # RNNOptions
+    def FusedActivationFunction(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
+
+    # RNNOptions
+    def AsymmetricQuantizeInputs(self):
         o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
         if o != 0:
             return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
         return False
 
-def CumsumOptionsStart(builder): builder.StartObject(2)
-def Start(builder):
-    return CumsumOptionsStart(builder)
-def CumsumOptionsAddExclusive(builder, exclusive): builder.PrependBoolSlot(0, exclusive, 0)
-def AddExclusive(builder, exclusive):
-    return CumsumOptionsAddExclusive(builder, exclusive)
-def CumsumOptionsAddReverse(builder, reverse): builder.PrependBoolSlot(1, reverse, 0)
-def AddReverse(builder, reverse):
-    return CumsumOptionsAddReverse(builder, reverse)
-def CumsumOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return CumsumOptionsEnd(builder)
+def RNNOptionsStart(builder):
+    builder.StartObject(2)
 
-class CumsumOptionsT(object):
+def RNNOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+    builder.PrependInt8Slot(0, fusedActivationFunction, 0)
 
-    # CumsumOptionsT
-    def __init__(self):
-        self.exclusive = False  # type: bool
-        self.reverse = False  # type: bool
+def RNNOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs):
+    builder.PrependBoolSlot(1, asymmetricQuantizeInputs, 0)
 
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        cumsumOptions = CumsumOptions()
-        cumsumOptions.Init(buf, pos)
-        return cls.InitFromObj(cumsumOptions)
+def RNNOptionsEnd(builder):
+    return builder.EndObject()
 
-    @classmethod
-    def InitFromObj(cls, cumsumOptions):
-        x = CumsumOptionsT()
-        x._UnPack(cumsumOptions)
-        return x
 
-    # CumsumOptionsT
-    def _UnPack(self, cumsumOptions):
-        if cumsumOptions is None:
-            return
-        self.exclusive = cumsumOptions.Exclusive()
-        self.reverse = cumsumOptions.Reverse()
 
-    # CumsumOptionsT
-    def Pack(self, builder):
-        CumsumOptionsStart(builder)
-        CumsumOptionsAddExclusive(builder, self.exclusive)
-        CumsumOptionsAddReverse(builder, self.reverse)
-        cumsumOptions = CumsumOptionsEnd(builder)
-        return cumsumOptions
-# automatically generated by the FlatBuffers compiler, do not modify
+class RNNOptionsT(object):
 
-# namespace: tflite
-
-class CustomOptionsFormat(object):
-    FLEXBUFFERS = 0
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class CustomQuantization(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = CustomQuantization()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsCustomQuantization(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def CustomQuantizationBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # CustomQuantization
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # CustomQuantization
-    def Custom(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
-        return 0
-
-    # CustomQuantization
-    def CustomAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
-        return 0
-
-    # CustomQuantization
-    def CustomLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # CustomQuantization
-    def CustomIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        return o == 0
-
-def CustomQuantizationStart(builder): builder.StartObject(1)
-def Start(builder):
-    return CustomQuantizationStart(builder)
-def CustomQuantizationAddCustom(builder, custom): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(custom), 0)
-def AddCustom(builder, custom):
-    return CustomQuantizationAddCustom(builder, custom)
-def CustomQuantizationStartCustomVector(builder, numElems): return builder.StartVector(1, numElems, 1)
-def StartCustomVector(builder, numElems):
-    return CustomQuantizationStartCustomVector(builder, numElems)
-def CustomQuantizationEnd(builder): return builder.EndObject()
-def End(builder):
-    return CustomQuantizationEnd(builder)
-try:
-    from typing import List
-except:
-    pass
-
-class CustomQuantizationT(object):
-
-    # CustomQuantizationT
-    def __init__(self):
-        self.custom = None  # type: List[int]
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        customQuantization = CustomQuantization()
-        customQuantization.Init(buf, pos)
-        return cls.InitFromObj(customQuantization)
-
-    @classmethod
-    def InitFromObj(cls, customQuantization):
-        x = CustomQuantizationT()
-        x._UnPack(customQuantization)
-        return x
-
-    # CustomQuantizationT
-    def _UnPack(self, customQuantization):
-        if customQuantization is None:
-            return
-        if not customQuantization.CustomIsNone():
-            if np is None:
-                self.custom = []
-                for i in range(customQuantization.CustomLength()):
-                    self.custom.append(customQuantization.Custom(i))
-            else:
-                self.custom = customQuantization.CustomAsNumpy()
-
-    # CustomQuantizationT
-    def Pack(self, builder):
-        if self.custom is not None:
-            if np is not None and type(self.custom) is np.ndarray:
-                custom = builder.CreateNumpyVector(self.custom)
-            else:
-                CustomQuantizationStartCustomVector(builder, len(self.custom))
-                for i in reversed(range(len(self.custom))):
-                    builder.PrependUint8(self.custom[i])
-                custom = builder.EndVector()
-        CustomQuantizationStart(builder)
-        if self.custom is not None:
-            CustomQuantizationAddCustom(builder, custom)
-        customQuantization = CustomQuantizationEnd(builder)
-        return customQuantization
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class DensifyOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = DensifyOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsDensifyOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def DensifyOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # DensifyOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def DensifyOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return DensifyOptionsStart(builder)
-def DensifyOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return DensifyOptionsEnd(builder)
-
-class DensifyOptionsT(object):
-
-    # DensifyOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        densifyOptions = DensifyOptions()
-        densifyOptions.Init(buf, pos)
-        return cls.InitFromObj(densifyOptions)
-
-    @classmethod
-    def InitFromObj(cls, densifyOptions):
-        x = DensifyOptionsT()
-        x._UnPack(densifyOptions)
-        return x
-
-    # DensifyOptionsT
-    def _UnPack(self, densifyOptions):
-        if densifyOptions is None:
-            return
-
-    # DensifyOptionsT
-    def Pack(self, builder):
-        DensifyOptionsStart(builder)
-        densifyOptions = DensifyOptionsEnd(builder)
-        return densifyOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class DepthToSpaceOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = DepthToSpaceOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsDepthToSpaceOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def DepthToSpaceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # DepthToSpaceOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # DepthToSpaceOptions
-    def BlockSize(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-def DepthToSpaceOptionsStart(builder): builder.StartObject(1)
-def Start(builder):
-    return DepthToSpaceOptionsStart(builder)
-def DepthToSpaceOptionsAddBlockSize(builder, blockSize): builder.PrependInt32Slot(0, blockSize, 0)
-def AddBlockSize(builder, blockSize):
-    return DepthToSpaceOptionsAddBlockSize(builder, blockSize)
-def DepthToSpaceOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return DepthToSpaceOptionsEnd(builder)
-
-class DepthToSpaceOptionsT(object):
-
-    # DepthToSpaceOptionsT
-    def __init__(self):
-        self.blockSize = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        depthToSpaceOptions = DepthToSpaceOptions()
-        depthToSpaceOptions.Init(buf, pos)
-        return cls.InitFromObj(depthToSpaceOptions)
-
-    @classmethod
-    def InitFromObj(cls, depthToSpaceOptions):
-        x = DepthToSpaceOptionsT()
-        x._UnPack(depthToSpaceOptions)
-        return x
-
-    # DepthToSpaceOptionsT
-    def _UnPack(self, depthToSpaceOptions):
-        if depthToSpaceOptions is None:
-            return
-        self.blockSize = depthToSpaceOptions.BlockSize()
-
-    # DepthToSpaceOptionsT
-    def Pack(self, builder):
-        DepthToSpaceOptionsStart(builder)
-        DepthToSpaceOptionsAddBlockSize(builder, self.blockSize)
-        depthToSpaceOptions = DepthToSpaceOptionsEnd(builder)
-        return depthToSpaceOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class DepthwiseConv2DOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = DepthwiseConv2DOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsDepthwiseConv2DOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def DepthwiseConv2DOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # DepthwiseConv2DOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # DepthwiseConv2DOptions
-    def Padding(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # DepthwiseConv2DOptions
-    def StrideW(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # DepthwiseConv2DOptions
-    def StrideH(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # DepthwiseConv2DOptions
-    def DepthMultiplier(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # DepthwiseConv2DOptions
-    def FusedActivationFunction(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # DepthwiseConv2DOptions
-    def DilationWFactor(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 1
-
-    # DepthwiseConv2DOptions
-    def DilationHFactor(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 1
-
-def DepthwiseConv2DOptionsStart(builder): builder.StartObject(7)
-def Start(builder):
-    return DepthwiseConv2DOptionsStart(builder)
-def DepthwiseConv2DOptionsAddPadding(builder, padding): builder.PrependInt8Slot(0, padding, 0)
-def AddPadding(builder, padding):
-    return DepthwiseConv2DOptionsAddPadding(builder, padding)
-def DepthwiseConv2DOptionsAddStrideW(builder, strideW): builder.PrependInt32Slot(1, strideW, 0)
-def AddStrideW(builder, strideW):
-    return DepthwiseConv2DOptionsAddStrideW(builder, strideW)
-def DepthwiseConv2DOptionsAddStrideH(builder, strideH): builder.PrependInt32Slot(2, strideH, 0)
-def AddStrideH(builder, strideH):
-    return DepthwiseConv2DOptionsAddStrideH(builder, strideH)
-def DepthwiseConv2DOptionsAddDepthMultiplier(builder, depthMultiplier): builder.PrependInt32Slot(3, depthMultiplier, 0)
-def AddDepthMultiplier(builder, depthMultiplier):
-    return DepthwiseConv2DOptionsAddDepthMultiplier(builder, depthMultiplier)
-def DepthwiseConv2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(4, fusedActivationFunction, 0)
-def AddFusedActivationFunction(builder, fusedActivationFunction):
-    return DepthwiseConv2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction)
-def DepthwiseConv2DOptionsAddDilationWFactor(builder, dilationWFactor): builder.PrependInt32Slot(5, dilationWFactor, 1)
-def AddDilationWFactor(builder, dilationWFactor):
-    return DepthwiseConv2DOptionsAddDilationWFactor(builder, dilationWFactor)
-def DepthwiseConv2DOptionsAddDilationHFactor(builder, dilationHFactor): builder.PrependInt32Slot(6, dilationHFactor, 1)
-def AddDilationHFactor(builder, dilationHFactor):
-    return DepthwiseConv2DOptionsAddDilationHFactor(builder, dilationHFactor)
-def DepthwiseConv2DOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return DepthwiseConv2DOptionsEnd(builder)
-
-class DepthwiseConv2DOptionsT(object):
-
-    # DepthwiseConv2DOptionsT
-    def __init__(self):
-        self.padding = 0  # type: int
-        self.strideW = 0  # type: int
-        self.strideH = 0  # type: int
-        self.depthMultiplier = 0  # type: int
-        self.fusedActivationFunction = 0  # type: int
-        self.dilationWFactor = 1  # type: int
-        self.dilationHFactor = 1  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        depthwiseConv2doptions = DepthwiseConv2DOptions()
-        depthwiseConv2doptions.Init(buf, pos)
-        return cls.InitFromObj(depthwiseConv2doptions)
-
-    @classmethod
-    def InitFromObj(cls, depthwiseConv2doptions):
-        x = DepthwiseConv2DOptionsT()
-        x._UnPack(depthwiseConv2doptions)
-        return x
-
-    # DepthwiseConv2DOptionsT
-    def _UnPack(self, depthwiseConv2doptions):
-        if depthwiseConv2doptions is None:
-            return
-        self.padding = depthwiseConv2doptions.Padding()
-        self.strideW = depthwiseConv2doptions.StrideW()
-        self.strideH = depthwiseConv2doptions.StrideH()
-        self.depthMultiplier = depthwiseConv2doptions.DepthMultiplier()
-        self.fusedActivationFunction = depthwiseConv2doptions.FusedActivationFunction()
-        self.dilationWFactor = depthwiseConv2doptions.DilationWFactor()
-        self.dilationHFactor = depthwiseConv2doptions.DilationHFactor()
-
-    # DepthwiseConv2DOptionsT
-    def Pack(self, builder):
-        DepthwiseConv2DOptionsStart(builder)
-        DepthwiseConv2DOptionsAddPadding(builder, self.padding)
-        DepthwiseConv2DOptionsAddStrideW(builder, self.strideW)
-        DepthwiseConv2DOptionsAddStrideH(builder, self.strideH)
-        DepthwiseConv2DOptionsAddDepthMultiplier(builder, self.depthMultiplier)
-        DepthwiseConv2DOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        DepthwiseConv2DOptionsAddDilationWFactor(builder, self.dilationWFactor)
-        DepthwiseConv2DOptionsAddDilationHFactor(builder, self.dilationHFactor)
-        depthwiseConv2doptions = DepthwiseConv2DOptionsEnd(builder)
-        return depthwiseConv2doptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class DequantizeOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = DequantizeOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsDequantizeOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def DequantizeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # DequantizeOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def DequantizeOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return DequantizeOptionsStart(builder)
-def DequantizeOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return DequantizeOptionsEnd(builder)
-
-class DequantizeOptionsT(object):
-
-    # DequantizeOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        dequantizeOptions = DequantizeOptions()
-        dequantizeOptions.Init(buf, pos)
-        return cls.InitFromObj(dequantizeOptions)
-
-    @classmethod
-    def InitFromObj(cls, dequantizeOptions):
-        x = DequantizeOptionsT()
-        x._UnPack(dequantizeOptions)
-        return x
-
-    # DequantizeOptionsT
-    def _UnPack(self, dequantizeOptions):
-        if dequantizeOptions is None:
-            return
-
-    # DequantizeOptionsT
-    def Pack(self, builder):
-        DequantizeOptionsStart(builder)
-        dequantizeOptions = DequantizeOptionsEnd(builder)
-        return dequantizeOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class DimensionMetadata(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = DimensionMetadata()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsDimensionMetadata(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def DimensionMetadataBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # DimensionMetadata
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # DimensionMetadata
-    def Format(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # DimensionMetadata
-    def DenseSize(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # DimensionMetadata
-    def ArraySegmentsType(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
-        return 0
-
-    # DimensionMetadata
-    def ArraySegments(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            from flatbuffers.table import Table
-            obj = Table(bytearray(), 0)
-            self._tab.Union(obj, o)
-            return obj
-        return None
-
-    # DimensionMetadata
-    def ArrayIndicesType(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
-        return 0
-
-    # DimensionMetadata
-    def ArrayIndices(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
-        if o != 0:
-            from flatbuffers.table import Table
-            obj = Table(bytearray(), 0)
-            self._tab.Union(obj, o)
-            return obj
-        return None
-
-def DimensionMetadataStart(builder): builder.StartObject(6)
-def Start(builder):
-    return DimensionMetadataStart(builder)
-def DimensionMetadataAddFormat(builder, format): builder.PrependInt8Slot(0, format, 0)
-def AddFormat(builder, format):
-    return DimensionMetadataAddFormat(builder, format)
-def DimensionMetadataAddDenseSize(builder, denseSize): builder.PrependInt32Slot(1, denseSize, 0)
-def AddDenseSize(builder, denseSize):
-    return DimensionMetadataAddDenseSize(builder, denseSize)
-def DimensionMetadataAddArraySegmentsType(builder, arraySegmentsType): builder.PrependUint8Slot(2, arraySegmentsType, 0)
-def AddArraySegmentsType(builder, arraySegmentsType):
-    return DimensionMetadataAddArraySegmentsType(builder, arraySegmentsType)
-def DimensionMetadataAddArraySegments(builder, arraySegments): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(arraySegments), 0)
-def AddArraySegments(builder, arraySegments):
-    return DimensionMetadataAddArraySegments(builder, arraySegments)
-def DimensionMetadataAddArrayIndicesType(builder, arrayIndicesType): builder.PrependUint8Slot(4, arrayIndicesType, 0)
-def AddArrayIndicesType(builder, arrayIndicesType):
-    return DimensionMetadataAddArrayIndicesType(builder, arrayIndicesType)
-def DimensionMetadataAddArrayIndices(builder, arrayIndices): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(arrayIndices), 0)
-def AddArrayIndices(builder, arrayIndices):
-    return DimensionMetadataAddArrayIndices(builder, arrayIndices)
-def DimensionMetadataEnd(builder): return builder.EndObject()
-def End(builder):
-    return DimensionMetadataEnd(builder)
-try:
-    from typing import Union
-except:
-    pass
-
-class DimensionMetadataT(object):
-
-    # DimensionMetadataT
-    def __init__(self):
-        self.format = 0  # type: int
-        self.denseSize = 0  # type: int
-        self.arraySegmentsType = 0  # type: int
-        self.arraySegments = None  # type: Union[None, Int32VectorT, Uint16VectorT, Uint8VectorT]
-        self.arrayIndicesType = 0  # type: int
-        self.arrayIndices = None  # type: Union[None, Int32VectorT, Uint16VectorT, Uint8VectorT]
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        dimensionMetadata = DimensionMetadata()
-        dimensionMetadata.Init(buf, pos)
-        return cls.InitFromObj(dimensionMetadata)
-
-    @classmethod
-    def InitFromObj(cls, dimensionMetadata):
-        x = DimensionMetadataT()
-        x._UnPack(dimensionMetadata)
-        return x
-
-    # DimensionMetadataT
-    def _UnPack(self, dimensionMetadata):
-        if dimensionMetadata is None:
-            return
-        self.format = dimensionMetadata.Format()
-        self.denseSize = dimensionMetadata.DenseSize()
-        self.arraySegmentsType = dimensionMetadata.ArraySegmentsType()
-        self.arraySegments = SparseIndexVectorCreator(self.arraySegmentsType, dimensionMetadata.ArraySegments())
-        self.arrayIndicesType = dimensionMetadata.ArrayIndicesType()
-        self.arrayIndices = SparseIndexVectorCreator(self.arrayIndicesType, dimensionMetadata.ArrayIndices())
-
-    # DimensionMetadataT
-    def Pack(self, builder):
-        if self.arraySegments is not None:
-            arraySegments = self.arraySegments.Pack(builder)
-        if self.arrayIndices is not None:
-            arrayIndices = self.arrayIndices.Pack(builder)
-        DimensionMetadataStart(builder)
-        DimensionMetadataAddFormat(builder, self.format)
-        DimensionMetadataAddDenseSize(builder, self.denseSize)
-        DimensionMetadataAddArraySegmentsType(builder, self.arraySegmentsType)
-        if self.arraySegments is not None:
-            DimensionMetadataAddArraySegments(builder, arraySegments)
-        DimensionMetadataAddArrayIndicesType(builder, self.arrayIndicesType)
-        if self.arrayIndices is not None:
-            DimensionMetadataAddArrayIndices(builder, arrayIndices)
-        dimensionMetadata = DimensionMetadataEnd(builder)
-        return dimensionMetadata
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-class DimensionType(object):
-    DENSE = 0
-    SPARSE_CSR = 1
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class DivOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = DivOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsDivOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def DivOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # DivOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # DivOptions
-    def FusedActivationFunction(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-def DivOptionsStart(builder): builder.StartObject(1)
-def Start(builder):
-    return DivOptionsStart(builder)
-def DivOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0)
-def AddFusedActivationFunction(builder, fusedActivationFunction):
-    return DivOptionsAddFusedActivationFunction(builder, fusedActivationFunction)
-def DivOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return DivOptionsEnd(builder)
-
-class DivOptionsT(object):
-
-    # DivOptionsT
+    # RNNOptionsT
     def __init__(self):
         self.fusedActivationFunction = 0  # type: int
+        self.asymmetricQuantizeInputs = False  # type: bool
 
     @classmethod
     def InitFromBuf(cls, buf, pos):
-        divOptions = DivOptions()
-        divOptions.Init(buf, pos)
-        return cls.InitFromObj(divOptions)
+        rnnoptions = RNNOptions()
+        rnnoptions.Init(buf, pos)
+        return cls.InitFromObj(rnnoptions)
 
     @classmethod
-    def InitFromObj(cls, divOptions):
-        x = DivOptionsT()
-        x._UnPack(divOptions)
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, rnnoptions):
+        x = RNNOptionsT()
+        x._UnPack(rnnoptions)
         return x
 
-    # DivOptionsT
-    def _UnPack(self, divOptions):
-        if divOptions is None:
+    # RNNOptionsT
+    def _UnPack(self, rnnoptions):
+        if rnnoptions is None:
             return
-        self.fusedActivationFunction = divOptions.FusedActivationFunction()
+        self.fusedActivationFunction = rnnoptions.FusedActivationFunction()
+        self.asymmetricQuantizeInputs = rnnoptions.AsymmetricQuantizeInputs()
 
-    # DivOptionsT
+    # RNNOptionsT
     def Pack(self, builder):
-        DivOptionsStart(builder)
-        DivOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        divOptions = DivOptionsEnd(builder)
-        return divOptions
-# automatically generated by the FlatBuffers compiler, do not modify
+        RNNOptionsStart(builder)
+        RNNOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
+        RNNOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs)
+        rnnoptions = RNNOptionsEnd(builder)
+        return rnnoptions
 
-# namespace: tflite
 
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class DynamicUpdateSliceOptions(object):
+class SequenceRNNOptions(object):
     __slots__ = ['_tab']
 
     @classmethod
     def GetRootAs(cls, buf, offset=0):
         n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = DynamicUpdateSliceOptions()
+        x = SequenceRNNOptions()
         x.Init(buf, n + offset)
         return x
 
     @classmethod
-    def GetRootAsDynamicUpdateSliceOptions(cls, buf, offset=0):
+    def GetRootAsSequenceRNNOptions(cls, buf, offset=0):
         """This method is deprecated. Please switch to GetRootAs."""
         return cls.GetRootAs(buf, offset)
     @classmethod
-    def DynamicUpdateSliceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+    def SequenceRNNOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
         return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
 
-    # DynamicUpdateSliceOptions
+    # SequenceRNNOptions
     def Init(self, buf, pos):
         self._tab = flatbuffers.table.Table(buf, pos)
 
-def DynamicUpdateSliceOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return DynamicUpdateSliceOptionsStart(builder)
-def DynamicUpdateSliceOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return DynamicUpdateSliceOptionsEnd(builder)
-
-class DynamicUpdateSliceOptionsT(object):
-
-    # DynamicUpdateSliceOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        dynamicUpdateSliceOptions = DynamicUpdateSliceOptions()
-        dynamicUpdateSliceOptions.Init(buf, pos)
-        return cls.InitFromObj(dynamicUpdateSliceOptions)
-
-    @classmethod
-    def InitFromObj(cls, dynamicUpdateSliceOptions):
-        x = DynamicUpdateSliceOptionsT()
-        x._UnPack(dynamicUpdateSliceOptions)
-        return x
-
-    # DynamicUpdateSliceOptionsT
-    def _UnPack(self, dynamicUpdateSliceOptions):
-        if dynamicUpdateSliceOptions is None:
-            return
-
-    # DynamicUpdateSliceOptionsT
-    def Pack(self, builder):
-        DynamicUpdateSliceOptionsStart(builder)
-        dynamicUpdateSliceOptions = DynamicUpdateSliceOptionsEnd(builder)
-        return dynamicUpdateSliceOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class EmbeddingLookupSparseOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = EmbeddingLookupSparseOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsEmbeddingLookupSparseOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def EmbeddingLookupSparseOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # EmbeddingLookupSparseOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # EmbeddingLookupSparseOptions
-    def Combiner(self):
+    # SequenceRNNOptions
+    def TimeMajor(self):
         o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
         if o != 0:
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return False
+
+    # SequenceRNNOptions
+    def FusedActivationFunction(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
             return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
         return 0
 
-def EmbeddingLookupSparseOptionsStart(builder): builder.StartObject(1)
-def Start(builder):
-    return EmbeddingLookupSparseOptionsStart(builder)
-def EmbeddingLookupSparseOptionsAddCombiner(builder, combiner): builder.PrependInt8Slot(0, combiner, 0)
-def AddCombiner(builder, combiner):
-    return EmbeddingLookupSparseOptionsAddCombiner(builder, combiner)
-def EmbeddingLookupSparseOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return EmbeddingLookupSparseOptionsEnd(builder)
-
-class EmbeddingLookupSparseOptionsT(object):
-
-    # EmbeddingLookupSparseOptionsT
-    def __init__(self):
-        self.combiner = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        embeddingLookupSparseOptions = EmbeddingLookupSparseOptions()
-        embeddingLookupSparseOptions.Init(buf, pos)
-        return cls.InitFromObj(embeddingLookupSparseOptions)
-
-    @classmethod
-    def InitFromObj(cls, embeddingLookupSparseOptions):
-        x = EmbeddingLookupSparseOptionsT()
-        x._UnPack(embeddingLookupSparseOptions)
-        return x
-
-    # EmbeddingLookupSparseOptionsT
-    def _UnPack(self, embeddingLookupSparseOptions):
-        if embeddingLookupSparseOptions is None:
-            return
-        self.combiner = embeddingLookupSparseOptions.Combiner()
-
-    # EmbeddingLookupSparseOptionsT
-    def Pack(self, builder):
-        EmbeddingLookupSparseOptionsStart(builder)
-        EmbeddingLookupSparseOptionsAddCombiner(builder, self.combiner)
-        embeddingLookupSparseOptions = EmbeddingLookupSparseOptionsEnd(builder)
-        return embeddingLookupSparseOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class EqualOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = EqualOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsEqualOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def EqualOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # EqualOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def EqualOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return EqualOptionsStart(builder)
-def EqualOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return EqualOptionsEnd(builder)
-
-class EqualOptionsT(object):
-
-    # EqualOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        equalOptions = EqualOptions()
-        equalOptions.Init(buf, pos)
-        return cls.InitFromObj(equalOptions)
-
-    @classmethod
-    def InitFromObj(cls, equalOptions):
-        x = EqualOptionsT()
-        x._UnPack(equalOptions)
-        return x
-
-    # EqualOptionsT
-    def _UnPack(self, equalOptions):
-        if equalOptions is None:
-            return
-
-    # EqualOptionsT
-    def Pack(self, builder):
-        EqualOptionsStart(builder)
-        equalOptions = EqualOptionsEnd(builder)
-        return equalOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class ExpOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = ExpOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsExpOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def ExpOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # ExpOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def ExpOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return ExpOptionsStart(builder)
-def ExpOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return ExpOptionsEnd(builder)
-
-class ExpOptionsT(object):
-
-    # ExpOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        expOptions = ExpOptions()
-        expOptions.Init(buf, pos)
-        return cls.InitFromObj(expOptions)
-
-    @classmethod
-    def InitFromObj(cls, expOptions):
-        x = ExpOptionsT()
-        x._UnPack(expOptions)
-        return x
-
-    # ExpOptionsT
-    def _UnPack(self, expOptions):
-        if expOptions is None:
-            return
-
-    # ExpOptionsT
-    def Pack(self, builder):
-        ExpOptionsStart(builder)
-        expOptions = ExpOptionsEnd(builder)
-        return expOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class ExpandDimsOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = ExpandDimsOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsExpandDimsOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def ExpandDimsOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # ExpandDimsOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def ExpandDimsOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return ExpandDimsOptionsStart(builder)
-def ExpandDimsOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return ExpandDimsOptionsEnd(builder)
-
-class ExpandDimsOptionsT(object):
-
-    # ExpandDimsOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        expandDimsOptions = ExpandDimsOptions()
-        expandDimsOptions.Init(buf, pos)
-        return cls.InitFromObj(expandDimsOptions)
-
-    @classmethod
-    def InitFromObj(cls, expandDimsOptions):
-        x = ExpandDimsOptionsT()
-        x._UnPack(expandDimsOptions)
-        return x
-
-    # ExpandDimsOptionsT
-    def _UnPack(self, expandDimsOptions):
-        if expandDimsOptions is None:
-            return
-
-    # ExpandDimsOptionsT
-    def Pack(self, builder):
-        ExpandDimsOptionsStart(builder)
-        expandDimsOptions = ExpandDimsOptionsEnd(builder)
-        return expandDimsOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class FakeQuantOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = FakeQuantOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsFakeQuantOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def FakeQuantOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # FakeQuantOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # FakeQuantOptions
-    def Min(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
-        return 0.0
-
-    # FakeQuantOptions
-    def Max(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
-        return 0.0
-
-    # FakeQuantOptions
-    def NumBits(self):
+    # SequenceRNNOptions
+    def AsymmetricQuantizeInputs(self):
         o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
         if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return False
+
+def SequenceRNNOptionsStart(builder):
+    builder.StartObject(3)
+
+def SequenceRNNOptionsAddTimeMajor(builder, timeMajor):
+    builder.PrependBoolSlot(0, timeMajor, 0)
+
+def SequenceRNNOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+    builder.PrependInt8Slot(1, fusedActivationFunction, 0)
+
+def SequenceRNNOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs):
+    builder.PrependBoolSlot(2, asymmetricQuantizeInputs, 0)
+
+def SequenceRNNOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class SequenceRNNOptionsT(object):
+
+    # SequenceRNNOptionsT
+    def __init__(self):
+        self.timeMajor = False  # type: bool
+        self.fusedActivationFunction = 0  # type: int
+        self.asymmetricQuantizeInputs = False  # type: bool
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        sequenceRnnoptions = SequenceRNNOptions()
+        sequenceRnnoptions.Init(buf, pos)
+        return cls.InitFromObj(sequenceRnnoptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, sequenceRnnoptions):
+        x = SequenceRNNOptionsT()
+        x._UnPack(sequenceRnnoptions)
+        return x
+
+    # SequenceRNNOptionsT
+    def _UnPack(self, sequenceRnnoptions):
+        if sequenceRnnoptions is None:
+            return
+        self.timeMajor = sequenceRnnoptions.TimeMajor()
+        self.fusedActivationFunction = sequenceRnnoptions.FusedActivationFunction()
+        self.asymmetricQuantizeInputs = sequenceRnnoptions.AsymmetricQuantizeInputs()
+
+    # SequenceRNNOptionsT
+    def Pack(self, builder):
+        SequenceRNNOptionsStart(builder)
+        SequenceRNNOptionsAddTimeMajor(builder, self.timeMajor)
+        SequenceRNNOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
+        SequenceRNNOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs)
+        sequenceRnnoptions = SequenceRNNOptionsEnd(builder)
+        return sequenceRnnoptions
+
+
+class BidirectionalSequenceRNNOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = BidirectionalSequenceRNNOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsBidirectionalSequenceRNNOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def BidirectionalSequenceRNNOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # BidirectionalSequenceRNNOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # BidirectionalSequenceRNNOptions
+    def TimeMajor(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return False
+
+    # BidirectionalSequenceRNNOptions
+    def FusedActivationFunction(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
         return 0
 
-    # FakeQuantOptions
-    def NarrowRange(self):
+    # BidirectionalSequenceRNNOptions
+    def MergeOutputs(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return False
+
+    # BidirectionalSequenceRNNOptions
+    def AsymmetricQuantizeInputs(self):
         o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
         if o != 0:
             return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
         return False
 
-def FakeQuantOptionsStart(builder): builder.StartObject(4)
-def Start(builder):
-    return FakeQuantOptionsStart(builder)
-def FakeQuantOptionsAddMin(builder, min): builder.PrependFloat32Slot(0, min, 0.0)
-def AddMin(builder, min):
-    return FakeQuantOptionsAddMin(builder, min)
-def FakeQuantOptionsAddMax(builder, max): builder.PrependFloat32Slot(1, max, 0.0)
-def AddMax(builder, max):
-    return FakeQuantOptionsAddMax(builder, max)
-def FakeQuantOptionsAddNumBits(builder, numBits): builder.PrependInt32Slot(2, numBits, 0)
-def AddNumBits(builder, numBits):
-    return FakeQuantOptionsAddNumBits(builder, numBits)
-def FakeQuantOptionsAddNarrowRange(builder, narrowRange): builder.PrependBoolSlot(3, narrowRange, 0)
-def AddNarrowRange(builder, narrowRange):
-    return FakeQuantOptionsAddNarrowRange(builder, narrowRange)
-def FakeQuantOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return FakeQuantOptionsEnd(builder)
+def BidirectionalSequenceRNNOptionsStart(builder):
+    builder.StartObject(4)
 
-class FakeQuantOptionsT(object):
+def BidirectionalSequenceRNNOptionsAddTimeMajor(builder, timeMajor):
+    builder.PrependBoolSlot(0, timeMajor, 0)
 
-    # FakeQuantOptionsT
+def BidirectionalSequenceRNNOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+    builder.PrependInt8Slot(1, fusedActivationFunction, 0)
+
+def BidirectionalSequenceRNNOptionsAddMergeOutputs(builder, mergeOutputs):
+    builder.PrependBoolSlot(2, mergeOutputs, 0)
+
+def BidirectionalSequenceRNNOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs):
+    builder.PrependBoolSlot(3, asymmetricQuantizeInputs, 0)
+
+def BidirectionalSequenceRNNOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class BidirectionalSequenceRNNOptionsT(object):
+
+    # BidirectionalSequenceRNNOptionsT
     def __init__(self):
-        self.min = 0.0  # type: float
-        self.max = 0.0  # type: float
-        self.numBits = 0  # type: int
-        self.narrowRange = False  # type: bool
+        self.timeMajor = False  # type: bool
+        self.fusedActivationFunction = 0  # type: int
+        self.mergeOutputs = False  # type: bool
+        self.asymmetricQuantizeInputs = False  # type: bool
 
     @classmethod
     def InitFromBuf(cls, buf, pos):
-        fakeQuantOptions = FakeQuantOptions()
-        fakeQuantOptions.Init(buf, pos)
-        return cls.InitFromObj(fakeQuantOptions)
+        bidirectionalSequenceRnnoptions = BidirectionalSequenceRNNOptions()
+        bidirectionalSequenceRnnoptions.Init(buf, pos)
+        return cls.InitFromObj(bidirectionalSequenceRnnoptions)
 
     @classmethod
-    def InitFromObj(cls, fakeQuantOptions):
-        x = FakeQuantOptionsT()
-        x._UnPack(fakeQuantOptions)
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, bidirectionalSequenceRnnoptions):
+        x = BidirectionalSequenceRNNOptionsT()
+        x._UnPack(bidirectionalSequenceRnnoptions)
         return x
 
-    # FakeQuantOptionsT
-    def _UnPack(self, fakeQuantOptions):
-        if fakeQuantOptions is None:
+    # BidirectionalSequenceRNNOptionsT
+    def _UnPack(self, bidirectionalSequenceRnnoptions):
+        if bidirectionalSequenceRnnoptions is None:
             return
-        self.min = fakeQuantOptions.Min()
-        self.max = fakeQuantOptions.Max()
-        self.numBits = fakeQuantOptions.NumBits()
-        self.narrowRange = fakeQuantOptions.NarrowRange()
+        self.timeMajor = bidirectionalSequenceRnnoptions.TimeMajor()
+        self.fusedActivationFunction = bidirectionalSequenceRnnoptions.FusedActivationFunction()
+        self.mergeOutputs = bidirectionalSequenceRnnoptions.MergeOutputs()
+        self.asymmetricQuantizeInputs = bidirectionalSequenceRnnoptions.AsymmetricQuantizeInputs()
 
-    # FakeQuantOptionsT
+    # BidirectionalSequenceRNNOptionsT
     def Pack(self, builder):
-        FakeQuantOptionsStart(builder)
-        FakeQuantOptionsAddMin(builder, self.min)
-        FakeQuantOptionsAddMax(builder, self.max)
-        FakeQuantOptionsAddNumBits(builder, self.numBits)
-        FakeQuantOptionsAddNarrowRange(builder, self.narrowRange)
-        fakeQuantOptions = FakeQuantOptionsEnd(builder)
-        return fakeQuantOptions
-# automatically generated by the FlatBuffers compiler, do not modify
+        BidirectionalSequenceRNNOptionsStart(builder)
+        BidirectionalSequenceRNNOptionsAddTimeMajor(builder, self.timeMajor)
+        BidirectionalSequenceRNNOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
+        BidirectionalSequenceRNNOptionsAddMergeOutputs(builder, self.mergeOutputs)
+        BidirectionalSequenceRNNOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs)
+        bidirectionalSequenceRnnoptions = BidirectionalSequenceRNNOptionsEnd(builder)
+        return bidirectionalSequenceRnnoptions
 
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class FillOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = FillOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsFillOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def FillOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # FillOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def FillOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return FillOptionsStart(builder)
-def FillOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return FillOptionsEnd(builder)
-
-class FillOptionsT(object):
-
-    # FillOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        fillOptions = FillOptions()
-        fillOptions.Init(buf, pos)
-        return cls.InitFromObj(fillOptions)
-
-    @classmethod
-    def InitFromObj(cls, fillOptions):
-        x = FillOptionsT()
-        x._UnPack(fillOptions)
-        return x
-
-    # FillOptionsT
-    def _UnPack(self, fillOptions):
-        if fillOptions is None:
-            return
-
-    # FillOptionsT
-    def Pack(self, builder):
-        FillOptionsStart(builder)
-        fillOptions = FillOptionsEnd(builder)
-        return fillOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class FloorDivOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = FloorDivOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsFloorDivOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def FloorDivOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # FloorDivOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def FloorDivOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return FloorDivOptionsStart(builder)
-def FloorDivOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return FloorDivOptionsEnd(builder)
-
-class FloorDivOptionsT(object):
-
-    # FloorDivOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        floorDivOptions = FloorDivOptions()
-        floorDivOptions.Init(buf, pos)
-        return cls.InitFromObj(floorDivOptions)
-
-    @classmethod
-    def InitFromObj(cls, floorDivOptions):
-        x = FloorDivOptionsT()
-        x._UnPack(floorDivOptions)
-        return x
-
-    # FloorDivOptionsT
-    def _UnPack(self, floorDivOptions):
-        if floorDivOptions is None:
-            return
-
-    # FloorDivOptionsT
-    def Pack(self, builder):
-        FloorDivOptionsStart(builder)
-        floorDivOptions = FloorDivOptionsEnd(builder)
-        return floorDivOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class FloorModOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = FloorModOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsFloorModOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def FloorModOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # FloorModOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def FloorModOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return FloorModOptionsStart(builder)
-def FloorModOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return FloorModOptionsEnd(builder)
-
-class FloorModOptionsT(object):
-
-    # FloorModOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        floorModOptions = FloorModOptions()
-        floorModOptions.Init(buf, pos)
-        return cls.InitFromObj(floorModOptions)
-
-    @classmethod
-    def InitFromObj(cls, floorModOptions):
-        x = FloorModOptionsT()
-        x._UnPack(floorModOptions)
-        return x
-
-    # FloorModOptionsT
-    def _UnPack(self, floorModOptions):
-        if floorModOptions is None:
-            return
-
-    # FloorModOptionsT
-    def Pack(self, builder):
-        FloorModOptionsStart(builder)
-        floorModOptions = FloorModOptionsEnd(builder)
-        return floorModOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
 
 class FullyConnectedOptions(object):
     __slots__ = ['_tab']
@@ -4340,24 +7339,35 @@
             return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
         return False
 
-def FullyConnectedOptionsStart(builder): builder.StartObject(4)
-def Start(builder):
-    return FullyConnectedOptionsStart(builder)
-def FullyConnectedOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0)
-def AddFusedActivationFunction(builder, fusedActivationFunction):
-    return FullyConnectedOptionsAddFusedActivationFunction(builder, fusedActivationFunction)
-def FullyConnectedOptionsAddWeightsFormat(builder, weightsFormat): builder.PrependInt8Slot(1, weightsFormat, 0)
-def AddWeightsFormat(builder, weightsFormat):
-    return FullyConnectedOptionsAddWeightsFormat(builder, weightsFormat)
-def FullyConnectedOptionsAddKeepNumDims(builder, keepNumDims): builder.PrependBoolSlot(2, keepNumDims, 0)
-def AddKeepNumDims(builder, keepNumDims):
-    return FullyConnectedOptionsAddKeepNumDims(builder, keepNumDims)
-def FullyConnectedOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): builder.PrependBoolSlot(3, asymmetricQuantizeInputs, 0)
-def AddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs):
-    return FullyConnectedOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs)
-def FullyConnectedOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return FullyConnectedOptionsEnd(builder)
+    # FullyConnectedOptions
+    def QuantizedBiasType(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
+
+def FullyConnectedOptionsStart(builder):
+    builder.StartObject(5)
+
+def FullyConnectedOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+    builder.PrependInt8Slot(0, fusedActivationFunction, 0)
+
+def FullyConnectedOptionsAddWeightsFormat(builder, weightsFormat):
+    builder.PrependInt8Slot(1, weightsFormat, 0)
+
+def FullyConnectedOptionsAddKeepNumDims(builder, keepNumDims):
+    builder.PrependBoolSlot(2, keepNumDims, 0)
+
+def FullyConnectedOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs):
+    builder.PrependBoolSlot(3, asymmetricQuantizeInputs, 0)
+
+def FullyConnectedOptionsAddQuantizedBiasType(builder, quantizedBiasType):
+    builder.PrependInt8Slot(4, quantizedBiasType, 0)
+
+def FullyConnectedOptionsEnd(builder):
+    return builder.EndObject()
+
+
 
 class FullyConnectedOptionsT(object):
 
@@ -4367,6 +7377,7 @@
         self.weightsFormat = 0  # type: int
         self.keepNumDims = False  # type: bool
         self.asymmetricQuantizeInputs = False  # type: bool
+        self.quantizedBiasType = 0  # type: int
 
     @classmethod
     def InitFromBuf(cls, buf, pos):
@@ -4375,6 +7386,11 @@
         return cls.InitFromObj(fullyConnectedOptions)
 
     @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
     def InitFromObj(cls, fullyConnectedOptions):
         x = FullyConnectedOptionsT()
         x._UnPack(fullyConnectedOptions)
@@ -4388,6 +7404,7 @@
         self.weightsFormat = fullyConnectedOptions.WeightsFormat()
         self.keepNumDims = fullyConnectedOptions.KeepNumDims()
         self.asymmetricQuantizeInputs = fullyConnectedOptions.AsymmetricQuantizeInputs()
+        self.quantizedBiasType = fullyConnectedOptions.QuantizedBiasType()
 
     # FullyConnectedOptionsT
     def Pack(self, builder):
@@ -4396,943 +7413,344 @@
         FullyConnectedOptionsAddWeightsFormat(builder, self.weightsFormat)
         FullyConnectedOptionsAddKeepNumDims(builder, self.keepNumDims)
         FullyConnectedOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs)
+        FullyConnectedOptionsAddQuantizedBiasType(builder, self.quantizedBiasType)
         fullyConnectedOptions = FullyConnectedOptionsEnd(builder)
         return fullyConnectedOptions
-# automatically generated by the FlatBuffers compiler, do not modify
 
-# namespace: tflite
 
-class FullyConnectedOptionsWeightsFormat(object):
-    DEFAULT = 0
-    SHUFFLED4x16INT8 = 1
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class GatherNdOptions(object):
+class SoftmaxOptions(object):
     __slots__ = ['_tab']
 
     @classmethod
     def GetRootAs(cls, buf, offset=0):
         n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = GatherNdOptions()
+        x = SoftmaxOptions()
         x.Init(buf, n + offset)
         return x
 
     @classmethod
-    def GetRootAsGatherNdOptions(cls, buf, offset=0):
+    def GetRootAsSoftmaxOptions(cls, buf, offset=0):
         """This method is deprecated. Please switch to GetRootAs."""
         return cls.GetRootAs(buf, offset)
     @classmethod
-    def GatherNdOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+    def SoftmaxOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
         return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
 
-    # GatherNdOptions
+    # SoftmaxOptions
     def Init(self, buf, pos):
         self._tab = flatbuffers.table.Table(buf, pos)
 
-def GatherNdOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return GatherNdOptionsStart(builder)
-def GatherNdOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return GatherNdOptionsEnd(builder)
+    # SoftmaxOptions
+    def Beta(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
+        return 0.0
 
-class GatherNdOptionsT(object):
+def SoftmaxOptionsStart(builder):
+    builder.StartObject(1)
 
-    # GatherNdOptionsT
+def SoftmaxOptionsAddBeta(builder, beta):
+    builder.PrependFloat32Slot(0, beta, 0.0)
+
+def SoftmaxOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class SoftmaxOptionsT(object):
+
+    # SoftmaxOptionsT
     def __init__(self):
-        pass
+        self.beta = 0.0  # type: float
 
     @classmethod
     def InitFromBuf(cls, buf, pos):
-        gatherNdOptions = GatherNdOptions()
-        gatherNdOptions.Init(buf, pos)
-        return cls.InitFromObj(gatherNdOptions)
+        softmaxOptions = SoftmaxOptions()
+        softmaxOptions.Init(buf, pos)
+        return cls.InitFromObj(softmaxOptions)
 
     @classmethod
-    def InitFromObj(cls, gatherNdOptions):
-        x = GatherNdOptionsT()
-        x._UnPack(gatherNdOptions)
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, softmaxOptions):
+        x = SoftmaxOptionsT()
+        x._UnPack(softmaxOptions)
         return x
 
-    # GatherNdOptionsT
-    def _UnPack(self, gatherNdOptions):
-        if gatherNdOptions is None:
+    # SoftmaxOptionsT
+    def _UnPack(self, softmaxOptions):
+        if softmaxOptions is None:
             return
+        self.beta = softmaxOptions.Beta()
 
-    # GatherNdOptionsT
+    # SoftmaxOptionsT
     def Pack(self, builder):
-        GatherNdOptionsStart(builder)
-        gatherNdOptions = GatherNdOptionsEnd(builder)
-        return gatherNdOptions
-# automatically generated by the FlatBuffers compiler, do not modify
+        SoftmaxOptionsStart(builder)
+        SoftmaxOptionsAddBeta(builder, self.beta)
+        softmaxOptions = SoftmaxOptionsEnd(builder)
+        return softmaxOptions
 
-# namespace: tflite
 
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class GatherOptions(object):
+class ConcatenationOptions(object):
     __slots__ = ['_tab']
 
     @classmethod
     def GetRootAs(cls, buf, offset=0):
         n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = GatherOptions()
+        x = ConcatenationOptions()
         x.Init(buf, n + offset)
         return x
 
     @classmethod
-    def GetRootAsGatherOptions(cls, buf, offset=0):
+    def GetRootAsConcatenationOptions(cls, buf, offset=0):
         """This method is deprecated. Please switch to GetRootAs."""
         return cls.GetRootAs(buf, offset)
     @classmethod
-    def GatherOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+    def ConcatenationOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
         return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
 
-    # GatherOptions
+    # ConcatenationOptions
     def Init(self, buf, pos):
         self._tab = flatbuffers.table.Table(buf, pos)
 
-    # GatherOptions
+    # ConcatenationOptions
     def Axis(self):
         o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
         if o != 0:
             return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
         return 0
 
-    # GatherOptions
-    def BatchDims(self):
+    # ConcatenationOptions
+    def FusedActivationFunction(self):
         o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
         if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
         return 0
 
-def GatherOptionsStart(builder): builder.StartObject(2)
-def Start(builder):
-    return GatherOptionsStart(builder)
-def GatherOptionsAddAxis(builder, axis): builder.PrependInt32Slot(0, axis, 0)
-def AddAxis(builder, axis):
-    return GatherOptionsAddAxis(builder, axis)
-def GatherOptionsAddBatchDims(builder, batchDims): builder.PrependInt32Slot(1, batchDims, 0)
-def AddBatchDims(builder, batchDims):
-    return GatherOptionsAddBatchDims(builder, batchDims)
-def GatherOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return GatherOptionsEnd(builder)
+def ConcatenationOptionsStart(builder):
+    builder.StartObject(2)
 
-class GatherOptionsT(object):
+def ConcatenationOptionsAddAxis(builder, axis):
+    builder.PrependInt32Slot(0, axis, 0)
 
-    # GatherOptionsT
+def ConcatenationOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+    builder.PrependInt8Slot(1, fusedActivationFunction, 0)
+
+def ConcatenationOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class ConcatenationOptionsT(object):
+
+    # ConcatenationOptionsT
     def __init__(self):
         self.axis = 0  # type: int
-        self.batchDims = 0  # type: int
+        self.fusedActivationFunction = 0  # type: int
 
     @classmethod
     def InitFromBuf(cls, buf, pos):
-        gatherOptions = GatherOptions()
-        gatherOptions.Init(buf, pos)
-        return cls.InitFromObj(gatherOptions)
+        concatenationOptions = ConcatenationOptions()
+        concatenationOptions.Init(buf, pos)
+        return cls.InitFromObj(concatenationOptions)
 
     @classmethod
-    def InitFromObj(cls, gatherOptions):
-        x = GatherOptionsT()
-        x._UnPack(gatherOptions)
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, concatenationOptions):
+        x = ConcatenationOptionsT()
+        x._UnPack(concatenationOptions)
         return x
 
-    # GatherOptionsT
-    def _UnPack(self, gatherOptions):
-        if gatherOptions is None:
+    # ConcatenationOptionsT
+    def _UnPack(self, concatenationOptions):
+        if concatenationOptions is None:
             return
-        self.axis = gatherOptions.Axis()
-        self.batchDims = gatherOptions.BatchDims()
+        self.axis = concatenationOptions.Axis()
+        self.fusedActivationFunction = concatenationOptions.FusedActivationFunction()
 
-    # GatherOptionsT
+    # ConcatenationOptionsT
     def Pack(self, builder):
-        GatherOptionsStart(builder)
-        GatherOptionsAddAxis(builder, self.axis)
-        GatherOptionsAddBatchDims(builder, self.batchDims)
-        gatherOptions = GatherOptionsEnd(builder)
-        return gatherOptions
-# automatically generated by the FlatBuffers compiler, do not modify
+        ConcatenationOptionsStart(builder)
+        ConcatenationOptionsAddAxis(builder, self.axis)
+        ConcatenationOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
+        concatenationOptions = ConcatenationOptionsEnd(builder)
+        return concatenationOptions
 
-# namespace: tflite
 
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class GeluOptions(object):
+class AddOptions(object):
     __slots__ = ['_tab']
 
     @classmethod
     def GetRootAs(cls, buf, offset=0):
         n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = GeluOptions()
+        x = AddOptions()
         x.Init(buf, n + offset)
         return x
 
     @classmethod
-    def GetRootAsGeluOptions(cls, buf, offset=0):
+    def GetRootAsAddOptions(cls, buf, offset=0):
         """This method is deprecated. Please switch to GetRootAs."""
         return cls.GetRootAs(buf, offset)
     @classmethod
-    def GeluOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+    def AddOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
         return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
 
-    # GeluOptions
+    # AddOptions
     def Init(self, buf, pos):
         self._tab = flatbuffers.table.Table(buf, pos)
 
-    # GeluOptions
-    def Approximate(self):
+    # AddOptions
+    def FusedActivationFunction(self):
         o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
         if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
+
+    # AddOptions
+    def PotScaleInt16(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
             return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
+        return True
 
-def GeluOptionsStart(builder): builder.StartObject(1)
-def Start(builder):
-    return GeluOptionsStart(builder)
-def GeluOptionsAddApproximate(builder, approximate): builder.PrependBoolSlot(0, approximate, 0)
-def AddApproximate(builder, approximate):
-    return GeluOptionsAddApproximate(builder, approximate)
-def GeluOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return GeluOptionsEnd(builder)
+def AddOptionsStart(builder):
+    builder.StartObject(2)
 
-class GeluOptionsT(object):
+def AddOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+    builder.PrependInt8Slot(0, fusedActivationFunction, 0)
 
-    # GeluOptionsT
+def AddOptionsAddPotScaleInt16(builder, potScaleInt16):
+    builder.PrependBoolSlot(1, potScaleInt16, 1)
+
+def AddOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class AddOptionsT(object):
+
+    # AddOptionsT
     def __init__(self):
-        self.approximate = False  # type: bool
+        self.fusedActivationFunction = 0  # type: int
+        self.potScaleInt16 = True  # type: bool
 
     @classmethod
     def InitFromBuf(cls, buf, pos):
-        geluOptions = GeluOptions()
-        geluOptions.Init(buf, pos)
-        return cls.InitFromObj(geluOptions)
+        addOptions = AddOptions()
+        addOptions.Init(buf, pos)
+        return cls.InitFromObj(addOptions)
 
     @classmethod
-    def InitFromObj(cls, geluOptions):
-        x = GeluOptionsT()
-        x._UnPack(geluOptions)
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, addOptions):
+        x = AddOptionsT()
+        x._UnPack(addOptions)
         return x
 
-    # GeluOptionsT
-    def _UnPack(self, geluOptions):
-        if geluOptions is None:
+    # AddOptionsT
+    def _UnPack(self, addOptions):
+        if addOptions is None:
             return
-        self.approximate = geluOptions.Approximate()
+        self.fusedActivationFunction = addOptions.FusedActivationFunction()
+        self.potScaleInt16 = addOptions.PotScaleInt16()
 
-    # GeluOptionsT
+    # AddOptionsT
     def Pack(self, builder):
-        GeluOptionsStart(builder)
-        GeluOptionsAddApproximate(builder, self.approximate)
-        geluOptions = GeluOptionsEnd(builder)
-        return geluOptions
-# automatically generated by the FlatBuffers compiler, do not modify
+        AddOptionsStart(builder)
+        AddOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
+        AddOptionsAddPotScaleInt16(builder, self.potScaleInt16)
+        addOptions = AddOptionsEnd(builder)
+        return addOptions
 
-# namespace: tflite
 
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class GreaterEqualOptions(object):
+class MulOptions(object):
     __slots__ = ['_tab']
 
     @classmethod
     def GetRootAs(cls, buf, offset=0):
         n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = GreaterEqualOptions()
+        x = MulOptions()
         x.Init(buf, n + offset)
         return x
 
     @classmethod
-    def GetRootAsGreaterEqualOptions(cls, buf, offset=0):
+    def GetRootAsMulOptions(cls, buf, offset=0):
         """This method is deprecated. Please switch to GetRootAs."""
         return cls.GetRootAs(buf, offset)
     @classmethod
-    def GreaterEqualOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+    def MulOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
         return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
 
-    # GreaterEqualOptions
+    # MulOptions
     def Init(self, buf, pos):
         self._tab = flatbuffers.table.Table(buf, pos)
 
-def GreaterEqualOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return GreaterEqualOptionsStart(builder)
-def GreaterEqualOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return GreaterEqualOptionsEnd(builder)
-
-class GreaterEqualOptionsT(object):
-
-    # GreaterEqualOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        greaterEqualOptions = GreaterEqualOptions()
-        greaterEqualOptions.Init(buf, pos)
-        return cls.InitFromObj(greaterEqualOptions)
-
-    @classmethod
-    def InitFromObj(cls, greaterEqualOptions):
-        x = GreaterEqualOptionsT()
-        x._UnPack(greaterEqualOptions)
-        return x
-
-    # GreaterEqualOptionsT
-    def _UnPack(self, greaterEqualOptions):
-        if greaterEqualOptions is None:
-            return
-
-    # GreaterEqualOptionsT
-    def Pack(self, builder):
-        GreaterEqualOptionsStart(builder)
-        greaterEqualOptions = GreaterEqualOptionsEnd(builder)
-        return greaterEqualOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class GreaterOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = GreaterOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsGreaterOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def GreaterOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # GreaterOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def GreaterOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return GreaterOptionsStart(builder)
-def GreaterOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return GreaterOptionsEnd(builder)
-
-class GreaterOptionsT(object):
-
-    # GreaterOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        greaterOptions = GreaterOptions()
-        greaterOptions.Init(buf, pos)
-        return cls.InitFromObj(greaterOptions)
-
-    @classmethod
-    def InitFromObj(cls, greaterOptions):
-        x = GreaterOptionsT()
-        x._UnPack(greaterOptions)
-        return x
-
-    # GreaterOptionsT
-    def _UnPack(self, greaterOptions):
-        if greaterOptions is None:
-            return
-
-    # GreaterOptionsT
-    def Pack(self, builder):
-        GreaterOptionsStart(builder)
-        greaterOptions = GreaterOptionsEnd(builder)
-        return greaterOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class HardSwishOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = HardSwishOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsHardSwishOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def HardSwishOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # HardSwishOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def HardSwishOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return HardSwishOptionsStart(builder)
-def HardSwishOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return HardSwishOptionsEnd(builder)
-
-class HardSwishOptionsT(object):
-
-    # HardSwishOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        hardSwishOptions = HardSwishOptions()
-        hardSwishOptions.Init(buf, pos)
-        return cls.InitFromObj(hardSwishOptions)
-
-    @classmethod
-    def InitFromObj(cls, hardSwishOptions):
-        x = HardSwishOptionsT()
-        x._UnPack(hardSwishOptions)
-        return x
-
-    # HardSwishOptionsT
-    def _UnPack(self, hardSwishOptions):
-        if hardSwishOptions is None:
-            return
-
-    # HardSwishOptionsT
-    def Pack(self, builder):
-        HardSwishOptionsStart(builder)
-        hardSwishOptions = HardSwishOptionsEnd(builder)
-        return hardSwishOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class HashtableFindOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = HashtableFindOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsHashtableFindOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def HashtableFindOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # HashtableFindOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def HashtableFindOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return HashtableFindOptionsStart(builder)
-def HashtableFindOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return HashtableFindOptionsEnd(builder)
-
-class HashtableFindOptionsT(object):
-
-    # HashtableFindOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        hashtableFindOptions = HashtableFindOptions()
-        hashtableFindOptions.Init(buf, pos)
-        return cls.InitFromObj(hashtableFindOptions)
-
-    @classmethod
-    def InitFromObj(cls, hashtableFindOptions):
-        x = HashtableFindOptionsT()
-        x._UnPack(hashtableFindOptions)
-        return x
-
-    # HashtableFindOptionsT
-    def _UnPack(self, hashtableFindOptions):
-        if hashtableFindOptions is None:
-            return
-
-    # HashtableFindOptionsT
-    def Pack(self, builder):
-        HashtableFindOptionsStart(builder)
-        hashtableFindOptions = HashtableFindOptionsEnd(builder)
-        return hashtableFindOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class HashtableImportOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = HashtableImportOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsHashtableImportOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def HashtableImportOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # HashtableImportOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def HashtableImportOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return HashtableImportOptionsStart(builder)
-def HashtableImportOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return HashtableImportOptionsEnd(builder)
-
-class HashtableImportOptionsT(object):
-
-    # HashtableImportOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        hashtableImportOptions = HashtableImportOptions()
-        hashtableImportOptions.Init(buf, pos)
-        return cls.InitFromObj(hashtableImportOptions)
-
-    @classmethod
-    def InitFromObj(cls, hashtableImportOptions):
-        x = HashtableImportOptionsT()
-        x._UnPack(hashtableImportOptions)
-        return x
-
-    # HashtableImportOptionsT
-    def _UnPack(self, hashtableImportOptions):
-        if hashtableImportOptions is None:
-            return
-
-    # HashtableImportOptionsT
-    def Pack(self, builder):
-        HashtableImportOptionsStart(builder)
-        hashtableImportOptions = HashtableImportOptionsEnd(builder)
-        return hashtableImportOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class HashtableOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = HashtableOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsHashtableOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def HashtableOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # HashtableOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # HashtableOptions
-    def TableId(self):
+    # MulOptions
+    def FusedActivationFunction(self):
         o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
         if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # HashtableOptions
-    def KeyDtype(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
             return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
         return 0
 
-    # HashtableOptions
-    def ValueDtype(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
+def MulOptionsStart(builder):
+    builder.StartObject(1)
 
-def HashtableOptionsStart(builder): builder.StartObject(3)
-def Start(builder):
-    return HashtableOptionsStart(builder)
-def HashtableOptionsAddTableId(builder, tableId): builder.PrependInt32Slot(0, tableId, 0)
-def AddTableId(builder, tableId):
-    return HashtableOptionsAddTableId(builder, tableId)
-def HashtableOptionsAddKeyDtype(builder, keyDtype): builder.PrependInt8Slot(1, keyDtype, 0)
-def AddKeyDtype(builder, keyDtype):
-    return HashtableOptionsAddKeyDtype(builder, keyDtype)
-def HashtableOptionsAddValueDtype(builder, valueDtype): builder.PrependInt8Slot(2, valueDtype, 0)
-def AddValueDtype(builder, valueDtype):
-    return HashtableOptionsAddValueDtype(builder, valueDtype)
-def HashtableOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return HashtableOptionsEnd(builder)
+def MulOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+    builder.PrependInt8Slot(0, fusedActivationFunction, 0)
 
-class HashtableOptionsT(object):
+def MulOptionsEnd(builder):
+    return builder.EndObject()
 
-    # HashtableOptionsT
+
+
+class MulOptionsT(object):
+
+    # MulOptionsT
     def __init__(self):
-        self.tableId = 0  # type: int
-        self.keyDtype = 0  # type: int
-        self.valueDtype = 0  # type: int
+        self.fusedActivationFunction = 0  # type: int
 
     @classmethod
     def InitFromBuf(cls, buf, pos):
-        hashtableOptions = HashtableOptions()
-        hashtableOptions.Init(buf, pos)
-        return cls.InitFromObj(hashtableOptions)
+        mulOptions = MulOptions()
+        mulOptions.Init(buf, pos)
+        return cls.InitFromObj(mulOptions)
 
     @classmethod
-    def InitFromObj(cls, hashtableOptions):
-        x = HashtableOptionsT()
-        x._UnPack(hashtableOptions)
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, mulOptions):
+        x = MulOptionsT()
+        x._UnPack(mulOptions)
         return x
 
-    # HashtableOptionsT
-    def _UnPack(self, hashtableOptions):
-        if hashtableOptions is None:
+    # MulOptionsT
+    def _UnPack(self, mulOptions):
+        if mulOptions is None:
             return
-        self.tableId = hashtableOptions.TableId()
-        self.keyDtype = hashtableOptions.KeyDtype()
-        self.valueDtype = hashtableOptions.ValueDtype()
+        self.fusedActivationFunction = mulOptions.FusedActivationFunction()
 
-    # HashtableOptionsT
+    # MulOptionsT
     def Pack(self, builder):
-        HashtableOptionsStart(builder)
-        HashtableOptionsAddTableId(builder, self.tableId)
-        HashtableOptionsAddKeyDtype(builder, self.keyDtype)
-        HashtableOptionsAddValueDtype(builder, self.valueDtype)
-        hashtableOptions = HashtableOptionsEnd(builder)
-        return hashtableOptions
-# automatically generated by the FlatBuffers compiler, do not modify
+        MulOptionsStart(builder)
+        MulOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
+        mulOptions = MulOptionsEnd(builder)
+        return mulOptions
 
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class HashtableSizeOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = HashtableSizeOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsHashtableSizeOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def HashtableSizeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # HashtableSizeOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def HashtableSizeOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return HashtableSizeOptionsStart(builder)
-def HashtableSizeOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return HashtableSizeOptionsEnd(builder)
-
-class HashtableSizeOptionsT(object):
-
-    # HashtableSizeOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        hashtableSizeOptions = HashtableSizeOptions()
-        hashtableSizeOptions.Init(buf, pos)
-        return cls.InitFromObj(hashtableSizeOptions)
-
-    @classmethod
-    def InitFromObj(cls, hashtableSizeOptions):
-        x = HashtableSizeOptionsT()
-        x._UnPack(hashtableSizeOptions)
-        return x
-
-    # HashtableSizeOptionsT
-    def _UnPack(self, hashtableSizeOptions):
-        if hashtableSizeOptions is None:
-            return
-
-    # HashtableSizeOptionsT
-    def Pack(self, builder):
-        HashtableSizeOptionsStart(builder)
-        hashtableSizeOptions = HashtableSizeOptionsEnd(builder)
-        return hashtableSizeOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class IfOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = IfOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsIfOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def IfOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # IfOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # IfOptions
-    def ThenSubgraphIndex(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # IfOptions
-    def ElseSubgraphIndex(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-def IfOptionsStart(builder): builder.StartObject(2)
-def Start(builder):
-    return IfOptionsStart(builder)
-def IfOptionsAddThenSubgraphIndex(builder, thenSubgraphIndex): builder.PrependInt32Slot(0, thenSubgraphIndex, 0)
-def AddThenSubgraphIndex(builder, thenSubgraphIndex):
-    return IfOptionsAddThenSubgraphIndex(builder, thenSubgraphIndex)
-def IfOptionsAddElseSubgraphIndex(builder, elseSubgraphIndex): builder.PrependInt32Slot(1, elseSubgraphIndex, 0)
-def AddElseSubgraphIndex(builder, elseSubgraphIndex):
-    return IfOptionsAddElseSubgraphIndex(builder, elseSubgraphIndex)
-def IfOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return IfOptionsEnd(builder)
-
-class IfOptionsT(object):
-
-    # IfOptionsT
-    def __init__(self):
-        self.thenSubgraphIndex = 0  # type: int
-        self.elseSubgraphIndex = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        ifOptions = IfOptions()
-        ifOptions.Init(buf, pos)
-        return cls.InitFromObj(ifOptions)
-
-    @classmethod
-    def InitFromObj(cls, ifOptions):
-        x = IfOptionsT()
-        x._UnPack(ifOptions)
-        return x
-
-    # IfOptionsT
-    def _UnPack(self, ifOptions):
-        if ifOptions is None:
-            return
-        self.thenSubgraphIndex = ifOptions.ThenSubgraphIndex()
-        self.elseSubgraphIndex = ifOptions.ElseSubgraphIndex()
-
-    # IfOptionsT
-    def Pack(self, builder):
-        IfOptionsStart(builder)
-        IfOptionsAddThenSubgraphIndex(builder, self.thenSubgraphIndex)
-        IfOptionsAddElseSubgraphIndex(builder, self.elseSubgraphIndex)
-        ifOptions = IfOptionsEnd(builder)
-        return ifOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class Int32Vector(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = Int32Vector()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsInt32Vector(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def Int32VectorBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # Int32Vector
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # Int32Vector
-    def Values(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
-        return 0
-
-    # Int32Vector
-    def ValuesAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
-        return 0
-
-    # Int32Vector
-    def ValuesLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # Int32Vector
-    def ValuesIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        return o == 0
-
-def Int32VectorStart(builder): builder.StartObject(1)
-def Start(builder):
-    return Int32VectorStart(builder)
-def Int32VectorAddValues(builder, values): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(values), 0)
-def AddValues(builder, values):
-    return Int32VectorAddValues(builder, values)
-def Int32VectorStartValuesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def StartValuesVector(builder, numElems):
-    return Int32VectorStartValuesVector(builder, numElems)
-def Int32VectorEnd(builder): return builder.EndObject()
-def End(builder):
-    return Int32VectorEnd(builder)
-try:
-    from typing import List
-except:
-    pass
-
-class Int32VectorT(object):
-
-    # Int32VectorT
-    def __init__(self):
-        self.values = None  # type: List[int]
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        int32vector = Int32Vector()
-        int32vector.Init(buf, pos)
-        return cls.InitFromObj(int32vector)
-
-    @classmethod
-    def InitFromObj(cls, int32vector):
-        x = Int32VectorT()
-        x._UnPack(int32vector)
-        return x
-
-    # Int32VectorT
-    def _UnPack(self, int32vector):
-        if int32vector is None:
-            return
-        if not int32vector.ValuesIsNone():
-            if np is None:
-                self.values = []
-                for i in range(int32vector.ValuesLength()):
-                    self.values.append(int32vector.Values(i))
-            else:
-                self.values = int32vector.ValuesAsNumpy()
-
-    # Int32VectorT
-    def Pack(self, builder):
-        if self.values is not None:
-            if np is not None and type(self.values) is np.ndarray:
-                values = builder.CreateNumpyVector(self.values)
-            else:
-                Int32VectorStartValuesVector(builder, len(self.values))
-                for i in reversed(range(len(self.values))):
-                    builder.PrependInt32(self.values[i])
-                values = builder.EndVector()
-        Int32VectorStart(builder)
-        if self.values is not None:
-            Int32VectorAddValues(builder, values)
-        int32vector = Int32VectorEnd(builder)
-        return int32vector
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
 
 class L2NormOptions(object):
     __slots__ = ['_tab']
@@ -5363,15 +7781,16 @@
             return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
         return 0
 
-def L2NormOptionsStart(builder): builder.StartObject(1)
-def Start(builder):
-    return L2NormOptionsStart(builder)
-def L2NormOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0)
-def AddFusedActivationFunction(builder, fusedActivationFunction):
-    return L2NormOptionsAddFusedActivationFunction(builder, fusedActivationFunction)
-def L2NormOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return L2NormOptionsEnd(builder)
+def L2NormOptionsStart(builder):
+    builder.StartObject(1)
+
+def L2NormOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+    builder.PrependInt8Slot(0, fusedActivationFunction, 0)
+
+def L2NormOptionsEnd(builder):
+    return builder.EndObject()
+
+
 
 class L2NormOptionsT(object):
 
@@ -5381,125 +7800,150 @@
 
     @classmethod
     def InitFromBuf(cls, buf, pos):
-        l2normOptions = L2NormOptions()
-        l2normOptions.Init(buf, pos)
-        return cls.InitFromObj(l2normOptions)
+        l2NormOptions = L2NormOptions()
+        l2NormOptions.Init(buf, pos)
+        return cls.InitFromObj(l2NormOptions)
 
     @classmethod
-    def InitFromObj(cls, l2normOptions):
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, l2NormOptions):
         x = L2NormOptionsT()
-        x._UnPack(l2normOptions)
+        x._UnPack(l2NormOptions)
         return x
 
     # L2NormOptionsT
-    def _UnPack(self, l2normOptions):
-        if l2normOptions is None:
+    def _UnPack(self, l2NormOptions):
+        if l2NormOptions is None:
             return
-        self.fusedActivationFunction = l2normOptions.FusedActivationFunction()
+        self.fusedActivationFunction = l2NormOptions.FusedActivationFunction()
 
     # L2NormOptionsT
     def Pack(self, builder):
         L2NormOptionsStart(builder)
         L2NormOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        l2normOptions = L2NormOptionsEnd(builder)
-        return l2normOptions
-# automatically generated by the FlatBuffers compiler, do not modify
+        l2NormOptions = L2NormOptionsEnd(builder)
+        return l2NormOptions
 
-# namespace: tflite
 
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class LSHProjectionOptions(object):
+class LocalResponseNormalizationOptions(object):
     __slots__ = ['_tab']
 
     @classmethod
     def GetRootAs(cls, buf, offset=0):
         n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = LSHProjectionOptions()
+        x = LocalResponseNormalizationOptions()
         x.Init(buf, n + offset)
         return x
 
     @classmethod
-    def GetRootAsLSHProjectionOptions(cls, buf, offset=0):
+    def GetRootAsLocalResponseNormalizationOptions(cls, buf, offset=0):
         """This method is deprecated. Please switch to GetRootAs."""
         return cls.GetRootAs(buf, offset)
     @classmethod
-    def LSHProjectionOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+    def LocalResponseNormalizationOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
         return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
 
-    # LSHProjectionOptions
+    # LocalResponseNormalizationOptions
     def Init(self, buf, pos):
         self._tab = flatbuffers.table.Table(buf, pos)
 
-    # LSHProjectionOptions
-    def Type(self):
+    # LocalResponseNormalizationOptions
+    def Radius(self):
         o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
         if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
         return 0
 
-def LSHProjectionOptionsStart(builder): builder.StartObject(1)
-def Start(builder):
-    return LSHProjectionOptionsStart(builder)
-def LSHProjectionOptionsAddType(builder, type): builder.PrependInt8Slot(0, type, 0)
-def AddType(builder, type):
-    return LSHProjectionOptionsAddType(builder, type)
-def LSHProjectionOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return LSHProjectionOptionsEnd(builder)
+    # LocalResponseNormalizationOptions
+    def Bias(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
+        return 0.0
 
-class LSHProjectionOptionsT(object):
+    # LocalResponseNormalizationOptions
+    def Alpha(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
+        return 0.0
 
-    # LSHProjectionOptionsT
+    # LocalResponseNormalizationOptions
+    def Beta(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
+        return 0.0
+
+def LocalResponseNormalizationOptionsStart(builder):
+    builder.StartObject(4)
+
+def LocalResponseNormalizationOptionsAddRadius(builder, radius):
+    builder.PrependInt32Slot(0, radius, 0)
+
+def LocalResponseNormalizationOptionsAddBias(builder, bias):
+    builder.PrependFloat32Slot(1, bias, 0.0)
+
+def LocalResponseNormalizationOptionsAddAlpha(builder, alpha):
+    builder.PrependFloat32Slot(2, alpha, 0.0)
+
+def LocalResponseNormalizationOptionsAddBeta(builder, beta):
+    builder.PrependFloat32Slot(3, beta, 0.0)
+
+def LocalResponseNormalizationOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class LocalResponseNormalizationOptionsT(object):
+
+    # LocalResponseNormalizationOptionsT
     def __init__(self):
-        self.type = 0  # type: int
+        self.radius = 0  # type: int
+        self.bias = 0.0  # type: float
+        self.alpha = 0.0  # type: float
+        self.beta = 0.0  # type: float
 
     @classmethod
     def InitFromBuf(cls, buf, pos):
-        lshprojectionOptions = LSHProjectionOptions()
-        lshprojectionOptions.Init(buf, pos)
-        return cls.InitFromObj(lshprojectionOptions)
+        localResponseNormalizationOptions = LocalResponseNormalizationOptions()
+        localResponseNormalizationOptions.Init(buf, pos)
+        return cls.InitFromObj(localResponseNormalizationOptions)
 
     @classmethod
-    def InitFromObj(cls, lshprojectionOptions):
-        x = LSHProjectionOptionsT()
-        x._UnPack(lshprojectionOptions)
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, localResponseNormalizationOptions):
+        x = LocalResponseNormalizationOptionsT()
+        x._UnPack(localResponseNormalizationOptions)
         return x
 
-    # LSHProjectionOptionsT
-    def _UnPack(self, lshprojectionOptions):
-        if lshprojectionOptions is None:
+    # LocalResponseNormalizationOptionsT
+    def _UnPack(self, localResponseNormalizationOptions):
+        if localResponseNormalizationOptions is None:
             return
-        self.type = lshprojectionOptions.Type()
+        self.radius = localResponseNormalizationOptions.Radius()
+        self.bias = localResponseNormalizationOptions.Bias()
+        self.alpha = localResponseNormalizationOptions.Alpha()
+        self.beta = localResponseNormalizationOptions.Beta()
 
-    # LSHProjectionOptionsT
+    # LocalResponseNormalizationOptionsT
     def Pack(self, builder):
-        LSHProjectionOptionsStart(builder)
-        LSHProjectionOptionsAddType(builder, self.type)
-        lshprojectionOptions = LSHProjectionOptionsEnd(builder)
-        return lshprojectionOptions
-# automatically generated by the FlatBuffers compiler, do not modify
+        LocalResponseNormalizationOptionsStart(builder)
+        LocalResponseNormalizationOptionsAddRadius(builder, self.radius)
+        LocalResponseNormalizationOptionsAddBias(builder, self.bias)
+        LocalResponseNormalizationOptionsAddAlpha(builder, self.alpha)
+        LocalResponseNormalizationOptionsAddBeta(builder, self.beta)
+        localResponseNormalizationOptions = LocalResponseNormalizationOptionsEnd(builder)
+        return localResponseNormalizationOptions
 
-# namespace: tflite
-
-class LSHProjectionType(object):
-    UNKNOWN = 0
-    SPARSE = 1
-    DENSE = 2
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-class LSTMKernelType(object):
-    FULL = 0
-    BASIC = 1
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
 
 class LSTMOptions(object):
     __slots__ = ['_tab']
@@ -5558,27 +8002,28 @@
             return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
         return False
 
-def LSTMOptionsStart(builder): builder.StartObject(5)
-def Start(builder):
-    return LSTMOptionsStart(builder)
-def LSTMOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0)
-def AddFusedActivationFunction(builder, fusedActivationFunction):
-    return LSTMOptionsAddFusedActivationFunction(builder, fusedActivationFunction)
-def LSTMOptionsAddCellClip(builder, cellClip): builder.PrependFloat32Slot(1, cellClip, 0.0)
-def AddCellClip(builder, cellClip):
-    return LSTMOptionsAddCellClip(builder, cellClip)
-def LSTMOptionsAddProjClip(builder, projClip): builder.PrependFloat32Slot(2, projClip, 0.0)
-def AddProjClip(builder, projClip):
-    return LSTMOptionsAddProjClip(builder, projClip)
-def LSTMOptionsAddKernelType(builder, kernelType): builder.PrependInt8Slot(3, kernelType, 0)
-def AddKernelType(builder, kernelType):
-    return LSTMOptionsAddKernelType(builder, kernelType)
-def LSTMOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): builder.PrependBoolSlot(4, asymmetricQuantizeInputs, 0)
-def AddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs):
-    return LSTMOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs)
-def LSTMOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return LSTMOptionsEnd(builder)
+def LSTMOptionsStart(builder):
+    builder.StartObject(5)
+
+def LSTMOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+    builder.PrependInt8Slot(0, fusedActivationFunction, 0)
+
+def LSTMOptionsAddCellClip(builder, cellClip):
+    builder.PrependFloat32Slot(1, cellClip, 0.0)
+
+def LSTMOptionsAddProjClip(builder, projClip):
+    builder.PrependFloat32Slot(2, projClip, 0.0)
+
+def LSTMOptionsAddKernelType(builder, kernelType):
+    builder.PrependInt8Slot(3, kernelType, 0)
+
+def LSTMOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs):
+    builder.PrependBoolSlot(4, asymmetricQuantizeInputs, 0)
+
+def LSTMOptionsEnd(builder):
+    return builder.EndObject()
+
+
 
 class LSTMOptionsT(object):
 
@@ -5597,6 +8042,11 @@
         return cls.InitFromObj(lstmoptions)
 
     @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
     def InitFromObj(cls, lstmoptions):
         x = LSTMOptionsT()
         x._UnPack(lstmoptions)
@@ -5622,331 +8072,2270 @@
         LSTMOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs)
         lstmoptions = LSTMOptionsEnd(builder)
         return lstmoptions
-# automatically generated by the FlatBuffers compiler, do not modify
 
-# namespace: tflite
 
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class LeakyReluOptions(object):
+class UnidirectionalSequenceLSTMOptions(object):
     __slots__ = ['_tab']
 
     @classmethod
     def GetRootAs(cls, buf, offset=0):
         n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = LeakyReluOptions()
+        x = UnidirectionalSequenceLSTMOptions()
         x.Init(buf, n + offset)
         return x
 
     @classmethod
-    def GetRootAsLeakyReluOptions(cls, buf, offset=0):
+    def GetRootAsUnidirectionalSequenceLSTMOptions(cls, buf, offset=0):
         """This method is deprecated. Please switch to GetRootAs."""
         return cls.GetRootAs(buf, offset)
     @classmethod
-    def LeakyReluOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+    def UnidirectionalSequenceLSTMOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
         return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
 
-    # LeakyReluOptions
+    # UnidirectionalSequenceLSTMOptions
     def Init(self, buf, pos):
         self._tab = flatbuffers.table.Table(buf, pos)
 
-    # LeakyReluOptions
-    def Alpha(self):
+    # UnidirectionalSequenceLSTMOptions
+    def FusedActivationFunction(self):
         o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
         if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
-        return 0.0
-
-def LeakyReluOptionsStart(builder): builder.StartObject(1)
-def Start(builder):
-    return LeakyReluOptionsStart(builder)
-def LeakyReluOptionsAddAlpha(builder, alpha): builder.PrependFloat32Slot(0, alpha, 0.0)
-def AddAlpha(builder, alpha):
-    return LeakyReluOptionsAddAlpha(builder, alpha)
-def LeakyReluOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return LeakyReluOptionsEnd(builder)
-
-class LeakyReluOptionsT(object):
-
-    # LeakyReluOptionsT
-    def __init__(self):
-        self.alpha = 0.0  # type: float
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        leakyReluOptions = LeakyReluOptions()
-        leakyReluOptions.Init(buf, pos)
-        return cls.InitFromObj(leakyReluOptions)
-
-    @classmethod
-    def InitFromObj(cls, leakyReluOptions):
-        x = LeakyReluOptionsT()
-        x._UnPack(leakyReluOptions)
-        return x
-
-    # LeakyReluOptionsT
-    def _UnPack(self, leakyReluOptions):
-        if leakyReluOptions is None:
-            return
-        self.alpha = leakyReluOptions.Alpha()
-
-    # LeakyReluOptionsT
-    def Pack(self, builder):
-        LeakyReluOptionsStart(builder)
-        LeakyReluOptionsAddAlpha(builder, self.alpha)
-        leakyReluOptions = LeakyReluOptionsEnd(builder)
-        return leakyReluOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class LessEqualOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = LessEqualOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsLessEqualOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def LessEqualOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # LessEqualOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def LessEqualOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return LessEqualOptionsStart(builder)
-def LessEqualOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return LessEqualOptionsEnd(builder)
-
-class LessEqualOptionsT(object):
-
-    # LessEqualOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        lessEqualOptions = LessEqualOptions()
-        lessEqualOptions.Init(buf, pos)
-        return cls.InitFromObj(lessEqualOptions)
-
-    @classmethod
-    def InitFromObj(cls, lessEqualOptions):
-        x = LessEqualOptionsT()
-        x._UnPack(lessEqualOptions)
-        return x
-
-    # LessEqualOptionsT
-    def _UnPack(self, lessEqualOptions):
-        if lessEqualOptions is None:
-            return
-
-    # LessEqualOptionsT
-    def Pack(self, builder):
-        LessEqualOptionsStart(builder)
-        lessEqualOptions = LessEqualOptionsEnd(builder)
-        return lessEqualOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class LessOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = LessOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsLessOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def LessOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # LessOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def LessOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return LessOptionsStart(builder)
-def LessOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return LessOptionsEnd(builder)
-
-class LessOptionsT(object):
-
-    # LessOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        lessOptions = LessOptions()
-        lessOptions.Init(buf, pos)
-        return cls.InitFromObj(lessOptions)
-
-    @classmethod
-    def InitFromObj(cls, lessOptions):
-        x = LessOptionsT()
-        x._UnPack(lessOptions)
-        return x
-
-    # LessOptionsT
-    def _UnPack(self, lessOptions):
-        if lessOptions is None:
-            return
-
-    # LessOptionsT
-    def Pack(self, builder):
-        LessOptionsStart(builder)
-        lessOptions = LessOptionsEnd(builder)
-        return lessOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class LocalResponseNormalizationOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = LocalResponseNormalizationOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsLocalResponseNormalizationOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def LocalResponseNormalizationOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # LocalResponseNormalizationOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # LocalResponseNormalizationOptions
-    def Radius(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
         return 0
 
-    # LocalResponseNormalizationOptions
-    def Bias(self):
+    # UnidirectionalSequenceLSTMOptions
+    def CellClip(self):
         o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
         if o != 0:
             return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
         return 0.0
 
-    # LocalResponseNormalizationOptions
-    def Alpha(self):
+    # UnidirectionalSequenceLSTMOptions
+    def ProjClip(self):
         o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
         if o != 0:
             return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
         return 0.0
 
-    # LocalResponseNormalizationOptions
-    def Beta(self):
+    # UnidirectionalSequenceLSTMOptions
+    def TimeMajor(self):
         o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
         if o != 0:
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return False
+
+    # UnidirectionalSequenceLSTMOptions
+    def AsymmetricQuantizeInputs(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+        if o != 0:
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return False
+
+    # UnidirectionalSequenceLSTMOptions
+    def DiagonalRecurrentTensors(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
+        if o != 0:
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return False
+
+def UnidirectionalSequenceLSTMOptionsStart(builder):
+    builder.StartObject(6)
+
+def UnidirectionalSequenceLSTMOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+    builder.PrependInt8Slot(0, fusedActivationFunction, 0)
+
+def UnidirectionalSequenceLSTMOptionsAddCellClip(builder, cellClip):
+    builder.PrependFloat32Slot(1, cellClip, 0.0)
+
+def UnidirectionalSequenceLSTMOptionsAddProjClip(builder, projClip):
+    builder.PrependFloat32Slot(2, projClip, 0.0)
+
+def UnidirectionalSequenceLSTMOptionsAddTimeMajor(builder, timeMajor):
+    builder.PrependBoolSlot(3, timeMajor, 0)
+
+def UnidirectionalSequenceLSTMOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs):
+    builder.PrependBoolSlot(4, asymmetricQuantizeInputs, 0)
+
+def UnidirectionalSequenceLSTMOptionsAddDiagonalRecurrentTensors(builder, diagonalRecurrentTensors):
+    builder.PrependBoolSlot(5, diagonalRecurrentTensors, 0)
+
+def UnidirectionalSequenceLSTMOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class UnidirectionalSequenceLSTMOptionsT(object):
+
+    # UnidirectionalSequenceLSTMOptionsT
+    def __init__(self):
+        self.fusedActivationFunction = 0  # type: int
+        self.cellClip = 0.0  # type: float
+        self.projClip = 0.0  # type: float
+        self.timeMajor = False  # type: bool
+        self.asymmetricQuantizeInputs = False  # type: bool
+        self.diagonalRecurrentTensors = False  # type: bool
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        unidirectionalSequenceLstmoptions = UnidirectionalSequenceLSTMOptions()
+        unidirectionalSequenceLstmoptions.Init(buf, pos)
+        return cls.InitFromObj(unidirectionalSequenceLstmoptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, unidirectionalSequenceLstmoptions):
+        x = UnidirectionalSequenceLSTMOptionsT()
+        x._UnPack(unidirectionalSequenceLstmoptions)
+        return x
+
+    # UnidirectionalSequenceLSTMOptionsT
+    def _UnPack(self, unidirectionalSequenceLstmoptions):
+        if unidirectionalSequenceLstmoptions is None:
+            return
+        self.fusedActivationFunction = unidirectionalSequenceLstmoptions.FusedActivationFunction()
+        self.cellClip = unidirectionalSequenceLstmoptions.CellClip()
+        self.projClip = unidirectionalSequenceLstmoptions.ProjClip()
+        self.timeMajor = unidirectionalSequenceLstmoptions.TimeMajor()
+        self.asymmetricQuantizeInputs = unidirectionalSequenceLstmoptions.AsymmetricQuantizeInputs()
+        self.diagonalRecurrentTensors = unidirectionalSequenceLstmoptions.DiagonalRecurrentTensors()
+
+    # UnidirectionalSequenceLSTMOptionsT
+    def Pack(self, builder):
+        UnidirectionalSequenceLSTMOptionsStart(builder)
+        UnidirectionalSequenceLSTMOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
+        UnidirectionalSequenceLSTMOptionsAddCellClip(builder, self.cellClip)
+        UnidirectionalSequenceLSTMOptionsAddProjClip(builder, self.projClip)
+        UnidirectionalSequenceLSTMOptionsAddTimeMajor(builder, self.timeMajor)
+        UnidirectionalSequenceLSTMOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs)
+        UnidirectionalSequenceLSTMOptionsAddDiagonalRecurrentTensors(builder, self.diagonalRecurrentTensors)
+        unidirectionalSequenceLstmoptions = UnidirectionalSequenceLSTMOptionsEnd(builder)
+        return unidirectionalSequenceLstmoptions
+
+
+class BidirectionalSequenceLSTMOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = BidirectionalSequenceLSTMOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsBidirectionalSequenceLSTMOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def BidirectionalSequenceLSTMOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # BidirectionalSequenceLSTMOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # BidirectionalSequenceLSTMOptions
+    def FusedActivationFunction(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
+
+    # BidirectionalSequenceLSTMOptions
+    def CellClip(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
             return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
         return 0.0
 
-def LocalResponseNormalizationOptionsStart(builder): builder.StartObject(4)
-def Start(builder):
-    return LocalResponseNormalizationOptionsStart(builder)
-def LocalResponseNormalizationOptionsAddRadius(builder, radius): builder.PrependInt32Slot(0, radius, 0)
-def AddRadius(builder, radius):
-    return LocalResponseNormalizationOptionsAddRadius(builder, radius)
-def LocalResponseNormalizationOptionsAddBias(builder, bias): builder.PrependFloat32Slot(1, bias, 0.0)
-def AddBias(builder, bias):
-    return LocalResponseNormalizationOptionsAddBias(builder, bias)
-def LocalResponseNormalizationOptionsAddAlpha(builder, alpha): builder.PrependFloat32Slot(2, alpha, 0.0)
-def AddAlpha(builder, alpha):
-    return LocalResponseNormalizationOptionsAddAlpha(builder, alpha)
-def LocalResponseNormalizationOptionsAddBeta(builder, beta): builder.PrependFloat32Slot(3, beta, 0.0)
-def AddBeta(builder, beta):
-    return LocalResponseNormalizationOptionsAddBeta(builder, beta)
-def LocalResponseNormalizationOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return LocalResponseNormalizationOptionsEnd(builder)
+    # BidirectionalSequenceLSTMOptions
+    def ProjClip(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
+        return 0.0
 
-class LocalResponseNormalizationOptionsT(object):
+    # BidirectionalSequenceLSTMOptions
+    def MergeOutputs(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+        if o != 0:
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return False
 
-    # LocalResponseNormalizationOptionsT
+    # BidirectionalSequenceLSTMOptions
+    def TimeMajor(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+        if o != 0:
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return True
+
+    # BidirectionalSequenceLSTMOptions
+    def AsymmetricQuantizeInputs(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
+        if o != 0:
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return False
+
+def BidirectionalSequenceLSTMOptionsStart(builder):
+    builder.StartObject(6)
+
+def BidirectionalSequenceLSTMOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+    builder.PrependInt8Slot(0, fusedActivationFunction, 0)
+
+def BidirectionalSequenceLSTMOptionsAddCellClip(builder, cellClip):
+    builder.PrependFloat32Slot(1, cellClip, 0.0)
+
+def BidirectionalSequenceLSTMOptionsAddProjClip(builder, projClip):
+    builder.PrependFloat32Slot(2, projClip, 0.0)
+
+def BidirectionalSequenceLSTMOptionsAddMergeOutputs(builder, mergeOutputs):
+    builder.PrependBoolSlot(3, mergeOutputs, 0)
+
+def BidirectionalSequenceLSTMOptionsAddTimeMajor(builder, timeMajor):
+    builder.PrependBoolSlot(4, timeMajor, 1)
+
+def BidirectionalSequenceLSTMOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs):
+    builder.PrependBoolSlot(5, asymmetricQuantizeInputs, 0)
+
+def BidirectionalSequenceLSTMOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class BidirectionalSequenceLSTMOptionsT(object):
+
+    # BidirectionalSequenceLSTMOptionsT
     def __init__(self):
-        self.radius = 0  # type: int
-        self.bias = 0.0  # type: float
-        self.alpha = 0.0  # type: float
-        self.beta = 0.0  # type: float
+        self.fusedActivationFunction = 0  # type: int
+        self.cellClip = 0.0  # type: float
+        self.projClip = 0.0  # type: float
+        self.mergeOutputs = False  # type: bool
+        self.timeMajor = True  # type: bool
+        self.asymmetricQuantizeInputs = False  # type: bool
 
     @classmethod
     def InitFromBuf(cls, buf, pos):
-        localResponseNormalizationOptions = LocalResponseNormalizationOptions()
-        localResponseNormalizationOptions.Init(buf, pos)
-        return cls.InitFromObj(localResponseNormalizationOptions)
+        bidirectionalSequenceLstmoptions = BidirectionalSequenceLSTMOptions()
+        bidirectionalSequenceLstmoptions.Init(buf, pos)
+        return cls.InitFromObj(bidirectionalSequenceLstmoptions)
 
     @classmethod
-    def InitFromObj(cls, localResponseNormalizationOptions):
-        x = LocalResponseNormalizationOptionsT()
-        x._UnPack(localResponseNormalizationOptions)
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, bidirectionalSequenceLstmoptions):
+        x = BidirectionalSequenceLSTMOptionsT()
+        x._UnPack(bidirectionalSequenceLstmoptions)
         return x
 
-    # LocalResponseNormalizationOptionsT
-    def _UnPack(self, localResponseNormalizationOptions):
-        if localResponseNormalizationOptions is None:
+    # BidirectionalSequenceLSTMOptionsT
+    def _UnPack(self, bidirectionalSequenceLstmoptions):
+        if bidirectionalSequenceLstmoptions is None:
             return
-        self.radius = localResponseNormalizationOptions.Radius()
-        self.bias = localResponseNormalizationOptions.Bias()
-        self.alpha = localResponseNormalizationOptions.Alpha()
-        self.beta = localResponseNormalizationOptions.Beta()
+        self.fusedActivationFunction = bidirectionalSequenceLstmoptions.FusedActivationFunction()
+        self.cellClip = bidirectionalSequenceLstmoptions.CellClip()
+        self.projClip = bidirectionalSequenceLstmoptions.ProjClip()
+        self.mergeOutputs = bidirectionalSequenceLstmoptions.MergeOutputs()
+        self.timeMajor = bidirectionalSequenceLstmoptions.TimeMajor()
+        self.asymmetricQuantizeInputs = bidirectionalSequenceLstmoptions.AsymmetricQuantizeInputs()
 
-    # LocalResponseNormalizationOptionsT
+    # BidirectionalSequenceLSTMOptionsT
     def Pack(self, builder):
-        LocalResponseNormalizationOptionsStart(builder)
-        LocalResponseNormalizationOptionsAddRadius(builder, self.radius)
-        LocalResponseNormalizationOptionsAddBias(builder, self.bias)
-        LocalResponseNormalizationOptionsAddAlpha(builder, self.alpha)
-        LocalResponseNormalizationOptionsAddBeta(builder, self.beta)
-        localResponseNormalizationOptions = LocalResponseNormalizationOptionsEnd(builder)
-        return localResponseNormalizationOptions
-# automatically generated by the FlatBuffers compiler, do not modify
+        BidirectionalSequenceLSTMOptionsStart(builder)
+        BidirectionalSequenceLSTMOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
+        BidirectionalSequenceLSTMOptionsAddCellClip(builder, self.cellClip)
+        BidirectionalSequenceLSTMOptionsAddProjClip(builder, self.projClip)
+        BidirectionalSequenceLSTMOptionsAddMergeOutputs(builder, self.mergeOutputs)
+        BidirectionalSequenceLSTMOptionsAddTimeMajor(builder, self.timeMajor)
+        BidirectionalSequenceLSTMOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs)
+        bidirectionalSequenceLstmoptions = BidirectionalSequenceLSTMOptionsEnd(builder)
+        return bidirectionalSequenceLstmoptions
 
-# namespace: tflite
 
-from flatbuffers.compat import import_numpy
-np = import_numpy()
+class ResizeBilinearOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = ResizeBilinearOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsResizeBilinearOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def ResizeBilinearOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # ResizeBilinearOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # ResizeBilinearOptions
+    def AlignCorners(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return False
+
+    # ResizeBilinearOptions
+    def HalfPixelCenters(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+        if o != 0:
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return False
+
+def ResizeBilinearOptionsStart(builder):
+    builder.StartObject(4)
+
+def ResizeBilinearOptionsAddAlignCorners(builder, alignCorners):
+    builder.PrependBoolSlot(2, alignCorners, 0)
+
+def ResizeBilinearOptionsAddHalfPixelCenters(builder, halfPixelCenters):
+    builder.PrependBoolSlot(3, halfPixelCenters, 0)
+
+def ResizeBilinearOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class ResizeBilinearOptionsT(object):
+
+    # ResizeBilinearOptionsT
+    def __init__(self):
+        self.alignCorners = False  # type: bool
+        self.halfPixelCenters = False  # type: bool
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        resizeBilinearOptions = ResizeBilinearOptions()
+        resizeBilinearOptions.Init(buf, pos)
+        return cls.InitFromObj(resizeBilinearOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, resizeBilinearOptions):
+        x = ResizeBilinearOptionsT()
+        x._UnPack(resizeBilinearOptions)
+        return x
+
+    # ResizeBilinearOptionsT
+    def _UnPack(self, resizeBilinearOptions):
+        if resizeBilinearOptions is None:
+            return
+        self.alignCorners = resizeBilinearOptions.AlignCorners()
+        self.halfPixelCenters = resizeBilinearOptions.HalfPixelCenters()
+
+    # ResizeBilinearOptionsT
+    def Pack(self, builder):
+        ResizeBilinearOptionsStart(builder)
+        ResizeBilinearOptionsAddAlignCorners(builder, self.alignCorners)
+        ResizeBilinearOptionsAddHalfPixelCenters(builder, self.halfPixelCenters)
+        resizeBilinearOptions = ResizeBilinearOptionsEnd(builder)
+        return resizeBilinearOptions
+
+
+class ResizeNearestNeighborOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = ResizeNearestNeighborOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsResizeNearestNeighborOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def ResizeNearestNeighborOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # ResizeNearestNeighborOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # ResizeNearestNeighborOptions
+    def AlignCorners(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return False
+
+    # ResizeNearestNeighborOptions
+    def HalfPixelCenters(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return False
+
+def ResizeNearestNeighborOptionsStart(builder):
+    builder.StartObject(2)
+
+def ResizeNearestNeighborOptionsAddAlignCorners(builder, alignCorners):
+    builder.PrependBoolSlot(0, alignCorners, 0)
+
+def ResizeNearestNeighborOptionsAddHalfPixelCenters(builder, halfPixelCenters):
+    builder.PrependBoolSlot(1, halfPixelCenters, 0)
+
+def ResizeNearestNeighborOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class ResizeNearestNeighborOptionsT(object):
+
+    # ResizeNearestNeighborOptionsT
+    def __init__(self):
+        self.alignCorners = False  # type: bool
+        self.halfPixelCenters = False  # type: bool
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        resizeNearestNeighborOptions = ResizeNearestNeighborOptions()
+        resizeNearestNeighborOptions.Init(buf, pos)
+        return cls.InitFromObj(resizeNearestNeighborOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, resizeNearestNeighborOptions):
+        x = ResizeNearestNeighborOptionsT()
+        x._UnPack(resizeNearestNeighborOptions)
+        return x
+
+    # ResizeNearestNeighborOptionsT
+    def _UnPack(self, resizeNearestNeighborOptions):
+        if resizeNearestNeighborOptions is None:
+            return
+        self.alignCorners = resizeNearestNeighborOptions.AlignCorners()
+        self.halfPixelCenters = resizeNearestNeighborOptions.HalfPixelCenters()
+
+    # ResizeNearestNeighborOptionsT
+    def Pack(self, builder):
+        ResizeNearestNeighborOptionsStart(builder)
+        ResizeNearestNeighborOptionsAddAlignCorners(builder, self.alignCorners)
+        ResizeNearestNeighborOptionsAddHalfPixelCenters(builder, self.halfPixelCenters)
+        resizeNearestNeighborOptions = ResizeNearestNeighborOptionsEnd(builder)
+        return resizeNearestNeighborOptions
+
+
+class CallOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = CallOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsCallOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def CallOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # CallOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # CallOptions
+    def Subgraph(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
+        return 0
+
+def CallOptionsStart(builder):
+    builder.StartObject(1)
+
+def CallOptionsAddSubgraph(builder, subgraph):
+    builder.PrependUint32Slot(0, subgraph, 0)
+
+def CallOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class CallOptionsT(object):
+
+    # CallOptionsT
+    def __init__(self):
+        self.subgraph = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        callOptions = CallOptions()
+        callOptions.Init(buf, pos)
+        return cls.InitFromObj(callOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, callOptions):
+        x = CallOptionsT()
+        x._UnPack(callOptions)
+        return x
+
+    # CallOptionsT
+    def _UnPack(self, callOptions):
+        if callOptions is None:
+            return
+        self.subgraph = callOptions.Subgraph()
+
+    # CallOptionsT
+    def Pack(self, builder):
+        CallOptionsStart(builder)
+        CallOptionsAddSubgraph(builder, self.subgraph)
+        callOptions = CallOptionsEnd(builder)
+        return callOptions
+
+
+class PadOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = PadOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsPadOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def PadOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # PadOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def PadOptionsStart(builder):
+    builder.StartObject(0)
+
+def PadOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class PadOptionsT(object):
+
+    # PadOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        padOptions = PadOptions()
+        padOptions.Init(buf, pos)
+        return cls.InitFromObj(padOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, padOptions):
+        x = PadOptionsT()
+        x._UnPack(padOptions)
+        return x
+
+    # PadOptionsT
+    def _UnPack(self, padOptions):
+        if padOptions is None:
+            return
+
+    # PadOptionsT
+    def Pack(self, builder):
+        PadOptionsStart(builder)
+        padOptions = PadOptionsEnd(builder)
+        return padOptions
+
+
+class PadV2Options(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = PadV2Options()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsPadV2Options(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def PadV2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # PadV2Options
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def PadV2OptionsStart(builder):
+    builder.StartObject(0)
+
+def PadV2OptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class PadV2OptionsT(object):
+
+    # PadV2OptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        padV2Options = PadV2Options()
+        padV2Options.Init(buf, pos)
+        return cls.InitFromObj(padV2Options)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, padV2Options):
+        x = PadV2OptionsT()
+        x._UnPack(padV2Options)
+        return x
+
+    # PadV2OptionsT
+    def _UnPack(self, padV2Options):
+        if padV2Options is None:
+            return
+
+    # PadV2OptionsT
+    def Pack(self, builder):
+        PadV2OptionsStart(builder)
+        padV2Options = PadV2OptionsEnd(builder)
+        return padV2Options
+
+
+class ReshapeOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = ReshapeOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsReshapeOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def ReshapeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # ReshapeOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # ReshapeOptions
+    def NewShape(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+        return 0
+
+    # ReshapeOptions
+    def NewShapeAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
+        return 0
+
+    # ReshapeOptions
+    def NewShapeLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # ReshapeOptions
+    def NewShapeIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        return o == 0
+
+def ReshapeOptionsStart(builder):
+    builder.StartObject(1)
+
+def ReshapeOptionsAddNewShape(builder, newShape):
+    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(newShape), 0)
+
+def ReshapeOptionsStartNewShapeVector(builder, numElems):
+    return builder.StartVector(4, numElems, 4)
+
+def ReshapeOptionsEnd(builder):
+    return builder.EndObject()
+
+
+try:
+    from typing import List
+except:
+    pass
+
+class ReshapeOptionsT(object):
+
+    # ReshapeOptionsT
+    def __init__(self):
+        self.newShape = None  # type: List[int]
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        reshapeOptions = ReshapeOptions()
+        reshapeOptions.Init(buf, pos)
+        return cls.InitFromObj(reshapeOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, reshapeOptions):
+        x = ReshapeOptionsT()
+        x._UnPack(reshapeOptions)
+        return x
+
+    # ReshapeOptionsT
+    def _UnPack(self, reshapeOptions):
+        if reshapeOptions is None:
+            return
+        if not reshapeOptions.NewShapeIsNone():
+            if np is None:
+                self.newShape = []
+                for i in range(reshapeOptions.NewShapeLength()):
+                    self.newShape.append(reshapeOptions.NewShape(i))
+            else:
+                self.newShape = reshapeOptions.NewShapeAsNumpy()
+
+    # ReshapeOptionsT
+    def Pack(self, builder):
+        if self.newShape is not None:
+            if np is not None and type(self.newShape) is np.ndarray:
+                newShape = builder.CreateNumpyVector(self.newShape)
+            else:
+                ReshapeOptionsStartNewShapeVector(builder, len(self.newShape))
+                for i in reversed(range(len(self.newShape))):
+                    builder.PrependInt32(self.newShape[i])
+                newShape = builder.EndVector()
+        ReshapeOptionsStart(builder)
+        if self.newShape is not None:
+            ReshapeOptionsAddNewShape(builder, newShape)
+        reshapeOptions = ReshapeOptionsEnd(builder)
+        return reshapeOptions
+
+
+class SpaceToBatchNDOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = SpaceToBatchNDOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsSpaceToBatchNDOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def SpaceToBatchNDOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # SpaceToBatchNDOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def SpaceToBatchNDOptionsStart(builder):
+    builder.StartObject(0)
+
+def SpaceToBatchNDOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class SpaceToBatchNDOptionsT(object):
+
+    # SpaceToBatchNDOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        spaceToBatchNdoptions = SpaceToBatchNDOptions()
+        spaceToBatchNdoptions.Init(buf, pos)
+        return cls.InitFromObj(spaceToBatchNdoptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, spaceToBatchNdoptions):
+        x = SpaceToBatchNDOptionsT()
+        x._UnPack(spaceToBatchNdoptions)
+        return x
+
+    # SpaceToBatchNDOptionsT
+    def _UnPack(self, spaceToBatchNdoptions):
+        if spaceToBatchNdoptions is None:
+            return
+
+    # SpaceToBatchNDOptionsT
+    def Pack(self, builder):
+        SpaceToBatchNDOptionsStart(builder)
+        spaceToBatchNdoptions = SpaceToBatchNDOptionsEnd(builder)
+        return spaceToBatchNdoptions
+
+
+class BatchToSpaceNDOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = BatchToSpaceNDOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsBatchToSpaceNDOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def BatchToSpaceNDOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # BatchToSpaceNDOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def BatchToSpaceNDOptionsStart(builder):
+    builder.StartObject(0)
+
+def BatchToSpaceNDOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class BatchToSpaceNDOptionsT(object):
+
+    # BatchToSpaceNDOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        batchToSpaceNdoptions = BatchToSpaceNDOptions()
+        batchToSpaceNdoptions.Init(buf, pos)
+        return cls.InitFromObj(batchToSpaceNdoptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, batchToSpaceNdoptions):
+        x = BatchToSpaceNDOptionsT()
+        x._UnPack(batchToSpaceNdoptions)
+        return x
+
+    # BatchToSpaceNDOptionsT
+    def _UnPack(self, batchToSpaceNdoptions):
+        if batchToSpaceNdoptions is None:
+            return
+
+    # BatchToSpaceNDOptionsT
+    def Pack(self, builder):
+        BatchToSpaceNDOptionsStart(builder)
+        batchToSpaceNdoptions = BatchToSpaceNDOptionsEnd(builder)
+        return batchToSpaceNdoptions
+
+
+class SkipGramOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = SkipGramOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsSkipGramOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def SkipGramOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # SkipGramOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # SkipGramOptions
+    def NgramSize(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # SkipGramOptions
+    def MaxSkipSize(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # SkipGramOptions
+    def IncludeAllNgrams(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return False
+
+def SkipGramOptionsStart(builder):
+    builder.StartObject(3)
+
+def SkipGramOptionsAddNgramSize(builder, ngramSize):
+    builder.PrependInt32Slot(0, ngramSize, 0)
+
+def SkipGramOptionsAddMaxSkipSize(builder, maxSkipSize):
+    builder.PrependInt32Slot(1, maxSkipSize, 0)
+
+def SkipGramOptionsAddIncludeAllNgrams(builder, includeAllNgrams):
+    builder.PrependBoolSlot(2, includeAllNgrams, 0)
+
+def SkipGramOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class SkipGramOptionsT(object):
+
+    # SkipGramOptionsT
+    def __init__(self):
+        self.ngramSize = 0  # type: int
+        self.maxSkipSize = 0  # type: int
+        self.includeAllNgrams = False  # type: bool
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        skipGramOptions = SkipGramOptions()
+        skipGramOptions.Init(buf, pos)
+        return cls.InitFromObj(skipGramOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, skipGramOptions):
+        x = SkipGramOptionsT()
+        x._UnPack(skipGramOptions)
+        return x
+
+    # SkipGramOptionsT
+    def _UnPack(self, skipGramOptions):
+        if skipGramOptions is None:
+            return
+        self.ngramSize = skipGramOptions.NgramSize()
+        self.maxSkipSize = skipGramOptions.MaxSkipSize()
+        self.includeAllNgrams = skipGramOptions.IncludeAllNgrams()
+
+    # SkipGramOptionsT
+    def Pack(self, builder):
+        SkipGramOptionsStart(builder)
+        SkipGramOptionsAddNgramSize(builder, self.ngramSize)
+        SkipGramOptionsAddMaxSkipSize(builder, self.maxSkipSize)
+        SkipGramOptionsAddIncludeAllNgrams(builder, self.includeAllNgrams)
+        skipGramOptions = SkipGramOptionsEnd(builder)
+        return skipGramOptions
+
+
+class SpaceToDepthOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = SpaceToDepthOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsSpaceToDepthOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def SpaceToDepthOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # SpaceToDepthOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # SpaceToDepthOptions
+    def BlockSize(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+def SpaceToDepthOptionsStart(builder):
+    builder.StartObject(1)
+
+def SpaceToDepthOptionsAddBlockSize(builder, blockSize):
+    builder.PrependInt32Slot(0, blockSize, 0)
+
+def SpaceToDepthOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class SpaceToDepthOptionsT(object):
+
+    # SpaceToDepthOptionsT
+    def __init__(self):
+        self.blockSize = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        spaceToDepthOptions = SpaceToDepthOptions()
+        spaceToDepthOptions.Init(buf, pos)
+        return cls.InitFromObj(spaceToDepthOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, spaceToDepthOptions):
+        x = SpaceToDepthOptionsT()
+        x._UnPack(spaceToDepthOptions)
+        return x
+
+    # SpaceToDepthOptionsT
+    def _UnPack(self, spaceToDepthOptions):
+        if spaceToDepthOptions is None:
+            return
+        self.blockSize = spaceToDepthOptions.BlockSize()
+
+    # SpaceToDepthOptionsT
+    def Pack(self, builder):
+        SpaceToDepthOptionsStart(builder)
+        SpaceToDepthOptionsAddBlockSize(builder, self.blockSize)
+        spaceToDepthOptions = SpaceToDepthOptionsEnd(builder)
+        return spaceToDepthOptions
+
+
+class DepthToSpaceOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = DepthToSpaceOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsDepthToSpaceOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def DepthToSpaceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # DepthToSpaceOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # DepthToSpaceOptions
+    def BlockSize(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+def DepthToSpaceOptionsStart(builder):
+    builder.StartObject(1)
+
+def DepthToSpaceOptionsAddBlockSize(builder, blockSize):
+    builder.PrependInt32Slot(0, blockSize, 0)
+
+def DepthToSpaceOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class DepthToSpaceOptionsT(object):
+
+    # DepthToSpaceOptionsT
+    def __init__(self):
+        self.blockSize = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        depthToSpaceOptions = DepthToSpaceOptions()
+        depthToSpaceOptions.Init(buf, pos)
+        return cls.InitFromObj(depthToSpaceOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, depthToSpaceOptions):
+        x = DepthToSpaceOptionsT()
+        x._UnPack(depthToSpaceOptions)
+        return x
+
+    # DepthToSpaceOptionsT
+    def _UnPack(self, depthToSpaceOptions):
+        if depthToSpaceOptions is None:
+            return
+        self.blockSize = depthToSpaceOptions.BlockSize()
+
+    # DepthToSpaceOptionsT
+    def Pack(self, builder):
+        DepthToSpaceOptionsStart(builder)
+        DepthToSpaceOptionsAddBlockSize(builder, self.blockSize)
+        depthToSpaceOptions = DepthToSpaceOptionsEnd(builder)
+        return depthToSpaceOptions
+
+
+# Generated FlatBuffers accessor for the TFLite `SubOptions` table (compiler output).
+# Reads fields lazily out of the serialized buffer via vtable offsets.
+class SubOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = SubOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsSubOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def SubOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        # b"\x54\x46\x4C\x33" is the ASCII file identifier "TFL3".
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # SubOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # SubOptions
+    # Returns 0 (the schema default) when the field is absent from the buffer.
+    def FusedActivationFunction(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
+
+    # SubOptions
+    # Returns True (the schema default) when the field is absent from the buffer.
+    def PotScaleInt16(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return True
+
+# Builder helpers for serializing a SubOptions table (2 slots).
+def SubOptionsStart(builder):
+    builder.StartObject(2)
+
+def SubOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+    builder.PrependInt8Slot(0, fusedActivationFunction, 0)
+
+def SubOptionsAddPotScaleInt16(builder, potScaleInt16):
+    builder.PrependBoolSlot(1, potScaleInt16, 1)
+
+def SubOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+# Mutable "object API" counterpart: plain-attribute mirror of SubOptions that
+# can be unpacked from a buffer and re-packed via Pack().
+class SubOptionsT(object):
+
+    # SubOptionsT
+    def __init__(self):
+        self.fusedActivationFunction = 0  # type: int
+        self.potScaleInt16 = True  # type: bool
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        subOptions = SubOptions()
+        subOptions.Init(buf, pos)
+        return cls.InitFromObj(subOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, subOptions):
+        x = SubOptionsT()
+        x._UnPack(subOptions)
+        return x
+
+    # SubOptionsT
+    def _UnPack(self, subOptions):
+        if subOptions is None:
+            return
+        self.fusedActivationFunction = subOptions.FusedActivationFunction()
+        self.potScaleInt16 = subOptions.PotScaleInt16()
+
+    # SubOptionsT
+    def Pack(self, builder):
+        SubOptionsStart(builder)
+        SubOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
+        SubOptionsAddPotScaleInt16(builder, self.potScaleInt16)
+        subOptions = SubOptionsEnd(builder)
+        return subOptions
+
+
+# Generated FlatBuffers accessor for the TFLite `DivOptions` table (compiler output).
+class DivOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = DivOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsDivOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def DivOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # DivOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # DivOptions
+    # Returns 0 (the schema default) when the field is absent from the buffer.
+    def FusedActivationFunction(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
+
+# Builder helpers for serializing a DivOptions table (1 slot).
+def DivOptionsStart(builder):
+    builder.StartObject(1)
+
+def DivOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+    builder.PrependInt8Slot(0, fusedActivationFunction, 0)
+
+def DivOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+# Mutable "object API" counterpart: unpack DivOptions from a buffer / re-pack via Pack().
+class DivOptionsT(object):
+
+    # DivOptionsT
+    def __init__(self):
+        self.fusedActivationFunction = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        divOptions = DivOptions()
+        divOptions.Init(buf, pos)
+        return cls.InitFromObj(divOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, divOptions):
+        x = DivOptionsT()
+        x._UnPack(divOptions)
+        return x
+
+    # DivOptionsT
+    def _UnPack(self, divOptions):
+        if divOptions is None:
+            return
+        self.fusedActivationFunction = divOptions.FusedActivationFunction()
+
+    # DivOptionsT
+    def Pack(self, builder):
+        DivOptionsStart(builder)
+        DivOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
+        divOptions = DivOptionsEnd(builder)
+        return divOptions
+
+
+# Generated FlatBuffers accessor for the TFLite `TopKV2Options` table
+# (compiler output). The table carries no fields.
+class TopKV2Options(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = TopKV2Options()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsTopKV2Options(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def TopKV2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # TopKV2Options
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+# Builder helpers for serializing a TopKV2Options table (0 slots).
+def TopKV2OptionsStart(builder):
+    builder.StartObject(0)
+
+def TopKV2OptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+# Mutable "object API" counterpart: unpack TopKV2Options from a buffer / re-pack via Pack().
+class TopKV2OptionsT(object):
+
+    # TopKV2OptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        topKv2Options = TopKV2Options()
+        topKv2Options.Init(buf, pos)
+        return cls.InitFromObj(topKv2Options)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, topKv2Options):
+        x = TopKV2OptionsT()
+        x._UnPack(topKv2Options)
+        return x
+
+    # TopKV2OptionsT
+    def _UnPack(self, topKv2Options):
+        if topKv2Options is None:
+            return
+
+    # TopKV2OptionsT
+    def Pack(self, builder):
+        TopKV2OptionsStart(builder)
+        topKv2Options = TopKV2OptionsEnd(builder)
+        return topKv2Options
+
+
+# Generated FlatBuffers accessor for the TFLite `EmbeddingLookupSparseOptions`
+# table (compiler output).
+class EmbeddingLookupSparseOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = EmbeddingLookupSparseOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsEmbeddingLookupSparseOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def EmbeddingLookupSparseOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # EmbeddingLookupSparseOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # EmbeddingLookupSparseOptions
+    # Returns 0 (the schema default) when the field is absent from the buffer.
+    def Combiner(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
+
+# Builder helpers for serializing an EmbeddingLookupSparseOptions table (1 slot).
+def EmbeddingLookupSparseOptionsStart(builder):
+    builder.StartObject(1)
+
+def EmbeddingLookupSparseOptionsAddCombiner(builder, combiner):
+    builder.PrependInt8Slot(0, combiner, 0)
+
+def EmbeddingLookupSparseOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+# Mutable "object API" counterpart: unpack EmbeddingLookupSparseOptions from a
+# buffer / re-pack via Pack().
+class EmbeddingLookupSparseOptionsT(object):
+
+    # EmbeddingLookupSparseOptionsT
+    def __init__(self):
+        self.combiner = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        embeddingLookupSparseOptions = EmbeddingLookupSparseOptions()
+        embeddingLookupSparseOptions.Init(buf, pos)
+        return cls.InitFromObj(embeddingLookupSparseOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, embeddingLookupSparseOptions):
+        x = EmbeddingLookupSparseOptionsT()
+        x._UnPack(embeddingLookupSparseOptions)
+        return x
+
+    # EmbeddingLookupSparseOptionsT
+    def _UnPack(self, embeddingLookupSparseOptions):
+        if embeddingLookupSparseOptions is None:
+            return
+        self.combiner = embeddingLookupSparseOptions.Combiner()
+
+    # EmbeddingLookupSparseOptionsT
+    def Pack(self, builder):
+        EmbeddingLookupSparseOptionsStart(builder)
+        EmbeddingLookupSparseOptionsAddCombiner(builder, self.combiner)
+        embeddingLookupSparseOptions = EmbeddingLookupSparseOptionsEnd(builder)
+        return embeddingLookupSparseOptions
+
+
+# Generated FlatBuffers accessor for the TFLite `GatherOptions` table (compiler output).
+class GatherOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = GatherOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsGatherOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def GatherOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # GatherOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # GatherOptions
+    # Returns 0 (the schema default) when the field is absent from the buffer.
+    def Axis(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # GatherOptions
+    # Returns 0 (the schema default) when the field is absent from the buffer.
+    def BatchDims(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+# Builder helpers for serializing a GatherOptions table (2 slots).
+def GatherOptionsStart(builder):
+    builder.StartObject(2)
+
+def GatherOptionsAddAxis(builder, axis):
+    builder.PrependInt32Slot(0, axis, 0)
+
+def GatherOptionsAddBatchDims(builder, batchDims):
+    builder.PrependInt32Slot(1, batchDims, 0)
+
+def GatherOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+# Mutable "object API" counterpart: unpack GatherOptions from a buffer / re-pack via Pack().
+class GatherOptionsT(object):
+
+    # GatherOptionsT
+    def __init__(self):
+        self.axis = 0  # type: int
+        self.batchDims = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        gatherOptions = GatherOptions()
+        gatherOptions.Init(buf, pos)
+        return cls.InitFromObj(gatherOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, gatherOptions):
+        x = GatherOptionsT()
+        x._UnPack(gatherOptions)
+        return x
+
+    # GatherOptionsT
+    def _UnPack(self, gatherOptions):
+        if gatherOptions is None:
+            return
+        self.axis = gatherOptions.Axis()
+        self.batchDims = gatherOptions.BatchDims()
+
+    # GatherOptionsT
+    def Pack(self, builder):
+        GatherOptionsStart(builder)
+        GatherOptionsAddAxis(builder, self.axis)
+        GatherOptionsAddBatchDims(builder, self.batchDims)
+        gatherOptions = GatherOptionsEnd(builder)
+        return gatherOptions
+
+
+# Generated FlatBuffers accessor for the TFLite `TransposeOptions` table
+# (compiler output). The table carries no fields.
+class TransposeOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = TransposeOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsTransposeOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def TransposeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # TransposeOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+# Builder helpers for serializing a TransposeOptions table (0 slots).
+def TransposeOptionsStart(builder):
+    builder.StartObject(0)
+
+def TransposeOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+# Mutable "object API" counterpart: unpack TransposeOptions from a buffer / re-pack via Pack().
+class TransposeOptionsT(object):
+
+    # TransposeOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        transposeOptions = TransposeOptions()
+        transposeOptions.Init(buf, pos)
+        return cls.InitFromObj(transposeOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, transposeOptions):
+        x = TransposeOptionsT()
+        x._UnPack(transposeOptions)
+        return x
+
+    # TransposeOptionsT
+    def _UnPack(self, transposeOptions):
+        if transposeOptions is None:
+            return
+
+    # TransposeOptionsT
+    def Pack(self, builder):
+        TransposeOptionsStart(builder)
+        transposeOptions = TransposeOptionsEnd(builder)
+        return transposeOptions
+
+
+# Generated FlatBuffers accessor for the TFLite `ExpOptions` table
+# (compiler output). The table carries no fields.
+class ExpOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = ExpOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsExpOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def ExpOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # ExpOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+# Builder helpers for serializing an ExpOptions table (0 slots).
+def ExpOptionsStart(builder):
+    builder.StartObject(0)
+
+def ExpOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+# Mutable "object API" counterpart: unpack ExpOptions from a buffer / re-pack via Pack().
+class ExpOptionsT(object):
+
+    # ExpOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        expOptions = ExpOptions()
+        expOptions.Init(buf, pos)
+        return cls.InitFromObj(expOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, expOptions):
+        x = ExpOptionsT()
+        x._UnPack(expOptions)
+        return x
+
+    # ExpOptionsT
+    def _UnPack(self, expOptions):
+        if expOptions is None:
+            return
+
+    # ExpOptionsT
+    def Pack(self, builder):
+        ExpOptionsStart(builder)
+        expOptions = ExpOptionsEnd(builder)
+        return expOptions
+
+
+# Generated FlatBuffers accessor for the TFLite `CosOptions` table
+# (compiler output). The table carries no fields.
+class CosOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = CosOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsCosOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def CosOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # CosOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+# Builder helpers for serializing a CosOptions table (0 slots).
+def CosOptionsStart(builder):
+    builder.StartObject(0)
+
+def CosOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+# Mutable "object API" counterpart: unpack CosOptions from a buffer / re-pack via Pack().
+class CosOptionsT(object):
+
+    # CosOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        cosOptions = CosOptions()
+        cosOptions.Init(buf, pos)
+        return cls.InitFromObj(cosOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, cosOptions):
+        x = CosOptionsT()
+        x._UnPack(cosOptions)
+        return x
+
+    # CosOptionsT
+    def _UnPack(self, cosOptions):
+        if cosOptions is None:
+            return
+
+    # CosOptionsT
+    def Pack(self, builder):
+        CosOptionsStart(builder)
+        cosOptions = CosOptionsEnd(builder)
+        return cosOptions
+
+
+# Generated FlatBuffers accessor for the TFLite `ReducerOptions` table (compiler output).
+class ReducerOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = ReducerOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsReducerOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def ReducerOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # ReducerOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # ReducerOptions
+    # Returns False (the schema default) when the field is absent from the buffer.
+    def KeepDims(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return False
+
+# Builder helpers for serializing a ReducerOptions table (1 slot).
+def ReducerOptionsStart(builder):
+    builder.StartObject(1)
+
+def ReducerOptionsAddKeepDims(builder, keepDims):
+    builder.PrependBoolSlot(0, keepDims, 0)
+
+def ReducerOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+# Mutable "object API" counterpart: unpack ReducerOptions from a buffer / re-pack via Pack().
+class ReducerOptionsT(object):
+
+    # ReducerOptionsT
+    def __init__(self):
+        self.keepDims = False  # type: bool
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        reducerOptions = ReducerOptions()
+        reducerOptions.Init(buf, pos)
+        return cls.InitFromObj(reducerOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, reducerOptions):
+        x = ReducerOptionsT()
+        x._UnPack(reducerOptions)
+        return x
+
+    # ReducerOptionsT
+    def _UnPack(self, reducerOptions):
+        if reducerOptions is None:
+            return
+        self.keepDims = reducerOptions.KeepDims()
+
+    # ReducerOptionsT
+    def Pack(self, builder):
+        ReducerOptionsStart(builder)
+        ReducerOptionsAddKeepDims(builder, self.keepDims)
+        reducerOptions = ReducerOptionsEnd(builder)
+        return reducerOptions
+
+
+# Generated FlatBuffers accessor for the TFLite `SqueezeOptions` table
+# (compiler output). Holds a single int32 vector field, `squeeze_dims`.
+class SqueezeOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = SqueezeOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsSqueezeOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def SqueezeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # SqueezeOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # SqueezeOptions
+    # Element accessor: j-th int32 of the squeeze_dims vector (stride 4 bytes).
+    def SqueezeDims(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+        return 0
+
+    # SqueezeOptions
+    # Returns 0 (not an array) when the vector field is absent.
+    def SqueezeDimsAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
+        return 0
+
+    # SqueezeOptions
+    def SqueezeDimsLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # SqueezeOptions
+    def SqueezeDimsIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        return o == 0
+
+# Builder helpers for serializing a SqueezeOptions table (1 slot).
+def SqueezeOptionsStart(builder):
+    builder.StartObject(1)
+
+def SqueezeOptionsAddSqueezeDims(builder, squeezeDims):
+    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(squeezeDims), 0)
+
+def SqueezeOptionsStartSqueezeDimsVector(builder, numElems):
+    return builder.StartVector(4, numElems, 4)
+
+def SqueezeOptionsEnd(builder):
+    return builder.EndObject()
+
+
+# Generated guard: typing is only needed for the comments below and may be
+# absent on very old interpreters.
+try:
+    from typing import List
+except:
+    pass
+
+# Mutable "object API" counterpart: unpack SqueezeOptions from a buffer / re-pack via Pack().
+class SqueezeOptionsT(object):
+
+    # SqueezeOptionsT
+    def __init__(self):
+        self.squeezeDims = None  # type: List[int]
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        squeezeOptions = SqueezeOptions()
+        squeezeOptions.Init(buf, pos)
+        return cls.InitFromObj(squeezeOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, squeezeOptions):
+        x = SqueezeOptionsT()
+        x._UnPack(squeezeOptions)
+        return x
+
+    # SqueezeOptionsT
+    def _UnPack(self, squeezeOptions):
+        if squeezeOptions is None:
+            return
+        if not squeezeOptions.SqueezeDimsIsNone():
+            # Without numpy (np is None), copy element-by-element into a list.
+            if np is None:
+                self.squeezeDims = []
+                for i in range(squeezeOptions.SqueezeDimsLength()):
+                    self.squeezeDims.append(squeezeOptions.SqueezeDims(i))
+            else:
+                self.squeezeDims = squeezeOptions.SqueezeDimsAsNumpy()
+
+    # SqueezeOptionsT
+    def Pack(self, builder):
+        # Vectors must be serialized before the enclosing table is started.
+        if self.squeezeDims is not None:
+            if np is not None and type(self.squeezeDims) is np.ndarray:
+                squeezeDims = builder.CreateNumpyVector(self.squeezeDims)
+            else:
+                SqueezeOptionsStartSqueezeDimsVector(builder, len(self.squeezeDims))
+                for i in reversed(range(len(self.squeezeDims))):
+                    builder.PrependInt32(self.squeezeDims[i])
+                squeezeDims = builder.EndVector()
+        SqueezeOptionsStart(builder)
+        if self.squeezeDims is not None:
+            SqueezeOptionsAddSqueezeDims(builder, squeezeDims)
+        squeezeOptions = SqueezeOptionsEnd(builder)
+        return squeezeOptions
+
+
+# Generated FlatBuffers accessor for the TFLite `SplitOptions` table (compiler output).
+class SplitOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = SplitOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsSplitOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def SplitOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # SplitOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # SplitOptions
+    # Returns 0 (the schema default) when the field is absent from the buffer.
+    def NumSplits(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+# Builder helpers for serializing a SplitOptions table (1 slot).
+def SplitOptionsStart(builder):
+    builder.StartObject(1)
+
+def SplitOptionsAddNumSplits(builder, numSplits):
+    builder.PrependInt32Slot(0, numSplits, 0)
+
+def SplitOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+# Mutable "object API" counterpart: unpack SplitOptions from a buffer / re-pack via Pack().
+class SplitOptionsT(object):
+
+    # SplitOptionsT
+    def __init__(self):
+        self.numSplits = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        splitOptions = SplitOptions()
+        splitOptions.Init(buf, pos)
+        return cls.InitFromObj(splitOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, splitOptions):
+        x = SplitOptionsT()
+        x._UnPack(splitOptions)
+        return x
+
+    # SplitOptionsT
+    def _UnPack(self, splitOptions):
+        if splitOptions is None:
+            return
+        self.numSplits = splitOptions.NumSplits()
+
+    # SplitOptionsT
+    def Pack(self, builder):
+        SplitOptionsStart(builder)
+        SplitOptionsAddNumSplits(builder, self.numSplits)
+        splitOptions = SplitOptionsEnd(builder)
+        return splitOptions
+
+
+# Generated FlatBuffers accessor for the TFLite `SplitVOptions` table (compiler output).
+class SplitVOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = SplitVOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsSplitVOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def SplitVOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # SplitVOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # SplitVOptions
+    # Returns 0 (the schema default) when the field is absent from the buffer.
+    def NumSplits(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+# Builder helpers for serializing a SplitVOptions table (1 slot).
+def SplitVOptionsStart(builder):
+    builder.StartObject(1)
+
+def SplitVOptionsAddNumSplits(builder, numSplits):
+    builder.PrependInt32Slot(0, numSplits, 0)
+
+def SplitVOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+# Mutable "object API" counterpart: unpack SplitVOptions from a buffer / re-pack via Pack().
+class SplitVOptionsT(object):
+
+    # SplitVOptionsT
+    def __init__(self):
+        self.numSplits = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        splitVoptions = SplitVOptions()
+        splitVoptions.Init(buf, pos)
+        return cls.InitFromObj(splitVoptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, splitVoptions):
+        x = SplitVOptionsT()
+        x._UnPack(splitVoptions)
+        return x
+
+    # SplitVOptionsT
+    def _UnPack(self, splitVoptions):
+        if splitVoptions is None:
+            return
+        self.numSplits = splitVoptions.NumSplits()
+
+    # SplitVOptionsT
+    def Pack(self, builder):
+        SplitVOptionsStart(builder)
+        SplitVOptionsAddNumSplits(builder, self.numSplits)
+        splitVoptions = SplitVOptionsEnd(builder)
+        return splitVoptions
+
+
+# Generated FlatBuffers accessor for the TFLite `StridedSliceOptions` table
+# (compiler output). Five int32 bit-mask fields plus the bool `offset` flag.
+class StridedSliceOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = StridedSliceOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsStridedSliceOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def StridedSliceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # StridedSliceOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # StridedSliceOptions
+    def BeginMask(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # StridedSliceOptions
+    def EndMask(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # StridedSliceOptions
+    def EllipsisMask(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # StridedSliceOptions
+    def NewAxisMask(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # StridedSliceOptions
+    def ShrinkAxisMask(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # StridedSliceOptions
+    # Bool field; returns False (the schema default) when absent.
+    def Offset(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
+        if o != 0:
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return False
+
+# Builder helpers for serializing a StridedSliceOptions table (6 slots).
+def StridedSliceOptionsStart(builder):
+    builder.StartObject(6)
+
+def StridedSliceOptionsAddBeginMask(builder, beginMask):
+    builder.PrependInt32Slot(0, beginMask, 0)
+
+def StridedSliceOptionsAddEndMask(builder, endMask):
+    builder.PrependInt32Slot(1, endMask, 0)
+
+def StridedSliceOptionsAddEllipsisMask(builder, ellipsisMask):
+    builder.PrependInt32Slot(2, ellipsisMask, 0)
+
+def StridedSliceOptionsAddNewAxisMask(builder, newAxisMask):
+    builder.PrependInt32Slot(3, newAxisMask, 0)
+
+def StridedSliceOptionsAddShrinkAxisMask(builder, shrinkAxisMask):
+    builder.PrependInt32Slot(4, shrinkAxisMask, 0)
+
+def StridedSliceOptionsAddOffset(builder, offset):
+    builder.PrependBoolSlot(5, offset, 0)
+
+def StridedSliceOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+# Mutable "object API" counterpart: unpack StridedSliceOptions from a buffer /
+# re-pack via Pack().
+class StridedSliceOptionsT(object):
+
+    # StridedSliceOptionsT
+    def __init__(self):
+        self.beginMask = 0  # type: int
+        self.endMask = 0  # type: int
+        self.ellipsisMask = 0  # type: int
+        self.newAxisMask = 0  # type: int
+        self.shrinkAxisMask = 0  # type: int
+        self.offset = False  # type: bool
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        stridedSliceOptions = StridedSliceOptions()
+        stridedSliceOptions.Init(buf, pos)
+        return cls.InitFromObj(stridedSliceOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, stridedSliceOptions):
+        x = StridedSliceOptionsT()
+        x._UnPack(stridedSliceOptions)
+        return x
+
+    # StridedSliceOptionsT
+    def _UnPack(self, stridedSliceOptions):
+        if stridedSliceOptions is None:
+            return
+        self.beginMask = stridedSliceOptions.BeginMask()
+        self.endMask = stridedSliceOptions.EndMask()
+        self.ellipsisMask = stridedSliceOptions.EllipsisMask()
+        self.newAxisMask = stridedSliceOptions.NewAxisMask()
+        self.shrinkAxisMask = stridedSliceOptions.ShrinkAxisMask()
+        self.offset = stridedSliceOptions.Offset()
+
+    # StridedSliceOptionsT
+    def Pack(self, builder):
+        StridedSliceOptionsStart(builder)
+        StridedSliceOptionsAddBeginMask(builder, self.beginMask)
+        StridedSliceOptionsAddEndMask(builder, self.endMask)
+        StridedSliceOptionsAddEllipsisMask(builder, self.ellipsisMask)
+        StridedSliceOptionsAddNewAxisMask(builder, self.newAxisMask)
+        StridedSliceOptionsAddShrinkAxisMask(builder, self.shrinkAxisMask)
+        StridedSliceOptionsAddOffset(builder, self.offset)
+        stridedSliceOptions = StridedSliceOptionsEnd(builder)
+        return stridedSliceOptions
+
 
 class LogSoftmaxOptions(object):
     __slots__ = ['_tab']
@@ -5970,12 +10359,13 @@
     def Init(self, buf, pos):
         self._tab = flatbuffers.table.Table(buf, pos)
 
-def LogSoftmaxOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return LogSoftmaxOptionsStart(builder)
-def LogSoftmaxOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return LogSoftmaxOptionsEnd(builder)
+def LogSoftmaxOptionsStart(builder):
+    builder.StartObject(0)
+
+def LogSoftmaxOptionsEnd(builder):
+    return builder.EndObject()
+
+
 
 class LogSoftmaxOptionsT(object):
 
@@ -5990,6 +10380,11 @@
         return cls.InitFromObj(logSoftmaxOptions)
 
     @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
     def InitFromObj(cls, logSoftmaxOptions):
         x = LogSoftmaxOptionsT()
         x._UnPack(logSoftmaxOptions)
@@ -6005,332 +10400,162 @@
         LogSoftmaxOptionsStart(builder)
         logSoftmaxOptions = LogSoftmaxOptionsEnd(builder)
         return logSoftmaxOptions
-# automatically generated by the FlatBuffers compiler, do not modify
 
-# namespace: tflite
 
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class LogicalAndOptions(object):
+class CastOptions(object):
     __slots__ = ['_tab']
 
     @classmethod
     def GetRootAs(cls, buf, offset=0):
         n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = LogicalAndOptions()
+        x = CastOptions()
         x.Init(buf, n + offset)
         return x
 
     @classmethod
-    def GetRootAsLogicalAndOptions(cls, buf, offset=0):
+    def GetRootAsCastOptions(cls, buf, offset=0):
         """This method is deprecated. Please switch to GetRootAs."""
         return cls.GetRootAs(buf, offset)
     @classmethod
-    def LogicalAndOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+    def CastOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
         return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
 
-    # LogicalAndOptions
+    # CastOptions
     def Init(self, buf, pos):
         self._tab = flatbuffers.table.Table(buf, pos)
 
-def LogicalAndOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return LogicalAndOptionsStart(builder)
-def LogicalAndOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return LogicalAndOptionsEnd(builder)
+    # CastOptions
+    def InDataType(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
 
-class LogicalAndOptionsT(object):
+    # CastOptions
+    def OutDataType(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
 
-    # LogicalAndOptionsT
+def CastOptionsStart(builder):
+    builder.StartObject(2)
+
+def CastOptionsAddInDataType(builder, inDataType):
+    builder.PrependInt8Slot(0, inDataType, 0)
+
+def CastOptionsAddOutDataType(builder, outDataType):
+    builder.PrependInt8Slot(1, outDataType, 0)
+
+def CastOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class CastOptionsT(object):
+
+    # CastOptionsT
     def __init__(self):
-        pass
+        self.inDataType = 0  # type: int
+        self.outDataType = 0  # type: int
 
     @classmethod
     def InitFromBuf(cls, buf, pos):
-        logicalAndOptions = LogicalAndOptions()
-        logicalAndOptions.Init(buf, pos)
-        return cls.InitFromObj(logicalAndOptions)
+        castOptions = CastOptions()
+        castOptions.Init(buf, pos)
+        return cls.InitFromObj(castOptions)
 
     @classmethod
-    def InitFromObj(cls, logicalAndOptions):
-        x = LogicalAndOptionsT()
-        x._UnPack(logicalAndOptions)
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, castOptions):
+        x = CastOptionsT()
+        x._UnPack(castOptions)
         return x
 
-    # LogicalAndOptionsT
-    def _UnPack(self, logicalAndOptions):
-        if logicalAndOptions is None:
+    # CastOptionsT
+    def _UnPack(self, castOptions):
+        if castOptions is None:
             return
+        self.inDataType = castOptions.InDataType()
+        self.outDataType = castOptions.OutDataType()
 
-    # LogicalAndOptionsT
+    # CastOptionsT
     def Pack(self, builder):
-        LogicalAndOptionsStart(builder)
-        logicalAndOptions = LogicalAndOptionsEnd(builder)
-        return logicalAndOptions
-# automatically generated by the FlatBuffers compiler, do not modify
+        CastOptionsStart(builder)
+        CastOptionsAddInDataType(builder, self.inDataType)
+        CastOptionsAddOutDataType(builder, self.outDataType)
+        castOptions = CastOptionsEnd(builder)
+        return castOptions
 
-# namespace: tflite
 
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class LogicalNotOptions(object):
+class DequantizeOptions(object):
     __slots__ = ['_tab']
 
     @classmethod
     def GetRootAs(cls, buf, offset=0):
         n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = LogicalNotOptions()
+        x = DequantizeOptions()
         x.Init(buf, n + offset)
         return x
 
     @classmethod
-    def GetRootAsLogicalNotOptions(cls, buf, offset=0):
+    def GetRootAsDequantizeOptions(cls, buf, offset=0):
         """This method is deprecated. Please switch to GetRootAs."""
         return cls.GetRootAs(buf, offset)
     @classmethod
-    def LogicalNotOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+    def DequantizeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
         return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
 
-    # LogicalNotOptions
+    # DequantizeOptions
     def Init(self, buf, pos):
         self._tab = flatbuffers.table.Table(buf, pos)
 
-def LogicalNotOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return LogicalNotOptionsStart(builder)
-def LogicalNotOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return LogicalNotOptionsEnd(builder)
+def DequantizeOptionsStart(builder):
+    builder.StartObject(0)
 
-class LogicalNotOptionsT(object):
+def DequantizeOptionsEnd(builder):
+    return builder.EndObject()
 
-    # LogicalNotOptionsT
+
+
+class DequantizeOptionsT(object):
+
+    # DequantizeOptionsT
     def __init__(self):
         pass
 
     @classmethod
     def InitFromBuf(cls, buf, pos):
-        logicalNotOptions = LogicalNotOptions()
-        logicalNotOptions.Init(buf, pos)
-        return cls.InitFromObj(logicalNotOptions)
+        dequantizeOptions = DequantizeOptions()
+        dequantizeOptions.Init(buf, pos)
+        return cls.InitFromObj(dequantizeOptions)
 
     @classmethod
-    def InitFromObj(cls, logicalNotOptions):
-        x = LogicalNotOptionsT()
-        x._UnPack(logicalNotOptions)
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, dequantizeOptions):
+        x = DequantizeOptionsT()
+        x._UnPack(dequantizeOptions)
         return x
 
-    # LogicalNotOptionsT
-    def _UnPack(self, logicalNotOptions):
-        if logicalNotOptions is None:
+    # DequantizeOptionsT
+    def _UnPack(self, dequantizeOptions):
+        if dequantizeOptions is None:
             return
 
-    # LogicalNotOptionsT
+    # DequantizeOptionsT
     def Pack(self, builder):
-        LogicalNotOptionsStart(builder)
-        logicalNotOptions = LogicalNotOptionsEnd(builder)
-        return logicalNotOptions
-# automatically generated by the FlatBuffers compiler, do not modify
+        DequantizeOptionsStart(builder)
+        dequantizeOptions = DequantizeOptionsEnd(builder)
+        return dequantizeOptions
 
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class LogicalOrOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = LogicalOrOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsLogicalOrOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def LogicalOrOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # LogicalOrOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def LogicalOrOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return LogicalOrOptionsStart(builder)
-def LogicalOrOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return LogicalOrOptionsEnd(builder)
-
-class LogicalOrOptionsT(object):
-
-    # LogicalOrOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        logicalOrOptions = LogicalOrOptions()
-        logicalOrOptions.Init(buf, pos)
-        return cls.InitFromObj(logicalOrOptions)
-
-    @classmethod
-    def InitFromObj(cls, logicalOrOptions):
-        x = LogicalOrOptionsT()
-        x._UnPack(logicalOrOptions)
-        return x
-
-    # LogicalOrOptionsT
-    def _UnPack(self, logicalOrOptions):
-        if logicalOrOptions is None:
-            return
-
-    # LogicalOrOptionsT
-    def Pack(self, builder):
-        LogicalOrOptionsStart(builder)
-        logicalOrOptions = LogicalOrOptionsEnd(builder)
-        return logicalOrOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class MatrixDiagOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = MatrixDiagOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsMatrixDiagOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def MatrixDiagOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # MatrixDiagOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def MatrixDiagOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return MatrixDiagOptionsStart(builder)
-def MatrixDiagOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return MatrixDiagOptionsEnd(builder)
-
-class MatrixDiagOptionsT(object):
-
-    # MatrixDiagOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        matrixDiagOptions = MatrixDiagOptions()
-        matrixDiagOptions.Init(buf, pos)
-        return cls.InitFromObj(matrixDiagOptions)
-
-    @classmethod
-    def InitFromObj(cls, matrixDiagOptions):
-        x = MatrixDiagOptionsT()
-        x._UnPack(matrixDiagOptions)
-        return x
-
-    # MatrixDiagOptionsT
-    def _UnPack(self, matrixDiagOptions):
-        if matrixDiagOptions is None:
-            return
-
-    # MatrixDiagOptionsT
-    def Pack(self, builder):
-        MatrixDiagOptionsStart(builder)
-        matrixDiagOptions = MatrixDiagOptionsEnd(builder)
-        return matrixDiagOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class MatrixSetDiagOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = MatrixSetDiagOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsMatrixSetDiagOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def MatrixSetDiagOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # MatrixSetDiagOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def MatrixSetDiagOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return MatrixSetDiagOptionsStart(builder)
-def MatrixSetDiagOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return MatrixSetDiagOptionsEnd(builder)
-
-class MatrixSetDiagOptionsT(object):
-
-    # MatrixSetDiagOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        matrixSetDiagOptions = MatrixSetDiagOptions()
-        matrixSetDiagOptions.Init(buf, pos)
-        return cls.InitFromObj(matrixSetDiagOptions)
-
-    @classmethod
-    def InitFromObj(cls, matrixSetDiagOptions):
-        x = MatrixSetDiagOptionsT()
-        x._UnPack(matrixSetDiagOptions)
-        return x
-
-    # MatrixSetDiagOptionsT
-    def _UnPack(self, matrixSetDiagOptions):
-        if matrixSetDiagOptions is None:
-            return
-
-    # MatrixSetDiagOptionsT
-    def Pack(self, builder):
-        MatrixSetDiagOptionsStart(builder)
-        matrixSetDiagOptions = MatrixSetDiagOptionsEnd(builder)
-        return matrixSetDiagOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
 
 class MaximumMinimumOptions(object):
     __slots__ = ['_tab']
@@ -6354,12 +10579,13 @@
     def Init(self, buf, pos):
         self._tab = flatbuffers.table.Table(buf, pos)
 
-def MaximumMinimumOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return MaximumMinimumOptionsStart(builder)
-def MaximumMinimumOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return MaximumMinimumOptionsEnd(builder)
+def MaximumMinimumOptionsStart(builder):
+    builder.StartObject(0)
+
+def MaximumMinimumOptionsEnd(builder):
+    return builder.EndObject()
+
+
 
 class MaximumMinimumOptionsT(object):
 
@@ -6374,6 +10600,11 @@
         return cls.InitFromObj(maximumMinimumOptions)
 
     @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
     def InitFromObj(cls, maximumMinimumOptions):
         x = MaximumMinimumOptionsT()
         x._UnPack(maximumMinimumOptions)
@@ -6389,655 +10620,486 @@
         MaximumMinimumOptionsStart(builder)
         maximumMinimumOptions = MaximumMinimumOptionsEnd(builder)
         return maximumMinimumOptions
-# automatically generated by the FlatBuffers compiler, do not modify
 
-# namespace: tflite
 
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class Metadata(object):
+class TileOptions(object):
     __slots__ = ['_tab']
 
     @classmethod
     def GetRootAs(cls, buf, offset=0):
         n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = Metadata()
+        x = TileOptions()
         x.Init(buf, n + offset)
         return x
 
     @classmethod
-    def GetRootAsMetadata(cls, buf, offset=0):
+    def GetRootAsTileOptions(cls, buf, offset=0):
         """This method is deprecated. Please switch to GetRootAs."""
         return cls.GetRootAs(buf, offset)
     @classmethod
-    def MetadataBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+    def TileOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
         return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
 
-    # Metadata
+    # TileOptions
     def Init(self, buf, pos):
         self._tab = flatbuffers.table.Table(buf, pos)
 
-    # Metadata
-    def Name(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.String(o + self._tab.Pos)
-        return None
+def TileOptionsStart(builder):
+    builder.StartObject(0)
 
-    # Metadata
-    def Buffer(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
-        return 0
+def TileOptionsEnd(builder):
+    return builder.EndObject()
 
-def MetadataStart(builder): builder.StartObject(2)
-def Start(builder):
-    return MetadataStart(builder)
-def MetadataAddName(builder, name): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
-def AddName(builder, name):
-    return MetadataAddName(builder, name)
-def MetadataAddBuffer(builder, buffer): builder.PrependUint32Slot(1, buffer, 0)
-def AddBuffer(builder, buffer):
-    return MetadataAddBuffer(builder, buffer)
-def MetadataEnd(builder): return builder.EndObject()
-def End(builder):
-    return MetadataEnd(builder)
 
-class MetadataT(object):
 
-    # MetadataT
+class TileOptionsT(object):
+
+    # TileOptionsT
     def __init__(self):
-        self.name = None  # type: str
-        self.buffer = 0  # type: int
+        pass
 
     @classmethod
     def InitFromBuf(cls, buf, pos):
-        metadata = Metadata()
-        metadata.Init(buf, pos)
-        return cls.InitFromObj(metadata)
+        tileOptions = TileOptions()
+        tileOptions.Init(buf, pos)
+        return cls.InitFromObj(tileOptions)
 
     @classmethod
-    def InitFromObj(cls, metadata):
-        x = MetadataT()
-        x._UnPack(metadata)
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, tileOptions):
+        x = TileOptionsT()
+        x._UnPack(tileOptions)
         return x
 
-    # MetadataT
-    def _UnPack(self, metadata):
-        if metadata is None:
+    # TileOptionsT
+    def _UnPack(self, tileOptions):
+        if tileOptions is None:
             return
-        self.name = metadata.Name()
-        self.buffer = metadata.Buffer()
 
-    # MetadataT
+    # TileOptionsT
     def Pack(self, builder):
-        if self.name is not None:
-            name = builder.CreateString(self.name)
-        MetadataStart(builder)
-        if self.name is not None:
-            MetadataAddName(builder, name)
-        MetadataAddBuffer(builder, self.buffer)
-        metadata = MetadataEnd(builder)
-        return metadata
-# automatically generated by the FlatBuffers compiler, do not modify
+        TileOptionsStart(builder)
+        tileOptions = TileOptionsEnd(builder)
+        return tileOptions
 
-# namespace: tflite
 
-class MirrorPadMode(object):
-    REFLECT = 0
-    SYMMETRIC = 1
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class MirrorPadOptions(object):
+class ArgMaxOptions(object):
     __slots__ = ['_tab']
 
     @classmethod
     def GetRootAs(cls, buf, offset=0):
         n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = MirrorPadOptions()
+        x = ArgMaxOptions()
         x.Init(buf, n + offset)
         return x
 
     @classmethod
-    def GetRootAsMirrorPadOptions(cls, buf, offset=0):
+    def GetRootAsArgMaxOptions(cls, buf, offset=0):
         """This method is deprecated. Please switch to GetRootAs."""
         return cls.GetRootAs(buf, offset)
     @classmethod
-    def MirrorPadOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+    def ArgMaxOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
         return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
 
-    # MirrorPadOptions
+    # ArgMaxOptions
     def Init(self, buf, pos):
         self._tab = flatbuffers.table.Table(buf, pos)
 
-    # MirrorPadOptions
-    def Mode(self):
+    # ArgMaxOptions
+    def OutputType(self):
         o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
         if o != 0:
             return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
         return 0
 
-def MirrorPadOptionsStart(builder): builder.StartObject(1)
-def Start(builder):
-    return MirrorPadOptionsStart(builder)
-def MirrorPadOptionsAddMode(builder, mode): builder.PrependInt8Slot(0, mode, 0)
-def AddMode(builder, mode):
-    return MirrorPadOptionsAddMode(builder, mode)
-def MirrorPadOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return MirrorPadOptionsEnd(builder)
+def ArgMaxOptionsStart(builder):
+    builder.StartObject(1)
 
-class MirrorPadOptionsT(object):
+def ArgMaxOptionsAddOutputType(builder, outputType):
+    builder.PrependInt8Slot(0, outputType, 0)
 
-    # MirrorPadOptionsT
+def ArgMaxOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class ArgMaxOptionsT(object):
+
+    # ArgMaxOptionsT
     def __init__(self):
-        self.mode = 0  # type: int
+        self.outputType = 0  # type: int
 
     @classmethod
     def InitFromBuf(cls, buf, pos):
-        mirrorPadOptions = MirrorPadOptions()
-        mirrorPadOptions.Init(buf, pos)
-        return cls.InitFromObj(mirrorPadOptions)
+        argMaxOptions = ArgMaxOptions()
+        argMaxOptions.Init(buf, pos)
+        return cls.InitFromObj(argMaxOptions)
 
     @classmethod
-    def InitFromObj(cls, mirrorPadOptions):
-        x = MirrorPadOptionsT()
-        x._UnPack(mirrorPadOptions)
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, argMaxOptions):
+        x = ArgMaxOptionsT()
+        x._UnPack(argMaxOptions)
         return x
 
-    # MirrorPadOptionsT
-    def _UnPack(self, mirrorPadOptions):
-        if mirrorPadOptions is None:
+    # ArgMaxOptionsT
+    def _UnPack(self, argMaxOptions):
+        if argMaxOptions is None:
             return
-        self.mode = mirrorPadOptions.Mode()
+        self.outputType = argMaxOptions.OutputType()
 
-    # MirrorPadOptionsT
+    # ArgMaxOptionsT
     def Pack(self, builder):
-        MirrorPadOptionsStart(builder)
-        MirrorPadOptionsAddMode(builder, self.mode)
-        mirrorPadOptions = MirrorPadOptionsEnd(builder)
-        return mirrorPadOptions
-# automatically generated by the FlatBuffers compiler, do not modify
+        ArgMaxOptionsStart(builder)
+        ArgMaxOptionsAddOutputType(builder, self.outputType)
+        argMaxOptions = ArgMaxOptionsEnd(builder)
+        return argMaxOptions
 
-# namespace: tflite
 
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class Model(object):
+class ArgMinOptions(object):
     __slots__ = ['_tab']
 
     @classmethod
     def GetRootAs(cls, buf, offset=0):
         n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = Model()
+        x = ArgMinOptions()
         x.Init(buf, n + offset)
         return x
 
     @classmethod
-    def GetRootAsModel(cls, buf, offset=0):
+    def GetRootAsArgMinOptions(cls, buf, offset=0):
         """This method is deprecated. Please switch to GetRootAs."""
         return cls.GetRootAs(buf, offset)
     @classmethod
-    def ModelBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+    def ArgMinOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
         return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
 
-    # Model
+    # ArgMinOptions
     def Init(self, buf, pos):
         self._tab = flatbuffers.table.Table(buf, pos)
 
-    # Model
-    def Version(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
-        return 0
-
-    # Model
-    def OperatorCodes(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            x = self._tab.Vector(o)
-            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
-            x = self._tab.Indirect(x)
-            obj = OperatorCode()
-            obj.Init(self._tab.Bytes, x)
-            return obj
-        return None
-
-    # Model
-    def OperatorCodesLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # Model
-    def OperatorCodesIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        return o == 0
-
-    # Model
-    def Subgraphs(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            x = self._tab.Vector(o)
-            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
-            x = self._tab.Indirect(x)
-            obj = SubGraph()
-            obj.Init(self._tab.Bytes, x)
-            return obj
-        return None
-
-    # Model
-    def SubgraphsLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # Model
-    def SubgraphsIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        return o == 0
-
-    # Model
-    def Description(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            return self._tab.String(o + self._tab.Pos)
-        return None
-
-    # Model
-    def Buffers(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
-        if o != 0:
-            x = self._tab.Vector(o)
-            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
-            x = self._tab.Indirect(x)
-            obj = Buffer()
-            obj.Init(self._tab.Bytes, x)
-            return obj
-        return None
-
-    # Model
-    def BuffersLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # Model
-    def BuffersIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
-        return o == 0
-
-    # Model
-    def MetadataBuffer(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
-        return 0
-
-    # Model
-    def MetadataBufferAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
-        return 0
-
-    # Model
-    def MetadataBufferLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # Model
-    def MetadataBufferIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
-        return o == 0
-
-    # Model
-    def Metadata(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
-        if o != 0:
-            x = self._tab.Vector(o)
-            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
-            x = self._tab.Indirect(x)
-            obj = Metadata()
-            obj.Init(self._tab.Bytes, x)
-            return obj
-        return None
-
-    # Model
-    def MetadataLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # Model
-    def MetadataIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
-        return o == 0
-
-    # Model
-    def SignatureDefs(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
-        if o != 0:
-            x = self._tab.Vector(o)
-            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
-            x = self._tab.Indirect(x)
-            obj = SignatureDef()
-            obj.Init(self._tab.Bytes, x)
-            return obj
-        return None
-
-    # Model
-    def SignatureDefsLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # Model
-    def SignatureDefsIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
-        return o == 0
-
-def ModelStart(builder): builder.StartObject(8)
-def Start(builder):
-    return ModelStart(builder)
-def ModelAddVersion(builder, version): builder.PrependUint32Slot(0, version, 0)
-def AddVersion(builder, version):
-    return ModelAddVersion(builder, version)
-def ModelAddOperatorCodes(builder, operatorCodes): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(operatorCodes), 0)
-def AddOperatorCodes(builder, operatorCodes):
-    return ModelAddOperatorCodes(builder, operatorCodes)
-def ModelStartOperatorCodesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def StartOperatorCodesVector(builder, numElems):
-    return ModelStartOperatorCodesVector(builder, numElems)
-def ModelAddSubgraphs(builder, subgraphs): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(subgraphs), 0)
-def AddSubgraphs(builder, subgraphs):
-    return ModelAddSubgraphs(builder, subgraphs)
-def ModelStartSubgraphsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def StartSubgraphsVector(builder, numElems):
-    return ModelStartSubgraphsVector(builder, numElems)
-def ModelAddDescription(builder, description): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(description), 0)
-def AddDescription(builder, description):
-    return ModelAddDescription(builder, description)
-def ModelAddBuffers(builder, buffers): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(buffers), 0)
-def AddBuffers(builder, buffers):
-    return ModelAddBuffers(builder, buffers)
-def ModelStartBuffersVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def StartBuffersVector(builder, numElems):
-    return ModelStartBuffersVector(builder, numElems)
-def ModelAddMetadataBuffer(builder, metadataBuffer): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(metadataBuffer), 0)
-def AddMetadataBuffer(builder, metadataBuffer):
-    return ModelAddMetadataBuffer(builder, metadataBuffer)
-def ModelStartMetadataBufferVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def StartMetadataBufferVector(builder, numElems):
-    return ModelStartMetadataBufferVector(builder, numElems)
-def ModelAddMetadata(builder, metadata): builder.PrependUOffsetTRelativeSlot(6, flatbuffers.number_types.UOffsetTFlags.py_type(metadata), 0)
-def AddMetadata(builder, metadata):
-    return ModelAddMetadata(builder, metadata)
-def ModelStartMetadataVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def StartMetadataVector(builder, numElems):
-    return ModelStartMetadataVector(builder, numElems)
-def ModelAddSignatureDefs(builder, signatureDefs): builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(signatureDefs), 0)
-def AddSignatureDefs(builder, signatureDefs):
-    return ModelAddSignatureDefs(builder, signatureDefs)
-def ModelStartSignatureDefsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def StartSignatureDefsVector(builder, numElems):
-    return ModelStartSignatureDefsVector(builder, numElems)
-def ModelEnd(builder): return builder.EndObject()
-def End(builder):
-    return ModelEnd(builder)
-try:
-    from typing import List
-except:
-    pass
-
-class ModelT(object):
-
-    # ModelT
-    def __init__(self):
-        self.version = 0  # type: int
-        self.operatorCodes = None  # type: List[OperatorCodeT]
-        self.subgraphs = None  # type: List[SubGraphT]
-        self.description = None  # type: str
-        self.buffers = None  # type: List[BufferT]
-        self.metadataBuffer = None  # type: List[int]
-        self.metadata = None  # type: List[MetadataT]
-        self.signatureDefs = None  # type: List[SignatureDefT]
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        model = Model()
-        model.Init(buf, pos)
-        return cls.InitFromObj(model)
-
-    @classmethod
-    def InitFromObj(cls, model):
-        x = ModelT()
-        x._UnPack(model)
-        return x
-
-    # ModelT
-    def _UnPack(self, model):
-        if model is None:
-            return
-        self.version = model.Version()
-        if not model.OperatorCodesIsNone():
-            self.operatorCodes = []
-            for i in range(model.OperatorCodesLength()):
-                if model.OperatorCodes(i) is None:
-                    self.operatorCodes.append(None)
-                else:
-                    operatorCode_ = OperatorCodeT.InitFromObj(model.OperatorCodes(i))
-                    self.operatorCodes.append(operatorCode_)
-        if not model.SubgraphsIsNone():
-            self.subgraphs = []
-            for i in range(model.SubgraphsLength()):
-                if model.Subgraphs(i) is None:
-                    self.subgraphs.append(None)
-                else:
-                    subGraph_ = SubGraphT.InitFromObj(model.Subgraphs(i))
-                    self.subgraphs.append(subGraph_)
-        self.description = model.Description()
-        if not model.BuffersIsNone():
-            self.buffers = []
-            for i in range(model.BuffersLength()):
-                if model.Buffers(i) is None:
-                    self.buffers.append(None)
-                else:
-                    buffer_ = BufferT.InitFromObj(model.Buffers(i))
-                    self.buffers.append(buffer_)
-        if not model.MetadataBufferIsNone():
-            if np is None:
-                self.metadataBuffer = []
-                for i in range(model.MetadataBufferLength()):
-                    self.metadataBuffer.append(model.MetadataBuffer(i))
-            else:
-                self.metadataBuffer = model.MetadataBufferAsNumpy()
-        if not model.MetadataIsNone():
-            self.metadata = []
-            for i in range(model.MetadataLength()):
-                if model.Metadata(i) is None:
-                    self.metadata.append(None)
-                else:
-                    metadata_ = MetadataT.InitFromObj(model.Metadata(i))
-                    self.metadata.append(metadata_)
-        if not model.SignatureDefsIsNone():
-            self.signatureDefs = []
-            for i in range(model.SignatureDefsLength()):
-                if model.SignatureDefs(i) is None:
-                    self.signatureDefs.append(None)
-                else:
-                    signatureDef_ = SignatureDefT.InitFromObj(model.SignatureDefs(i))
-                    self.signatureDefs.append(signatureDef_)
-
-    # ModelT
-    def Pack(self, builder):
-        if self.operatorCodes is not None:
-            operatorCodeslist = []
-            for i in range(len(self.operatorCodes)):
-                operatorCodeslist.append(self.operatorCodes[i].Pack(builder))
-            ModelStartOperatorCodesVector(builder, len(self.operatorCodes))
-            for i in reversed(range(len(self.operatorCodes))):
-                builder.PrependUOffsetTRelative(operatorCodeslist[i])
-            operatorCodes = builder.EndVector()
-        if self.subgraphs is not None:
-            subgraphslist = []
-            for i in range(len(self.subgraphs)):
-                subgraphslist.append(self.subgraphs[i].Pack(builder))
-            ModelStartSubgraphsVector(builder, len(self.subgraphs))
-            for i in reversed(range(len(self.subgraphs))):
-                builder.PrependUOffsetTRelative(subgraphslist[i])
-            subgraphs = builder.EndVector()
-        if self.description is not None:
-            description = builder.CreateString(self.description)
-        if self.buffers is not None:
-            bufferslist = []
-            for i in range(len(self.buffers)):
-                bufferslist.append(self.buffers[i].Pack(builder))
-            ModelStartBuffersVector(builder, len(self.buffers))
-            for i in reversed(range(len(self.buffers))):
-                builder.PrependUOffsetTRelative(bufferslist[i])
-            buffers = builder.EndVector()
-        if self.metadataBuffer is not None:
-            if np is not None and type(self.metadataBuffer) is np.ndarray:
-                metadataBuffer = builder.CreateNumpyVector(self.metadataBuffer)
-            else:
-                ModelStartMetadataBufferVector(builder, len(self.metadataBuffer))
-                for i in reversed(range(len(self.metadataBuffer))):
-                    builder.PrependInt32(self.metadataBuffer[i])
-                metadataBuffer = builder.EndVector()
-        if self.metadata is not None:
-            metadatalist = []
-            for i in range(len(self.metadata)):
-                metadatalist.append(self.metadata[i].Pack(builder))
-            ModelStartMetadataVector(builder, len(self.metadata))
-            for i in reversed(range(len(self.metadata))):
-                builder.PrependUOffsetTRelative(metadatalist[i])
-            metadata = builder.EndVector()
-        if self.signatureDefs is not None:
-            signatureDefslist = []
-            for i in range(len(self.signatureDefs)):
-                signatureDefslist.append(self.signatureDefs[i].Pack(builder))
-            ModelStartSignatureDefsVector(builder, len(self.signatureDefs))
-            for i in reversed(range(len(self.signatureDefs))):
-                builder.PrependUOffsetTRelative(signatureDefslist[i])
-            signatureDefs = builder.EndVector()
-        ModelStart(builder)
-        ModelAddVersion(builder, self.version)
-        if self.operatorCodes is not None:
-            ModelAddOperatorCodes(builder, operatorCodes)
-        if self.subgraphs is not None:
-            ModelAddSubgraphs(builder, subgraphs)
-        if self.description is not None:
-            ModelAddDescription(builder, description)
-        if self.buffers is not None:
-            ModelAddBuffers(builder, buffers)
-        if self.metadataBuffer is not None:
-            ModelAddMetadataBuffer(builder, metadataBuffer)
-        if self.metadata is not None:
-            ModelAddMetadata(builder, metadata)
-        if self.signatureDefs is not None:
-            ModelAddSignatureDefs(builder, signatureDefs)
-        model = ModelEnd(builder)
-        return model
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class MulOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = MulOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsMulOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def MulOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # MulOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # MulOptions
-    def FusedActivationFunction(self):
+    # ArgMinOptions
+    def OutputType(self):
         o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
         if o != 0:
             return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
         return 0
 
-def MulOptionsStart(builder): builder.StartObject(1)
-def Start(builder):
-    return MulOptionsStart(builder)
-def MulOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0)
-def AddFusedActivationFunction(builder, fusedActivationFunction):
-    return MulOptionsAddFusedActivationFunction(builder, fusedActivationFunction)
-def MulOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return MulOptionsEnd(builder)
+def ArgMinOptionsStart(builder):
+    builder.StartObject(1)
 
-class MulOptionsT(object):
+def ArgMinOptionsAddOutputType(builder, outputType):
+    builder.PrependInt8Slot(0, outputType, 0)
 
-    # MulOptionsT
+def ArgMinOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class ArgMinOptionsT(object):
+
+    # ArgMinOptionsT
     def __init__(self):
-        self.fusedActivationFunction = 0  # type: int
+        self.outputType = 0  # type: int
 
     @classmethod
     def InitFromBuf(cls, buf, pos):
-        mulOptions = MulOptions()
-        mulOptions.Init(buf, pos)
-        return cls.InitFromObj(mulOptions)
+        argMinOptions = ArgMinOptions()
+        argMinOptions.Init(buf, pos)
+        return cls.InitFromObj(argMinOptions)
 
     @classmethod
-    def InitFromObj(cls, mulOptions):
-        x = MulOptionsT()
-        x._UnPack(mulOptions)
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, argMinOptions):
+        x = ArgMinOptionsT()
+        x._UnPack(argMinOptions)
         return x
 
-    # MulOptionsT
-    def _UnPack(self, mulOptions):
-        if mulOptions is None:
+    # ArgMinOptionsT
+    def _UnPack(self, argMinOptions):
+        if argMinOptions is None:
             return
-        self.fusedActivationFunction = mulOptions.FusedActivationFunction()
+        self.outputType = argMinOptions.OutputType()
 
-    # MulOptionsT
+    # ArgMinOptionsT
     def Pack(self, builder):
-        MulOptionsStart(builder)
-        MulOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        mulOptions = MulOptionsEnd(builder)
-        return mulOptions
-# automatically generated by the FlatBuffers compiler, do not modify
+        ArgMinOptionsStart(builder)
+        ArgMinOptionsAddOutputType(builder, self.outputType)
+        argMinOptions = ArgMinOptionsEnd(builder)
+        return argMinOptions
 
-# namespace: tflite
 
-from flatbuffers.compat import import_numpy
-np = import_numpy()
+class GreaterOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = GreaterOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsGreaterOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def GreaterOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # GreaterOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def GreaterOptionsStart(builder):
+    builder.StartObject(0)
+
+def GreaterOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class GreaterOptionsT(object):
+
+    # GreaterOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        greaterOptions = GreaterOptions()
+        greaterOptions.Init(buf, pos)
+        return cls.InitFromObj(greaterOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, greaterOptions):
+        x = GreaterOptionsT()
+        x._UnPack(greaterOptions)
+        return x
+
+    # GreaterOptionsT
+    def _UnPack(self, greaterOptions):
+        if greaterOptions is None:
+            return
+
+    # GreaterOptionsT
+    def Pack(self, builder):
+        GreaterOptionsStart(builder)
+        greaterOptions = GreaterOptionsEnd(builder)
+        return greaterOptions
+
+
+class GreaterEqualOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = GreaterEqualOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsGreaterEqualOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def GreaterEqualOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # GreaterEqualOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def GreaterEqualOptionsStart(builder):
+    builder.StartObject(0)
+
+def GreaterEqualOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class GreaterEqualOptionsT(object):
+
+    # GreaterEqualOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        greaterEqualOptions = GreaterEqualOptions()
+        greaterEqualOptions.Init(buf, pos)
+        return cls.InitFromObj(greaterEqualOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, greaterEqualOptions):
+        x = GreaterEqualOptionsT()
+        x._UnPack(greaterEqualOptions)
+        return x
+
+    # GreaterEqualOptionsT
+    def _UnPack(self, greaterEqualOptions):
+        if greaterEqualOptions is None:
+            return
+
+    # GreaterEqualOptionsT
+    def Pack(self, builder):
+        GreaterEqualOptionsStart(builder)
+        greaterEqualOptions = GreaterEqualOptionsEnd(builder)
+        return greaterEqualOptions
+
+
+class LessOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = LessOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsLessOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def LessOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # LessOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def LessOptionsStart(builder):
+    builder.StartObject(0)
+
+def LessOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class LessOptionsT(object):
+
+    # LessOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        lessOptions = LessOptions()
+        lessOptions.Init(buf, pos)
+        return cls.InitFromObj(lessOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, lessOptions):
+        x = LessOptionsT()
+        x._UnPack(lessOptions)
+        return x
+
+    # LessOptionsT
+    def _UnPack(self, lessOptions):
+        if lessOptions is None:
+            return
+
+    # LessOptionsT
+    def Pack(self, builder):
+        LessOptionsStart(builder)
+        lessOptions = LessOptionsEnd(builder)
+        return lessOptions
+
+
+class LessEqualOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = LessEqualOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsLessEqualOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def LessEqualOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # LessEqualOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def LessEqualOptionsStart(builder):
+    builder.StartObject(0)
+
+def LessEqualOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class LessEqualOptionsT(object):
+
+    # LessEqualOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        lessEqualOptions = LessEqualOptions()
+        lessEqualOptions.Init(buf, pos)
+        return cls.InitFromObj(lessEqualOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, lessEqualOptions):
+        x = LessEqualOptionsT()
+        x._UnPack(lessEqualOptions)
+        return x
+
+    # LessEqualOptionsT
+    def _UnPack(self, lessEqualOptions):
+        if lessEqualOptions is None:
+            return
+
+    # LessEqualOptionsT
+    def Pack(self, builder):
+        LessEqualOptionsStart(builder)
+        lessEqualOptions = LessEqualOptionsEnd(builder)
+        return lessEqualOptions
+
 
 class NegOptions(object):
     __slots__ = ['_tab']
@@ -7061,12 +11123,13 @@
     def Init(self, buf, pos):
         self._tab = flatbuffers.table.Table(buf, pos)
 
-def NegOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return NegOptionsStart(builder)
-def NegOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return NegOptionsEnd(builder)
+def NegOptionsStart(builder):
+    builder.StartObject(0)
+
+def NegOptionsEnd(builder):
+    return builder.EndObject()
+
+
 
 class NegOptionsT(object):
 
@@ -7081,6 +11144,11 @@
         return cls.InitFromObj(negOptions)
 
     @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
     def InitFromObj(cls, negOptions):
         x = NegOptionsT()
         x._UnPack(negOptions)
@@ -7096,140 +11164,473 @@
         NegOptionsStart(builder)
         negOptions = NegOptionsEnd(builder)
         return negOptions
-# automatically generated by the FlatBuffers compiler, do not modify
 
-# namespace: tflite
 
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class NonMaxSuppressionV4Options(object):
+class SelectOptions(object):
     __slots__ = ['_tab']
 
     @classmethod
     def GetRootAs(cls, buf, offset=0):
         n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = NonMaxSuppressionV4Options()
+        x = SelectOptions()
         x.Init(buf, n + offset)
         return x
 
     @classmethod
-    def GetRootAsNonMaxSuppressionV4Options(cls, buf, offset=0):
+    def GetRootAsSelectOptions(cls, buf, offset=0):
         """This method is deprecated. Please switch to GetRootAs."""
         return cls.GetRootAs(buf, offset)
     @classmethod
-    def NonMaxSuppressionV4OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+    def SelectOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
         return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
 
-    # NonMaxSuppressionV4Options
+    # SelectOptions
     def Init(self, buf, pos):
         self._tab = flatbuffers.table.Table(buf, pos)
 
-def NonMaxSuppressionV4OptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return NonMaxSuppressionV4OptionsStart(builder)
-def NonMaxSuppressionV4OptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return NonMaxSuppressionV4OptionsEnd(builder)
+def SelectOptionsStart(builder):
+    builder.StartObject(0)
 
-class NonMaxSuppressionV4OptionsT(object):
+def SelectOptionsEnd(builder):
+    return builder.EndObject()
 
-    # NonMaxSuppressionV4OptionsT
+
+
+class SelectOptionsT(object):
+
+    # SelectOptionsT
     def __init__(self):
         pass
 
     @classmethod
     def InitFromBuf(cls, buf, pos):
-        nonMaxSuppressionV4options = NonMaxSuppressionV4Options()
-        nonMaxSuppressionV4options.Init(buf, pos)
-        return cls.InitFromObj(nonMaxSuppressionV4options)
+        selectOptions = SelectOptions()
+        selectOptions.Init(buf, pos)
+        return cls.InitFromObj(selectOptions)
 
     @classmethod
-    def InitFromObj(cls, nonMaxSuppressionV4options):
-        x = NonMaxSuppressionV4OptionsT()
-        x._UnPack(nonMaxSuppressionV4options)
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, selectOptions):
+        x = SelectOptionsT()
+        x._UnPack(selectOptions)
         return x
 
-    # NonMaxSuppressionV4OptionsT
-    def _UnPack(self, nonMaxSuppressionV4options):
-        if nonMaxSuppressionV4options is None:
+    # SelectOptionsT
+    def _UnPack(self, selectOptions):
+        if selectOptions is None:
             return
 
-    # NonMaxSuppressionV4OptionsT
+    # SelectOptionsT
     def Pack(self, builder):
-        NonMaxSuppressionV4OptionsStart(builder)
-        nonMaxSuppressionV4options = NonMaxSuppressionV4OptionsEnd(builder)
-        return nonMaxSuppressionV4options
-# automatically generated by the FlatBuffers compiler, do not modify
+        SelectOptionsStart(builder)
+        selectOptions = SelectOptionsEnd(builder)
+        return selectOptions
 
-# namespace: tflite
 
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class NonMaxSuppressionV5Options(object):
+class SliceOptions(object):
     __slots__ = ['_tab']
 
     @classmethod
     def GetRootAs(cls, buf, offset=0):
         n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = NonMaxSuppressionV5Options()
+        x = SliceOptions()
         x.Init(buf, n + offset)
         return x
 
     @classmethod
-    def GetRootAsNonMaxSuppressionV5Options(cls, buf, offset=0):
+    def GetRootAsSliceOptions(cls, buf, offset=0):
         """This method is deprecated. Please switch to GetRootAs."""
         return cls.GetRootAs(buf, offset)
     @classmethod
-    def NonMaxSuppressionV5OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+    def SliceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
         return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
 
-    # NonMaxSuppressionV5Options
+    # SliceOptions
     def Init(self, buf, pos):
         self._tab = flatbuffers.table.Table(buf, pos)
 
-def NonMaxSuppressionV5OptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return NonMaxSuppressionV5OptionsStart(builder)
-def NonMaxSuppressionV5OptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return NonMaxSuppressionV5OptionsEnd(builder)
+def SliceOptionsStart(builder):
+    builder.StartObject(0)
 
-class NonMaxSuppressionV5OptionsT(object):
+def SliceOptionsEnd(builder):
+    return builder.EndObject()
 
-    # NonMaxSuppressionV5OptionsT
+
+
+class SliceOptionsT(object):
+
+    # SliceOptionsT
     def __init__(self):
         pass
 
     @classmethod
     def InitFromBuf(cls, buf, pos):
-        nonMaxSuppressionV5options = NonMaxSuppressionV5Options()
-        nonMaxSuppressionV5options.Init(buf, pos)
-        return cls.InitFromObj(nonMaxSuppressionV5options)
+        sliceOptions = SliceOptions()
+        sliceOptions.Init(buf, pos)
+        return cls.InitFromObj(sliceOptions)
 
     @classmethod
-    def InitFromObj(cls, nonMaxSuppressionV5options):
-        x = NonMaxSuppressionV5OptionsT()
-        x._UnPack(nonMaxSuppressionV5options)
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, sliceOptions):
+        x = SliceOptionsT()
+        x._UnPack(sliceOptions)
         return x
 
-    # NonMaxSuppressionV5OptionsT
-    def _UnPack(self, nonMaxSuppressionV5options):
-        if nonMaxSuppressionV5options is None:
+    # SliceOptionsT
+    def _UnPack(self, sliceOptions):
+        if sliceOptions is None:
             return
 
-    # NonMaxSuppressionV5OptionsT
+    # SliceOptionsT
     def Pack(self, builder):
-        NonMaxSuppressionV5OptionsStart(builder)
-        nonMaxSuppressionV5options = NonMaxSuppressionV5OptionsEnd(builder)
-        return nonMaxSuppressionV5options
-# automatically generated by the FlatBuffers compiler, do not modify
+        SliceOptionsStart(builder)
+        sliceOptions = SliceOptionsEnd(builder)
+        return sliceOptions
 
-# namespace: tflite
 
-from flatbuffers.compat import import_numpy
-np = import_numpy()
+class TransposeConvOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = TransposeConvOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsTransposeConvOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def TransposeConvOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # TransposeConvOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # TransposeConvOptions
+    def Padding(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
+
+    # TransposeConvOptions
+    def StrideW(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # TransposeConvOptions
+    def StrideH(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # TransposeConvOptions
+    def FusedActivationFunction(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
+
+    # TransposeConvOptions
+    def QuantizedBiasType(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
+
+def TransposeConvOptionsStart(builder):
+    builder.StartObject(5)
+
+def TransposeConvOptionsAddPadding(builder, padding):
+    builder.PrependInt8Slot(0, padding, 0)
+
+def TransposeConvOptionsAddStrideW(builder, strideW):
+    builder.PrependInt32Slot(1, strideW, 0)
+
+def TransposeConvOptionsAddStrideH(builder, strideH):
+    builder.PrependInt32Slot(2, strideH, 0)
+
+def TransposeConvOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+    builder.PrependInt8Slot(3, fusedActivationFunction, 0)
+
+def TransposeConvOptionsAddQuantizedBiasType(builder, quantizedBiasType):
+    builder.PrependInt8Slot(4, quantizedBiasType, 0)
+
+def TransposeConvOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class TransposeConvOptionsT(object):
+
+    # TransposeConvOptionsT
+    def __init__(self):
+        self.padding = 0  # type: int
+        self.strideW = 0  # type: int
+        self.strideH = 0  # type: int
+        self.fusedActivationFunction = 0  # type: int
+        self.quantizedBiasType = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        transposeConvOptions = TransposeConvOptions()
+        transposeConvOptions.Init(buf, pos)
+        return cls.InitFromObj(transposeConvOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, transposeConvOptions):
+        x = TransposeConvOptionsT()
+        x._UnPack(transposeConvOptions)
+        return x
+
+    # TransposeConvOptionsT
+    def _UnPack(self, transposeConvOptions):
+        if transposeConvOptions is None:
+            return
+        self.padding = transposeConvOptions.Padding()
+        self.strideW = transposeConvOptions.StrideW()
+        self.strideH = transposeConvOptions.StrideH()
+        self.fusedActivationFunction = transposeConvOptions.FusedActivationFunction()
+        self.quantizedBiasType = transposeConvOptions.QuantizedBiasType()
+
+    # TransposeConvOptionsT
+    def Pack(self, builder):
+        TransposeConvOptionsStart(builder)
+        TransposeConvOptionsAddPadding(builder, self.padding)
+        TransposeConvOptionsAddStrideW(builder, self.strideW)
+        TransposeConvOptionsAddStrideH(builder, self.strideH)
+        TransposeConvOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
+        TransposeConvOptionsAddQuantizedBiasType(builder, self.quantizedBiasType)
+        transposeConvOptions = TransposeConvOptionsEnd(builder)
+        return transposeConvOptions
+
+
+class ExpandDimsOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = ExpandDimsOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsExpandDimsOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def ExpandDimsOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # ExpandDimsOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def ExpandDimsOptionsStart(builder):
+    builder.StartObject(0)
+
+def ExpandDimsOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class ExpandDimsOptionsT(object):
+
+    # ExpandDimsOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        expandDimsOptions = ExpandDimsOptions()
+        expandDimsOptions.Init(buf, pos)
+        return cls.InitFromObj(expandDimsOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, expandDimsOptions):
+        x = ExpandDimsOptionsT()
+        x._UnPack(expandDimsOptions)
+        return x
+
+    # ExpandDimsOptionsT
+    def _UnPack(self, expandDimsOptions):
+        if expandDimsOptions is None:
+            return
+
+    # ExpandDimsOptionsT
+    def Pack(self, builder):
+        ExpandDimsOptionsStart(builder)
+        expandDimsOptions = ExpandDimsOptionsEnd(builder)
+        return expandDimsOptions
+
+
+class SparseToDenseOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = SparseToDenseOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsSparseToDenseOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def SparseToDenseOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # SparseToDenseOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # SparseToDenseOptions
+    def ValidateIndices(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return False
+
+def SparseToDenseOptionsStart(builder):
+    builder.StartObject(1)
+
+def SparseToDenseOptionsAddValidateIndices(builder, validateIndices):
+    builder.PrependBoolSlot(0, validateIndices, 0)
+
+def SparseToDenseOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class SparseToDenseOptionsT(object):
+
+    # SparseToDenseOptionsT
+    def __init__(self):
+        self.validateIndices = False  # type: bool
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        sparseToDenseOptions = SparseToDenseOptions()
+        sparseToDenseOptions.Init(buf, pos)
+        return cls.InitFromObj(sparseToDenseOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, sparseToDenseOptions):
+        x = SparseToDenseOptionsT()
+        x._UnPack(sparseToDenseOptions)
+        return x
+
+    # SparseToDenseOptionsT
+    def _UnPack(self, sparseToDenseOptions):
+        if sparseToDenseOptions is None:
+            return
+        self.validateIndices = sparseToDenseOptions.ValidateIndices()
+
+    # SparseToDenseOptionsT
+    def Pack(self, builder):
+        SparseToDenseOptionsStart(builder)
+        SparseToDenseOptionsAddValidateIndices(builder, self.validateIndices)
+        sparseToDenseOptions = SparseToDenseOptionsEnd(builder)
+        return sparseToDenseOptions
+
+
+class EqualOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = EqualOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsEqualOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def EqualOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # EqualOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def EqualOptionsStart(builder):
+    builder.StartObject(0)
+
+def EqualOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class EqualOptionsT(object):
+
+    # EqualOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        equalOptions = EqualOptions()
+        equalOptions.Init(buf, pos)
+        return cls.InitFromObj(equalOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, equalOptions):
+        x = EqualOptionsT()
+        x._UnPack(equalOptions)
+        return x
+
+    # EqualOptionsT
+    def _UnPack(self, equalOptions):
+        if equalOptions is None:
+            return
+
+    # EqualOptionsT
+    def Pack(self, builder):
+        EqualOptionsStart(builder)
+        equalOptions = EqualOptionsEnd(builder)
+        return equalOptions
+
 
 class NotEqualOptions(object):
     __slots__ = ['_tab']
@@ -7253,12 +11654,13 @@
     def Init(self, buf, pos):
         self._tab = flatbuffers.table.Table(buf, pos)
 
-def NotEqualOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return NotEqualOptionsStart(builder)
-def NotEqualOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return NotEqualOptionsEnd(builder)
+def NotEqualOptionsStart(builder):
+    builder.StartObject(0)
+
+def NotEqualOptionsEnd(builder):
+    return builder.EndObject()
+
+
 
 class NotEqualOptionsT(object):
 
@@ -7273,6 +11675,11 @@
         return cls.InitFromObj(notEqualOptions)
 
     @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
     def InitFromObj(cls, notEqualOptions):
         x = NotEqualOptionsT()
         x._UnPack(notEqualOptions)
@@ -7288,12 +11695,485 @@
         NotEqualOptionsStart(builder)
         notEqualOptions = NotEqualOptionsEnd(builder)
         return notEqualOptions
-# automatically generated by the FlatBuffers compiler, do not modify
 
-# namespace: tflite
 
-from flatbuffers.compat import import_numpy
-np = import_numpy()
+class ShapeOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = ShapeOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsShapeOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def ShapeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # ShapeOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # ShapeOptions
+    def OutType(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
+
+def ShapeOptionsStart(builder):
+    builder.StartObject(1)
+
+def ShapeOptionsAddOutType(builder, outType):
+    builder.PrependInt8Slot(0, outType, 0)
+
+def ShapeOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class ShapeOptionsT(object):
+
+    # ShapeOptionsT
+    def __init__(self):
+        self.outType = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        shapeOptions = ShapeOptions()
+        shapeOptions.Init(buf, pos)
+        return cls.InitFromObj(shapeOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, shapeOptions):
+        x = ShapeOptionsT()
+        x._UnPack(shapeOptions)
+        return x
+
+    # ShapeOptionsT
+    def _UnPack(self, shapeOptions):
+        if shapeOptions is None:
+            return
+        self.outType = shapeOptions.OutType()
+
+    # ShapeOptionsT
+    def Pack(self, builder):
+        ShapeOptionsStart(builder)
+        ShapeOptionsAddOutType(builder, self.outType)
+        shapeOptions = ShapeOptionsEnd(builder)
+        return shapeOptions
+
+
+class RankOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = RankOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsRankOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def RankOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # RankOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def RankOptionsStart(builder):
+    builder.StartObject(0)
+
+def RankOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class RankOptionsT(object):
+
+    # RankOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        rankOptions = RankOptions()
+        rankOptions.Init(buf, pos)
+        return cls.InitFromObj(rankOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, rankOptions):
+        x = RankOptionsT()
+        x._UnPack(rankOptions)
+        return x
+
+    # RankOptionsT
+    def _UnPack(self, rankOptions):
+        if rankOptions is None:
+            return
+
+    # RankOptionsT
+    def Pack(self, builder):
+        RankOptionsStart(builder)
+        rankOptions = RankOptionsEnd(builder)
+        return rankOptions
+
+
+class PowOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = PowOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsPowOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def PowOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # PowOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def PowOptionsStart(builder):
+    builder.StartObject(0)
+
+def PowOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class PowOptionsT(object):
+
+    # PowOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        powOptions = PowOptions()
+        powOptions.Init(buf, pos)
+        return cls.InitFromObj(powOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, powOptions):
+        x = PowOptionsT()
+        x._UnPack(powOptions)
+        return x
+
+    # PowOptionsT
+    def _UnPack(self, powOptions):
+        if powOptions is None:
+            return
+
+    # PowOptionsT
+    def Pack(self, builder):
+        PowOptionsStart(builder)
+        powOptions = PowOptionsEnd(builder)
+        return powOptions
+
+
+class FakeQuantOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = FakeQuantOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsFakeQuantOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def FakeQuantOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # FakeQuantOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # FakeQuantOptions
+    def Min(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
+        return 0.0
+
+    # FakeQuantOptions
+    def Max(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
+        return 0.0
+
+    # FakeQuantOptions
+    def NumBits(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # FakeQuantOptions
+    def NarrowRange(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+        if o != 0:
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return False
+
+def FakeQuantOptionsStart(builder):
+    builder.StartObject(4)
+
+def FakeQuantOptionsAddMin(builder, min):
+    builder.PrependFloat32Slot(0, min, 0.0)
+
+def FakeQuantOptionsAddMax(builder, max):
+    builder.PrependFloat32Slot(1, max, 0.0)
+
+def FakeQuantOptionsAddNumBits(builder, numBits):
+    builder.PrependInt32Slot(2, numBits, 0)
+
+def FakeQuantOptionsAddNarrowRange(builder, narrowRange):
+    builder.PrependBoolSlot(3, narrowRange, 0)
+
+def FakeQuantOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class FakeQuantOptionsT(object):
+
+    # FakeQuantOptionsT
+    def __init__(self):
+        self.min = 0.0  # type: float
+        self.max = 0.0  # type: float
+        self.numBits = 0  # type: int
+        self.narrowRange = False  # type: bool
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        fakeQuantOptions = FakeQuantOptions()
+        fakeQuantOptions.Init(buf, pos)
+        return cls.InitFromObj(fakeQuantOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, fakeQuantOptions):
+        x = FakeQuantOptionsT()
+        x._UnPack(fakeQuantOptions)
+        return x
+
+    # FakeQuantOptionsT
+    def _UnPack(self, fakeQuantOptions):
+        if fakeQuantOptions is None:
+            return
+        self.min = fakeQuantOptions.Min()
+        self.max = fakeQuantOptions.Max()
+        self.numBits = fakeQuantOptions.NumBits()
+        self.narrowRange = fakeQuantOptions.NarrowRange()
+
+    # FakeQuantOptionsT
+    def Pack(self, builder):
+        FakeQuantOptionsStart(builder)
+        FakeQuantOptionsAddMin(builder, self.min)
+        FakeQuantOptionsAddMax(builder, self.max)
+        FakeQuantOptionsAddNumBits(builder, self.numBits)
+        FakeQuantOptionsAddNarrowRange(builder, self.narrowRange)
+        fakeQuantOptions = FakeQuantOptionsEnd(builder)
+        return fakeQuantOptions
+
+
+class PackOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = PackOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsPackOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def PackOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # PackOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # PackOptions
+    def ValuesCount(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # PackOptions
+    def Axis(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+def PackOptionsStart(builder):
+    builder.StartObject(2)
+
+def PackOptionsAddValuesCount(builder, valuesCount):
+    builder.PrependInt32Slot(0, valuesCount, 0)
+
+def PackOptionsAddAxis(builder, axis):
+    builder.PrependInt32Slot(1, axis, 0)
+
+def PackOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class PackOptionsT(object):
+
+    # PackOptionsT
+    def __init__(self):
+        self.valuesCount = 0  # type: int
+        self.axis = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        packOptions = PackOptions()
+        packOptions.Init(buf, pos)
+        return cls.InitFromObj(packOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, packOptions):
+        x = PackOptionsT()
+        x._UnPack(packOptions)
+        return x
+
+    # PackOptionsT
+    def _UnPack(self, packOptions):
+        if packOptions is None:
+            return
+        self.valuesCount = packOptions.ValuesCount()
+        self.axis = packOptions.Axis()
+
+    # PackOptionsT
+    def Pack(self, builder):
+        PackOptionsStart(builder)
+        PackOptionsAddValuesCount(builder, self.valuesCount)
+        PackOptionsAddAxis(builder, self.axis)
+        packOptions = PackOptionsEnd(builder)
+        return packOptions
+
+
+class LogicalOrOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = LogicalOrOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsLogicalOrOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def LogicalOrOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # LogicalOrOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def LogicalOrOptionsStart(builder):
+    builder.StartObject(0)
+
+def LogicalOrOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class LogicalOrOptionsT(object):
+
+    # LogicalOrOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        logicalOrOptions = LogicalOrOptions()
+        logicalOrOptions.Init(buf, pos)
+        return cls.InitFromObj(logicalOrOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, logicalOrOptions):
+        x = LogicalOrOptionsT()
+        x._UnPack(logicalOrOptions)
+        return x
+
+    # LogicalOrOptionsT
+    def _UnPack(self, logicalOrOptions):
+        if logicalOrOptions is None:
+            return
+
+    # LogicalOrOptionsT
+    def Pack(self, builder):
+        LogicalOrOptionsStart(builder)
+        logicalOrOptions = LogicalOrOptionsEnd(builder)
+        return logicalOrOptions
+
 
 class OneHotOptions(object):
     __slots__ = ['_tab']
@@ -7324,15 +12204,16 @@
             return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
         return 0
 
-def OneHotOptionsStart(builder): builder.StartObject(1)
-def Start(builder):
-    return OneHotOptionsStart(builder)
-def OneHotOptionsAddAxis(builder, axis): builder.PrependInt32Slot(0, axis, 0)
-def AddAxis(builder, axis):
-    return OneHotOptionsAddAxis(builder, axis)
-def OneHotOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return OneHotOptionsEnd(builder)
+def OneHotOptionsStart(builder):
+    builder.StartObject(1)
+
+def OneHotOptionsAddAxis(builder, axis):
+    builder.PrependInt32Slot(0, axis, 0)
+
+def OneHotOptionsEnd(builder):
+    return builder.EndObject()
+
+
 
 class OneHotOptionsT(object):
 
@@ -7347,6 +12228,11 @@
         return cls.InitFromObj(oneHotOptions)
 
     @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
     def InitFromObj(cls, oneHotOptions):
         x = OneHotOptionsT()
         x._UnPack(oneHotOptions)
@@ -7364,12 +12250,4453 @@
         OneHotOptionsAddAxis(builder, self.axis)
         oneHotOptions = OneHotOptionsEnd(builder)
         return oneHotOptions
-# automatically generated by the FlatBuffers compiler, do not modify
 
-# namespace: tflite
 
-from flatbuffers.compat import import_numpy
-np = import_numpy()
+class AbsOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = AbsOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsAbsOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def AbsOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # AbsOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def AbsOptionsStart(builder):
+    builder.StartObject(0)
+
+def AbsOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class AbsOptionsT(object):
+
+    # AbsOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        absOptions = AbsOptions()
+        absOptions.Init(buf, pos)
+        return cls.InitFromObj(absOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, absOptions):
+        x = AbsOptionsT()
+        x._UnPack(absOptions)
+        return x
+
+    # AbsOptionsT
+    def _UnPack(self, absOptions):
+        if absOptions is None:
+            return
+
+    # AbsOptionsT
+    def Pack(self, builder):
+        AbsOptionsStart(builder)
+        absOptions = AbsOptionsEnd(builder)
+        return absOptions
+
+
+class HardSwishOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = HardSwishOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsHardSwishOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def HardSwishOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # HardSwishOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def HardSwishOptionsStart(builder):
+    builder.StartObject(0)
+
+def HardSwishOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class HardSwishOptionsT(object):
+
+    # HardSwishOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        hardSwishOptions = HardSwishOptions()
+        hardSwishOptions.Init(buf, pos)
+        return cls.InitFromObj(hardSwishOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, hardSwishOptions):
+        x = HardSwishOptionsT()
+        x._UnPack(hardSwishOptions)
+        return x
+
+    # HardSwishOptionsT
+    def _UnPack(self, hardSwishOptions):
+        if hardSwishOptions is None:
+            return
+
+    # HardSwishOptionsT
+    def Pack(self, builder):
+        HardSwishOptionsStart(builder)
+        hardSwishOptions = HardSwishOptionsEnd(builder)
+        return hardSwishOptions
+
+
+class LogicalAndOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = LogicalAndOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsLogicalAndOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def LogicalAndOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # LogicalAndOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def LogicalAndOptionsStart(builder):
+    builder.StartObject(0)
+
+def LogicalAndOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class LogicalAndOptionsT(object):
+
+    # LogicalAndOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        logicalAndOptions = LogicalAndOptions()
+        logicalAndOptions.Init(buf, pos)
+        return cls.InitFromObj(logicalAndOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, logicalAndOptions):
+        x = LogicalAndOptionsT()
+        x._UnPack(logicalAndOptions)
+        return x
+
+    # LogicalAndOptionsT
+    def _UnPack(self, logicalAndOptions):
+        if logicalAndOptions is None:
+            return
+
+    # LogicalAndOptionsT
+    def Pack(self, builder):
+        LogicalAndOptionsStart(builder)
+        logicalAndOptions = LogicalAndOptionsEnd(builder)
+        return logicalAndOptions
+
+
+class LogicalNotOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = LogicalNotOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsLogicalNotOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def LogicalNotOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # LogicalNotOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def LogicalNotOptionsStart(builder):
+    builder.StartObject(0)
+
+def LogicalNotOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class LogicalNotOptionsT(object):
+
+    # LogicalNotOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        logicalNotOptions = LogicalNotOptions()
+        logicalNotOptions.Init(buf, pos)
+        return cls.InitFromObj(logicalNotOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, logicalNotOptions):
+        x = LogicalNotOptionsT()
+        x._UnPack(logicalNotOptions)
+        return x
+
+    # LogicalNotOptionsT
+    def _UnPack(self, logicalNotOptions):
+        if logicalNotOptions is None:
+            return
+
+    # LogicalNotOptionsT
+    def Pack(self, builder):
+        LogicalNotOptionsStart(builder)
+        logicalNotOptions = LogicalNotOptionsEnd(builder)
+        return logicalNotOptions
+
+
+class UnpackOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = UnpackOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsUnpackOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def UnpackOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # UnpackOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # UnpackOptions
+    def Num(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # UnpackOptions
+    def Axis(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+def UnpackOptionsStart(builder):
+    builder.StartObject(2)
+
+def UnpackOptionsAddNum(builder, num):
+    builder.PrependInt32Slot(0, num, 0)
+
+def UnpackOptionsAddAxis(builder, axis):
+    builder.PrependInt32Slot(1, axis, 0)
+
+def UnpackOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class UnpackOptionsT(object):
+
+    # UnpackOptionsT
+    def __init__(self):
+        self.num = 0  # type: int
+        self.axis = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        unpackOptions = UnpackOptions()
+        unpackOptions.Init(buf, pos)
+        return cls.InitFromObj(unpackOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, unpackOptions):
+        x = UnpackOptionsT()
+        x._UnPack(unpackOptions)
+        return x
+
+    # UnpackOptionsT
+    def _UnPack(self, unpackOptions):
+        if unpackOptions is None:
+            return
+        self.num = unpackOptions.Num()
+        self.axis = unpackOptions.Axis()
+
+    # UnpackOptionsT
+    def Pack(self, builder):
+        UnpackOptionsStart(builder)
+        UnpackOptionsAddNum(builder, self.num)
+        UnpackOptionsAddAxis(builder, self.axis)
+        unpackOptions = UnpackOptionsEnd(builder)
+        return unpackOptions
+
+
+class FloorDivOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = FloorDivOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsFloorDivOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def FloorDivOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # FloorDivOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def FloorDivOptionsStart(builder):
+    builder.StartObject(0)
+
+def FloorDivOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class FloorDivOptionsT(object):
+
+    # FloorDivOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        floorDivOptions = FloorDivOptions()
+        floorDivOptions.Init(buf, pos)
+        return cls.InitFromObj(floorDivOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, floorDivOptions):
+        x = FloorDivOptionsT()
+        x._UnPack(floorDivOptions)
+        return x
+
+    # FloorDivOptionsT
+    def _UnPack(self, floorDivOptions):
+        if floorDivOptions is None:
+            return
+
+    # FloorDivOptionsT
+    def Pack(self, builder):
+        FloorDivOptionsStart(builder)
+        floorDivOptions = FloorDivOptionsEnd(builder)
+        return floorDivOptions
+
+
+class SquareOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = SquareOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsSquareOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def SquareOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # SquareOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def SquareOptionsStart(builder):
+    builder.StartObject(0)
+
+def SquareOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class SquareOptionsT(object):
+
+    # SquareOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        squareOptions = SquareOptions()
+        squareOptions.Init(buf, pos)
+        return cls.InitFromObj(squareOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, squareOptions):
+        x = SquareOptionsT()
+        x._UnPack(squareOptions)
+        return x
+
+    # SquareOptionsT
+    def _UnPack(self, squareOptions):
+        if squareOptions is None:
+            return
+
+    # SquareOptionsT
+    def Pack(self, builder):
+        SquareOptionsStart(builder)
+        squareOptions = SquareOptionsEnd(builder)
+        return squareOptions
+
+
+class ZerosLikeOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = ZerosLikeOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsZerosLikeOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def ZerosLikeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # ZerosLikeOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def ZerosLikeOptionsStart(builder):
+    builder.StartObject(0)
+
+def ZerosLikeOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class ZerosLikeOptionsT(object):
+
+    # ZerosLikeOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        zerosLikeOptions = ZerosLikeOptions()
+        zerosLikeOptions.Init(buf, pos)
+        return cls.InitFromObj(zerosLikeOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, zerosLikeOptions):
+        x = ZerosLikeOptionsT()
+        x._UnPack(zerosLikeOptions)
+        return x
+
+    # ZerosLikeOptionsT
+    def _UnPack(self, zerosLikeOptions):
+        if zerosLikeOptions is None:
+            return
+
+    # ZerosLikeOptionsT
+    def Pack(self, builder):
+        ZerosLikeOptionsStart(builder)
+        zerosLikeOptions = ZerosLikeOptionsEnd(builder)
+        return zerosLikeOptions
+
+
+class FillOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = FillOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsFillOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def FillOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # FillOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def FillOptionsStart(builder):
+    builder.StartObject(0)
+
+def FillOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class FillOptionsT(object):
+
+    # FillOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        fillOptions = FillOptions()
+        fillOptions.Init(buf, pos)
+        return cls.InitFromObj(fillOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, fillOptions):
+        x = FillOptionsT()
+        x._UnPack(fillOptions)
+        return x
+
+    # FillOptionsT
+    def _UnPack(self, fillOptions):
+        if fillOptions is None:
+            return
+
+    # FillOptionsT
+    def Pack(self, builder):
+        FillOptionsStart(builder)
+        fillOptions = FillOptionsEnd(builder)
+        return fillOptions
+
+
+class FloorModOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = FloorModOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsFloorModOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def FloorModOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # FloorModOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def FloorModOptionsStart(builder):
+    builder.StartObject(0)
+
+def FloorModOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class FloorModOptionsT(object):
+
+    # FloorModOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        floorModOptions = FloorModOptions()
+        floorModOptions.Init(buf, pos)
+        return cls.InitFromObj(floorModOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, floorModOptions):
+        x = FloorModOptionsT()
+        x._UnPack(floorModOptions)
+        return x
+
+    # FloorModOptionsT
+    def _UnPack(self, floorModOptions):
+        if floorModOptions is None:
+            return
+
+    # FloorModOptionsT
+    def Pack(self, builder):
+        FloorModOptionsStart(builder)
+        floorModOptions = FloorModOptionsEnd(builder)
+        return floorModOptions
+
+
+class RangeOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = RangeOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsRangeOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def RangeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # RangeOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def RangeOptionsStart(builder):
+    builder.StartObject(0)
+
+def RangeOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class RangeOptionsT(object):
+
+    # RangeOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        rangeOptions = RangeOptions()
+        rangeOptions.Init(buf, pos)
+        return cls.InitFromObj(rangeOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, rangeOptions):
+        x = RangeOptionsT()
+        x._UnPack(rangeOptions)
+        return x
+
+    # RangeOptionsT
+    def _UnPack(self, rangeOptions):
+        if rangeOptions is None:
+            return
+
+    # RangeOptionsT
+    def Pack(self, builder):
+        RangeOptionsStart(builder)
+        rangeOptions = RangeOptionsEnd(builder)
+        return rangeOptions
+
+
+class LeakyReluOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = LeakyReluOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsLeakyReluOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def LeakyReluOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # LeakyReluOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # LeakyReluOptions
+    def Alpha(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
+        return 0.0
+
+def LeakyReluOptionsStart(builder):
+    builder.StartObject(1)
+
+def LeakyReluOptionsAddAlpha(builder, alpha):
+    builder.PrependFloat32Slot(0, alpha, 0.0)
+
+def LeakyReluOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class LeakyReluOptionsT(object):
+
+    # LeakyReluOptionsT
+    def __init__(self):
+        self.alpha = 0.0  # type: float
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        leakyReluOptions = LeakyReluOptions()
+        leakyReluOptions.Init(buf, pos)
+        return cls.InitFromObj(leakyReluOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, leakyReluOptions):
+        x = LeakyReluOptionsT()
+        x._UnPack(leakyReluOptions)
+        return x
+
+    # LeakyReluOptionsT
+    def _UnPack(self, leakyReluOptions):
+        if leakyReluOptions is None:
+            return
+        self.alpha = leakyReluOptions.Alpha()
+
+    # LeakyReluOptionsT
+    def Pack(self, builder):
+        LeakyReluOptionsStart(builder)
+        LeakyReluOptionsAddAlpha(builder, self.alpha)
+        leakyReluOptions = LeakyReluOptionsEnd(builder)
+        return leakyReluOptions
+
+
+class SquaredDifferenceOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = SquaredDifferenceOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsSquaredDifferenceOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def SquaredDifferenceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # SquaredDifferenceOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def SquaredDifferenceOptionsStart(builder):
+    builder.StartObject(0)
+
+def SquaredDifferenceOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class SquaredDifferenceOptionsT(object):
+
+    # SquaredDifferenceOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        squaredDifferenceOptions = SquaredDifferenceOptions()
+        squaredDifferenceOptions.Init(buf, pos)
+        return cls.InitFromObj(squaredDifferenceOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, squaredDifferenceOptions):
+        x = SquaredDifferenceOptionsT()
+        x._UnPack(squaredDifferenceOptions)
+        return x
+
+    # SquaredDifferenceOptionsT
+    def _UnPack(self, squaredDifferenceOptions):
+        if squaredDifferenceOptions is None:
+            return
+
+    # SquaredDifferenceOptionsT
+    def Pack(self, builder):
+        SquaredDifferenceOptionsStart(builder)
+        squaredDifferenceOptions = SquaredDifferenceOptionsEnd(builder)
+        return squaredDifferenceOptions
+
+
+class MirrorPadOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = MirrorPadOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsMirrorPadOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def MirrorPadOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # MirrorPadOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # MirrorPadOptions
+    def Mode(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
+
+def MirrorPadOptionsStart(builder):
+    builder.StartObject(1)
+
+def MirrorPadOptionsAddMode(builder, mode):
+    builder.PrependInt8Slot(0, mode, 0)
+
+def MirrorPadOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class MirrorPadOptionsT(object):
+
+    # MirrorPadOptionsT
+    def __init__(self):
+        self.mode = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        mirrorPadOptions = MirrorPadOptions()
+        mirrorPadOptions.Init(buf, pos)
+        return cls.InitFromObj(mirrorPadOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, mirrorPadOptions):
+        x = MirrorPadOptionsT()
+        x._UnPack(mirrorPadOptions)
+        return x
+
+    # MirrorPadOptionsT
+    def _UnPack(self, mirrorPadOptions):
+        if mirrorPadOptions is None:
+            return
+        self.mode = mirrorPadOptions.Mode()
+
+    # MirrorPadOptionsT
+    def Pack(self, builder):
+        MirrorPadOptionsStart(builder)
+        MirrorPadOptionsAddMode(builder, self.mode)
+        mirrorPadOptions = MirrorPadOptionsEnd(builder)
+        return mirrorPadOptions
+
+
+class UniqueOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = UniqueOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsUniqueOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def UniqueOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # UniqueOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # UniqueOptions
+    def IdxOutType(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 2
+
+def UniqueOptionsStart(builder):
+    builder.StartObject(1)
+
+def UniqueOptionsAddIdxOutType(builder, idxOutType):
+    builder.PrependInt8Slot(0, idxOutType, 2)
+
+def UniqueOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class UniqueOptionsT(object):
+
+    # UniqueOptionsT
+    def __init__(self):
+        self.idxOutType = 2  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        uniqueOptions = UniqueOptions()
+        uniqueOptions.Init(buf, pos)
+        return cls.InitFromObj(uniqueOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, uniqueOptions):
+        x = UniqueOptionsT()
+        x._UnPack(uniqueOptions)
+        return x
+
+    # UniqueOptionsT
+    def _UnPack(self, uniqueOptions):
+        if uniqueOptions is None:
+            return
+        self.idxOutType = uniqueOptions.IdxOutType()
+
+    # UniqueOptionsT
+    def Pack(self, builder):
+        UniqueOptionsStart(builder)
+        UniqueOptionsAddIdxOutType(builder, self.idxOutType)
+        uniqueOptions = UniqueOptionsEnd(builder)
+        return uniqueOptions
+
+
+class ReverseV2Options(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = ReverseV2Options()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsReverseV2Options(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def ReverseV2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # ReverseV2Options
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def ReverseV2OptionsStart(builder):
+    builder.StartObject(0)
+
+def ReverseV2OptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class ReverseV2OptionsT(object):
+
+    # ReverseV2OptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        reverseV2Options = ReverseV2Options()
+        reverseV2Options.Init(buf, pos)
+        return cls.InitFromObj(reverseV2Options)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, reverseV2Options):
+        x = ReverseV2OptionsT()
+        x._UnPack(reverseV2Options)
+        return x
+
+    # ReverseV2OptionsT
+    def _UnPack(self, reverseV2Options):
+        if reverseV2Options is None:
+            return
+
+    # ReverseV2OptionsT
+    def Pack(self, builder):
+        ReverseV2OptionsStart(builder)
+        reverseV2Options = ReverseV2OptionsEnd(builder)
+        return reverseV2Options
+
+
+class AddNOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = AddNOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsAddNOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def AddNOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # AddNOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def AddNOptionsStart(builder):
+    builder.StartObject(0)
+
+def AddNOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class AddNOptionsT(object):
+
+    # AddNOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        addNoptions = AddNOptions()
+        addNoptions.Init(buf, pos)
+        return cls.InitFromObj(addNoptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, addNoptions):
+        x = AddNOptionsT()
+        x._UnPack(addNoptions)
+        return x
+
+    # AddNOptionsT
+    def _UnPack(self, addNoptions):
+        if addNoptions is None:
+            return
+
+    # AddNOptionsT
+    def Pack(self, builder):
+        AddNOptionsStart(builder)
+        addNoptions = AddNOptionsEnd(builder)
+        return addNoptions
+
+
+class GatherNdOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = GatherNdOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsGatherNdOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def GatherNdOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # GatherNdOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def GatherNdOptionsStart(builder):
+    builder.StartObject(0)
+
+def GatherNdOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class GatherNdOptionsT(object):
+
+    # GatherNdOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        gatherNdOptions = GatherNdOptions()
+        gatherNdOptions.Init(buf, pos)
+        return cls.InitFromObj(gatherNdOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, gatherNdOptions):
+        x = GatherNdOptionsT()
+        x._UnPack(gatherNdOptions)
+        return x
+
+    # GatherNdOptionsT
+    def _UnPack(self, gatherNdOptions):
+        if gatherNdOptions is None:
+            return
+
+    # GatherNdOptionsT
+    def Pack(self, builder):
+        GatherNdOptionsStart(builder)
+        gatherNdOptions = GatherNdOptionsEnd(builder)
+        return gatherNdOptions
+
+
+class WhereOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = WhereOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsWhereOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def WhereOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # WhereOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def WhereOptionsStart(builder):
+    builder.StartObject(0)
+
+def WhereOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class WhereOptionsT(object):
+
+    # WhereOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        whereOptions = WhereOptions()
+        whereOptions.Init(buf, pos)
+        return cls.InitFromObj(whereOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, whereOptions):
+        x = WhereOptionsT()
+        x._UnPack(whereOptions)
+        return x
+
+    # WhereOptionsT
+    def _UnPack(self, whereOptions):
+        if whereOptions is None:
+            return
+
+    # WhereOptionsT
+    def Pack(self, builder):
+        WhereOptionsStart(builder)
+        whereOptions = WhereOptionsEnd(builder)
+        return whereOptions
+
+
+class ReverseSequenceOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = ReverseSequenceOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsReverseSequenceOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def ReverseSequenceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # ReverseSequenceOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # ReverseSequenceOptions
+    def SeqDim(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # ReverseSequenceOptions
+    def BatchDim(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+def ReverseSequenceOptionsStart(builder):
+    builder.StartObject(2)
+
+def ReverseSequenceOptionsAddSeqDim(builder, seqDim):
+    builder.PrependInt32Slot(0, seqDim, 0)
+
+def ReverseSequenceOptionsAddBatchDim(builder, batchDim):
+    builder.PrependInt32Slot(1, batchDim, 0)
+
+def ReverseSequenceOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class ReverseSequenceOptionsT(object):
+
+    # ReverseSequenceOptionsT
+    def __init__(self):
+        self.seqDim = 0  # type: int
+        self.batchDim = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        reverseSequenceOptions = ReverseSequenceOptions()
+        reverseSequenceOptions.Init(buf, pos)
+        return cls.InitFromObj(reverseSequenceOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, reverseSequenceOptions):
+        x = ReverseSequenceOptionsT()
+        x._UnPack(reverseSequenceOptions)
+        return x
+
+    # ReverseSequenceOptionsT
+    def _UnPack(self, reverseSequenceOptions):
+        if reverseSequenceOptions is None:
+            return
+        self.seqDim = reverseSequenceOptions.SeqDim()
+        self.batchDim = reverseSequenceOptions.BatchDim()
+
+    # ReverseSequenceOptionsT
+    def Pack(self, builder):
+        ReverseSequenceOptionsStart(builder)
+        ReverseSequenceOptionsAddSeqDim(builder, self.seqDim)
+        ReverseSequenceOptionsAddBatchDim(builder, self.batchDim)
+        reverseSequenceOptions = ReverseSequenceOptionsEnd(builder)
+        return reverseSequenceOptions
+
+
+class MatrixDiagOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = MatrixDiagOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsMatrixDiagOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def MatrixDiagOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # MatrixDiagOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def MatrixDiagOptionsStart(builder):
+    builder.StartObject(0)
+
+def MatrixDiagOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class MatrixDiagOptionsT(object):
+
+    # MatrixDiagOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        matrixDiagOptions = MatrixDiagOptions()
+        matrixDiagOptions.Init(buf, pos)
+        return cls.InitFromObj(matrixDiagOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, matrixDiagOptions):
+        x = MatrixDiagOptionsT()
+        x._UnPack(matrixDiagOptions)
+        return x
+
+    # MatrixDiagOptionsT
+    def _UnPack(self, matrixDiagOptions):
+        if matrixDiagOptions is None:
+            return
+
+    # MatrixDiagOptionsT
+    def Pack(self, builder):
+        MatrixDiagOptionsStart(builder)
+        matrixDiagOptions = MatrixDiagOptionsEnd(builder)
+        return matrixDiagOptions
+
+
+class QuantizeOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = QuantizeOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsQuantizeOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def QuantizeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # QuantizeOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def QuantizeOptionsStart(builder):
+    builder.StartObject(0)
+
+def QuantizeOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class QuantizeOptionsT(object):
+
+    # QuantizeOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        quantizeOptions = QuantizeOptions()
+        quantizeOptions.Init(buf, pos)
+        return cls.InitFromObj(quantizeOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, quantizeOptions):
+        x = QuantizeOptionsT()
+        x._UnPack(quantizeOptions)
+        return x
+
+    # QuantizeOptionsT
+    def _UnPack(self, quantizeOptions):
+        if quantizeOptions is None:
+            return
+
+    # QuantizeOptionsT
+    def Pack(self, builder):
+        QuantizeOptionsStart(builder)
+        quantizeOptions = QuantizeOptionsEnd(builder)
+        return quantizeOptions
+
+
+class MatrixSetDiagOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = MatrixSetDiagOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsMatrixSetDiagOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def MatrixSetDiagOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # MatrixSetDiagOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def MatrixSetDiagOptionsStart(builder):
+    builder.StartObject(0)
+
+def MatrixSetDiagOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class MatrixSetDiagOptionsT(object):
+
+    # MatrixSetDiagOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        matrixSetDiagOptions = MatrixSetDiagOptions()
+        matrixSetDiagOptions.Init(buf, pos)
+        return cls.InitFromObj(matrixSetDiagOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, matrixSetDiagOptions):
+        x = MatrixSetDiagOptionsT()
+        x._UnPack(matrixSetDiagOptions)
+        return x
+
+    # MatrixSetDiagOptionsT
+    def _UnPack(self, matrixSetDiagOptions):
+        if matrixSetDiagOptions is None:
+            return
+
+    # MatrixSetDiagOptionsT
+    def Pack(self, builder):
+        MatrixSetDiagOptionsStart(builder)
+        matrixSetDiagOptions = MatrixSetDiagOptionsEnd(builder)
+        return matrixSetDiagOptions
+
+
+class IfOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = IfOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsIfOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def IfOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # IfOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # IfOptions
+    def ThenSubgraphIndex(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # IfOptions
+    def ElseSubgraphIndex(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+def IfOptionsStart(builder):
+    builder.StartObject(2)
+
+def IfOptionsAddThenSubgraphIndex(builder, thenSubgraphIndex):
+    builder.PrependInt32Slot(0, thenSubgraphIndex, 0)
+
+def IfOptionsAddElseSubgraphIndex(builder, elseSubgraphIndex):
+    builder.PrependInt32Slot(1, elseSubgraphIndex, 0)
+
+def IfOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class IfOptionsT(object):
+
+    # IfOptionsT
+    def __init__(self):
+        self.thenSubgraphIndex = 0  # type: int
+        self.elseSubgraphIndex = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        ifOptions = IfOptions()
+        ifOptions.Init(buf, pos)
+        return cls.InitFromObj(ifOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, ifOptions):
+        x = IfOptionsT()
+        x._UnPack(ifOptions)
+        return x
+
+    # IfOptionsT
+    def _UnPack(self, ifOptions):
+        if ifOptions is None:
+            return
+        self.thenSubgraphIndex = ifOptions.ThenSubgraphIndex()
+        self.elseSubgraphIndex = ifOptions.ElseSubgraphIndex()
+
+    # IfOptionsT
+    def Pack(self, builder):
+        IfOptionsStart(builder)
+        IfOptionsAddThenSubgraphIndex(builder, self.thenSubgraphIndex)
+        IfOptionsAddElseSubgraphIndex(builder, self.elseSubgraphIndex)
+        ifOptions = IfOptionsEnd(builder)
+        return ifOptions
+
+
+class CallOnceOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = CallOnceOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsCallOnceOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def CallOnceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # CallOnceOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # CallOnceOptions
+    def InitSubgraphIndex(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+def CallOnceOptionsStart(builder):
+    builder.StartObject(1)
+
+def CallOnceOptionsAddInitSubgraphIndex(builder, initSubgraphIndex):
+    builder.PrependInt32Slot(0, initSubgraphIndex, 0)
+
+def CallOnceOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class CallOnceOptionsT(object):
+
+    # CallOnceOptionsT
+    def __init__(self):
+        self.initSubgraphIndex = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        callOnceOptions = CallOnceOptions()
+        callOnceOptions.Init(buf, pos)
+        return cls.InitFromObj(callOnceOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, callOnceOptions):
+        x = CallOnceOptionsT()
+        x._UnPack(callOnceOptions)
+        return x
+
+    # CallOnceOptionsT
+    def _UnPack(self, callOnceOptions):
+        if callOnceOptions is None:
+            return
+        self.initSubgraphIndex = callOnceOptions.InitSubgraphIndex()
+
+    # CallOnceOptionsT
+    def Pack(self, builder):
+        CallOnceOptionsStart(builder)
+        CallOnceOptionsAddInitSubgraphIndex(builder, self.initSubgraphIndex)
+        callOnceOptions = CallOnceOptionsEnd(builder)
+        return callOnceOptions
+
+
+class WhileOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = WhileOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsWhileOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def WhileOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # WhileOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # WhileOptions
+    def CondSubgraphIndex(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # WhileOptions
+    def BodySubgraphIndex(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+def WhileOptionsStart(builder):
+    builder.StartObject(2)
+
+def WhileOptionsAddCondSubgraphIndex(builder, condSubgraphIndex):
+    builder.PrependInt32Slot(0, condSubgraphIndex, 0)
+
+def WhileOptionsAddBodySubgraphIndex(builder, bodySubgraphIndex):
+    builder.PrependInt32Slot(1, bodySubgraphIndex, 0)
+
+def WhileOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class WhileOptionsT(object):
+
+    # WhileOptionsT
+    def __init__(self):
+        self.condSubgraphIndex = 0  # type: int
+        self.bodySubgraphIndex = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        whileOptions = WhileOptions()
+        whileOptions.Init(buf, pos)
+        return cls.InitFromObj(whileOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, whileOptions):
+        x = WhileOptionsT()
+        x._UnPack(whileOptions)
+        return x
+
+    # WhileOptionsT
+    def _UnPack(self, whileOptions):
+        if whileOptions is None:
+            return
+        self.condSubgraphIndex = whileOptions.CondSubgraphIndex()
+        self.bodySubgraphIndex = whileOptions.BodySubgraphIndex()
+
+    # WhileOptionsT
+    def Pack(self, builder):
+        WhileOptionsStart(builder)
+        WhileOptionsAddCondSubgraphIndex(builder, self.condSubgraphIndex)
+        WhileOptionsAddBodySubgraphIndex(builder, self.bodySubgraphIndex)
+        whileOptions = WhileOptionsEnd(builder)
+        return whileOptions
+
+
+class NonMaxSuppressionV4Options(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = NonMaxSuppressionV4Options()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsNonMaxSuppressionV4Options(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def NonMaxSuppressionV4OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # NonMaxSuppressionV4Options
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def NonMaxSuppressionV4OptionsStart(builder):
+    builder.StartObject(0)
+
+def NonMaxSuppressionV4OptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class NonMaxSuppressionV4OptionsT(object):
+
+    # NonMaxSuppressionV4OptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        nonMaxSuppressionV4Options = NonMaxSuppressionV4Options()
+        nonMaxSuppressionV4Options.Init(buf, pos)
+        return cls.InitFromObj(nonMaxSuppressionV4Options)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, nonMaxSuppressionV4Options):
+        x = NonMaxSuppressionV4OptionsT()
+        x._UnPack(nonMaxSuppressionV4Options)
+        return x
+
+    # NonMaxSuppressionV4OptionsT
+    def _UnPack(self, nonMaxSuppressionV4Options):
+        if nonMaxSuppressionV4Options is None:
+            return
+
+    # NonMaxSuppressionV4OptionsT
+    def Pack(self, builder):
+        NonMaxSuppressionV4OptionsStart(builder)
+        nonMaxSuppressionV4Options = NonMaxSuppressionV4OptionsEnd(builder)
+        return nonMaxSuppressionV4Options
+
+
+class NonMaxSuppressionV5Options(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = NonMaxSuppressionV5Options()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsNonMaxSuppressionV5Options(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def NonMaxSuppressionV5OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # NonMaxSuppressionV5Options
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def NonMaxSuppressionV5OptionsStart(builder):
+    builder.StartObject(0)
+
+def NonMaxSuppressionV5OptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class NonMaxSuppressionV5OptionsT(object):
+
+    # NonMaxSuppressionV5OptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        nonMaxSuppressionV5Options = NonMaxSuppressionV5Options()
+        nonMaxSuppressionV5Options.Init(buf, pos)
+        return cls.InitFromObj(nonMaxSuppressionV5Options)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, nonMaxSuppressionV5Options):
+        x = NonMaxSuppressionV5OptionsT()
+        x._UnPack(nonMaxSuppressionV5Options)
+        return x
+
+    # NonMaxSuppressionV5OptionsT
+    def _UnPack(self, nonMaxSuppressionV5Options):
+        if nonMaxSuppressionV5Options is None:
+            return
+
+    # NonMaxSuppressionV5OptionsT
+    def Pack(self, builder):
+        NonMaxSuppressionV5OptionsStart(builder)
+        nonMaxSuppressionV5Options = NonMaxSuppressionV5OptionsEnd(builder)
+        return nonMaxSuppressionV5Options
+
+
+class ScatterNdOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = ScatterNdOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsScatterNdOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def ScatterNdOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # ScatterNdOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def ScatterNdOptionsStart(builder):
+    builder.StartObject(0)
+
+def ScatterNdOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class ScatterNdOptionsT(object):
+
+    # ScatterNdOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        scatterNdOptions = ScatterNdOptions()
+        scatterNdOptions.Init(buf, pos)
+        return cls.InitFromObj(scatterNdOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, scatterNdOptions):
+        x = ScatterNdOptionsT()
+        x._UnPack(scatterNdOptions)
+        return x
+
+    # ScatterNdOptionsT
+    def _UnPack(self, scatterNdOptions):
+        if scatterNdOptions is None:
+            return
+
+    # ScatterNdOptionsT
+    def Pack(self, builder):
+        ScatterNdOptionsStart(builder)
+        scatterNdOptions = ScatterNdOptionsEnd(builder)
+        return scatterNdOptions
+
+
+class SelectV2Options(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = SelectV2Options()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsSelectV2Options(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def SelectV2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # SelectV2Options
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def SelectV2OptionsStart(builder):
+    builder.StartObject(0)
+
+def SelectV2OptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class SelectV2OptionsT(object):
+
+    # SelectV2OptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        selectV2Options = SelectV2Options()
+        selectV2Options.Init(buf, pos)
+        return cls.InitFromObj(selectV2Options)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, selectV2Options):
+        x = SelectV2OptionsT()
+        x._UnPack(selectV2Options)
+        return x
+
+    # SelectV2OptionsT
+    def _UnPack(self, selectV2Options):
+        if selectV2Options is None:
+            return
+
+    # SelectV2OptionsT
+    def Pack(self, builder):
+        SelectV2OptionsStart(builder)
+        selectV2Options = SelectV2OptionsEnd(builder)
+        return selectV2Options
+
+
+class DensifyOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = DensifyOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsDensifyOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def DensifyOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # DensifyOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def DensifyOptionsStart(builder):
+    builder.StartObject(0)
+
+def DensifyOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class DensifyOptionsT(object):
+
+    # DensifyOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        densifyOptions = DensifyOptions()
+        densifyOptions.Init(buf, pos)
+        return cls.InitFromObj(densifyOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, densifyOptions):
+        x = DensifyOptionsT()
+        x._UnPack(densifyOptions)
+        return x
+
+    # DensifyOptionsT
+    def _UnPack(self, densifyOptions):
+        if densifyOptions is None:
+            return
+
+    # DensifyOptionsT
+    def Pack(self, builder):
+        DensifyOptionsStart(builder)
+        densifyOptions = DensifyOptionsEnd(builder)
+        return densifyOptions
+
+
+class SegmentSumOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = SegmentSumOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsSegmentSumOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def SegmentSumOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # SegmentSumOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def SegmentSumOptionsStart(builder):
+    builder.StartObject(0)
+
+def SegmentSumOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class SegmentSumOptionsT(object):
+
+    # SegmentSumOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        segmentSumOptions = SegmentSumOptions()
+        segmentSumOptions.Init(buf, pos)
+        return cls.InitFromObj(segmentSumOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, segmentSumOptions):
+        x = SegmentSumOptionsT()
+        x._UnPack(segmentSumOptions)
+        return x
+
+    # SegmentSumOptionsT
+    def _UnPack(self, segmentSumOptions):
+        if segmentSumOptions is None:
+            return
+
+    # SegmentSumOptionsT
+    def Pack(self, builder):
+        SegmentSumOptionsStart(builder)
+        segmentSumOptions = SegmentSumOptionsEnd(builder)
+        return segmentSumOptions
+
+
+class BatchMatMulOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = BatchMatMulOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsBatchMatMulOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def BatchMatMulOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # BatchMatMulOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # BatchMatMulOptions
+    def AdjX(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return False
+
+    # BatchMatMulOptions
+    def AdjY(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return False
+
+    # BatchMatMulOptions
+    def AsymmetricQuantizeInputs(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return False
+
+def BatchMatMulOptionsStart(builder):
+    builder.StartObject(3)
+
+def BatchMatMulOptionsAddAdjX(builder, adjX):
+    builder.PrependBoolSlot(0, adjX, 0)
+
+def BatchMatMulOptionsAddAdjY(builder, adjY):
+    builder.PrependBoolSlot(1, adjY, 0)
+
+def BatchMatMulOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs):
+    builder.PrependBoolSlot(2, asymmetricQuantizeInputs, 0)
+
+def BatchMatMulOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class BatchMatMulOptionsT(object):
+
+    # BatchMatMulOptionsT
+    def __init__(self):
+        self.adjX = False  # type: bool
+        self.adjY = False  # type: bool
+        self.asymmetricQuantizeInputs = False  # type: bool
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        batchMatMulOptions = BatchMatMulOptions()
+        batchMatMulOptions.Init(buf, pos)
+        return cls.InitFromObj(batchMatMulOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, batchMatMulOptions):
+        x = BatchMatMulOptionsT()
+        x._UnPack(batchMatMulOptions)
+        return x
+
+    # BatchMatMulOptionsT
+    def _UnPack(self, batchMatMulOptions):
+        if batchMatMulOptions is None:
+            return
+        self.adjX = batchMatMulOptions.AdjX()
+        self.adjY = batchMatMulOptions.AdjY()
+        self.asymmetricQuantizeInputs = batchMatMulOptions.AsymmetricQuantizeInputs()
+
+    # BatchMatMulOptionsT
+    def Pack(self, builder):
+        BatchMatMulOptionsStart(builder)
+        BatchMatMulOptionsAddAdjX(builder, self.adjX)
+        BatchMatMulOptionsAddAdjY(builder, self.adjY)
+        BatchMatMulOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs)
+        batchMatMulOptions = BatchMatMulOptionsEnd(builder)
+        return batchMatMulOptions
+
+
+class CumsumOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = CumsumOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsCumsumOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def CumsumOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # CumsumOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # CumsumOptions
+    def Exclusive(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return False
+
+    # CumsumOptions
+    def Reverse(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return False
+
+def CumsumOptionsStart(builder):
+    builder.StartObject(2)
+
+def CumsumOptionsAddExclusive(builder, exclusive):
+    builder.PrependBoolSlot(0, exclusive, 0)
+
+def CumsumOptionsAddReverse(builder, reverse):
+    builder.PrependBoolSlot(1, reverse, 0)
+
+def CumsumOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class CumsumOptionsT(object):
+
+    # CumsumOptionsT
+    def __init__(self):
+        self.exclusive = False  # type: bool
+        self.reverse = False  # type: bool
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        cumsumOptions = CumsumOptions()
+        cumsumOptions.Init(buf, pos)
+        return cls.InitFromObj(cumsumOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, cumsumOptions):
+        x = CumsumOptionsT()
+        x._UnPack(cumsumOptions)
+        return x
+
+    # CumsumOptionsT
+    def _UnPack(self, cumsumOptions):
+        if cumsumOptions is None:
+            return
+        self.exclusive = cumsumOptions.Exclusive()
+        self.reverse = cumsumOptions.Reverse()
+
+    # CumsumOptionsT
+    def Pack(self, builder):
+        CumsumOptionsStart(builder)
+        CumsumOptionsAddExclusive(builder, self.exclusive)
+        CumsumOptionsAddReverse(builder, self.reverse)
+        cumsumOptions = CumsumOptionsEnd(builder)
+        return cumsumOptions
+
+
+class BroadcastToOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = BroadcastToOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsBroadcastToOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def BroadcastToOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # BroadcastToOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def BroadcastToOptionsStart(builder):
+    builder.StartObject(0)
+
+def BroadcastToOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class BroadcastToOptionsT(object):
+
+    # BroadcastToOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        broadcastToOptions = BroadcastToOptions()
+        broadcastToOptions.Init(buf, pos)
+        return cls.InitFromObj(broadcastToOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, broadcastToOptions):
+        x = BroadcastToOptionsT()
+        x._UnPack(broadcastToOptions)
+        return x
+
+    # BroadcastToOptionsT
+    def _UnPack(self, broadcastToOptions):
+        if broadcastToOptions is None:
+            return
+
+    # BroadcastToOptionsT
+    def Pack(self, builder):
+        BroadcastToOptionsStart(builder)
+        broadcastToOptions = BroadcastToOptionsEnd(builder)
+        return broadcastToOptions
+
+
+class Rfft2dOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = Rfft2dOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsRfft2dOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def Rfft2dOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # Rfft2dOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def Rfft2dOptionsStart(builder):
+    builder.StartObject(0)
+
+def Rfft2dOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class Rfft2dOptionsT(object):
+
+    # Rfft2dOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        rfft2dOptions = Rfft2dOptions()
+        rfft2dOptions.Init(buf, pos)
+        return cls.InitFromObj(rfft2dOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, rfft2dOptions):
+        x = Rfft2dOptionsT()
+        x._UnPack(rfft2dOptions)
+        return x
+
+    # Rfft2dOptionsT
+    def _UnPack(self, rfft2dOptions):
+        if rfft2dOptions is None:
+            return
+
+    # Rfft2dOptionsT
+    def Pack(self, builder):
+        Rfft2dOptionsStart(builder)
+        rfft2dOptions = Rfft2dOptionsEnd(builder)
+        return rfft2dOptions
+
+
+class HashtableOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = HashtableOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsHashtableOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def HashtableOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # HashtableOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # HashtableOptions
+    def TableId(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # HashtableOptions
+    def KeyDtype(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
+
+    # HashtableOptions
+    def ValueDtype(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
+
+def HashtableOptionsStart(builder):
+    builder.StartObject(3)
+
+def HashtableOptionsAddTableId(builder, tableId):
+    builder.PrependInt32Slot(0, tableId, 0)
+
+def HashtableOptionsAddKeyDtype(builder, keyDtype):
+    builder.PrependInt8Slot(1, keyDtype, 0)
+
+def HashtableOptionsAddValueDtype(builder, valueDtype):
+    builder.PrependInt8Slot(2, valueDtype, 0)
+
+def HashtableOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class HashtableOptionsT(object):
+
+    # HashtableOptionsT
+    def __init__(self):
+        self.tableId = 0  # type: int
+        self.keyDtype = 0  # type: int
+        self.valueDtype = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        hashtableOptions = HashtableOptions()
+        hashtableOptions.Init(buf, pos)
+        return cls.InitFromObj(hashtableOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, hashtableOptions):
+        x = HashtableOptionsT()
+        x._UnPack(hashtableOptions)
+        return x
+
+    # HashtableOptionsT
+    def _UnPack(self, hashtableOptions):
+        if hashtableOptions is None:
+            return
+        self.tableId = hashtableOptions.TableId()
+        self.keyDtype = hashtableOptions.KeyDtype()
+        self.valueDtype = hashtableOptions.ValueDtype()
+
+    # HashtableOptionsT
+    def Pack(self, builder):
+        HashtableOptionsStart(builder)
+        HashtableOptionsAddTableId(builder, self.tableId)
+        HashtableOptionsAddKeyDtype(builder, self.keyDtype)
+        HashtableOptionsAddValueDtype(builder, self.valueDtype)
+        hashtableOptions = HashtableOptionsEnd(builder)
+        return hashtableOptions
+
+
+class HashtableFindOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = HashtableFindOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsHashtableFindOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def HashtableFindOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # HashtableFindOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def HashtableFindOptionsStart(builder):
+    builder.StartObject(0)
+
+def HashtableFindOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class HashtableFindOptionsT(object):
+
+    # HashtableFindOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        hashtableFindOptions = HashtableFindOptions()
+        hashtableFindOptions.Init(buf, pos)
+        return cls.InitFromObj(hashtableFindOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, hashtableFindOptions):
+        x = HashtableFindOptionsT()
+        x._UnPack(hashtableFindOptions)
+        return x
+
+    # HashtableFindOptionsT
+    def _UnPack(self, hashtableFindOptions):
+        if hashtableFindOptions is None:
+            return
+
+    # HashtableFindOptionsT
+    def Pack(self, builder):
+        HashtableFindOptionsStart(builder)
+        hashtableFindOptions = HashtableFindOptionsEnd(builder)
+        return hashtableFindOptions
+
+
+class HashtableImportOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = HashtableImportOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsHashtableImportOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def HashtableImportOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # HashtableImportOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def HashtableImportOptionsStart(builder):
+    builder.StartObject(0)
+
+def HashtableImportOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class HashtableImportOptionsT(object):
+
+    # HashtableImportOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        hashtableImportOptions = HashtableImportOptions()
+        hashtableImportOptions.Init(buf, pos)
+        return cls.InitFromObj(hashtableImportOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, hashtableImportOptions):
+        x = HashtableImportOptionsT()
+        x._UnPack(hashtableImportOptions)
+        return x
+
+    # HashtableImportOptionsT
+    def _UnPack(self, hashtableImportOptions):
+        if hashtableImportOptions is None:
+            return
+
+    # HashtableImportOptionsT
+    def Pack(self, builder):
+        HashtableImportOptionsStart(builder)
+        hashtableImportOptions = HashtableImportOptionsEnd(builder)
+        return hashtableImportOptions
+
+
+class HashtableSizeOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = HashtableSizeOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsHashtableSizeOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def HashtableSizeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # HashtableSizeOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def HashtableSizeOptionsStart(builder):
+    builder.StartObject(0)
+
+def HashtableSizeOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class HashtableSizeOptionsT(object):
+
+    # HashtableSizeOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        hashtableSizeOptions = HashtableSizeOptions()
+        hashtableSizeOptions.Init(buf, pos)
+        return cls.InitFromObj(hashtableSizeOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, hashtableSizeOptions):
+        x = HashtableSizeOptionsT()
+        x._UnPack(hashtableSizeOptions)
+        return x
+
+    # HashtableSizeOptionsT
+    def _UnPack(self, hashtableSizeOptions):
+        if hashtableSizeOptions is None:
+            return
+
+    # HashtableSizeOptionsT
+    def Pack(self, builder):
+        HashtableSizeOptionsStart(builder)
+        hashtableSizeOptions = HashtableSizeOptionsEnd(builder)
+        return hashtableSizeOptions
+
+
+class VarHandleOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = VarHandleOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsVarHandleOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def VarHandleOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # VarHandleOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # VarHandleOptions
+    def Container(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.String(o + self._tab.Pos)
+        return None
+
+    # VarHandleOptions
+    def SharedName(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.String(o + self._tab.Pos)
+        return None
+
+def VarHandleOptionsStart(builder):
+    builder.StartObject(2)
+
+def VarHandleOptionsAddContainer(builder, container):
+    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(container), 0)
+
+def VarHandleOptionsAddSharedName(builder, sharedName):
+    builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(sharedName), 0)
+
+def VarHandleOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class VarHandleOptionsT(object):
+
+    # VarHandleOptionsT
+    def __init__(self):
+        self.container = None  # type: str
+        self.sharedName = None  # type: str
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        varHandleOptions = VarHandleOptions()
+        varHandleOptions.Init(buf, pos)
+        return cls.InitFromObj(varHandleOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, varHandleOptions):
+        x = VarHandleOptionsT()
+        x._UnPack(varHandleOptions)
+        return x
+
+    # VarHandleOptionsT
+    def _UnPack(self, varHandleOptions):
+        if varHandleOptions is None:
+            return
+        self.container = varHandleOptions.Container()
+        self.sharedName = varHandleOptions.SharedName()
+
+    # VarHandleOptionsT
+    def Pack(self, builder):
+        if self.container is not None:
+            container = builder.CreateString(self.container)
+        if self.sharedName is not None:
+            sharedName = builder.CreateString(self.sharedName)
+        VarHandleOptionsStart(builder)
+        if self.container is not None:
+            VarHandleOptionsAddContainer(builder, container)
+        if self.sharedName is not None:
+            VarHandleOptionsAddSharedName(builder, sharedName)
+        varHandleOptions = VarHandleOptionsEnd(builder)
+        return varHandleOptions
+
+
+class ReadVariableOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = ReadVariableOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsReadVariableOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def ReadVariableOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # ReadVariableOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def ReadVariableOptionsStart(builder):
+    builder.StartObject(0)
+
+def ReadVariableOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class ReadVariableOptionsT(object):
+
+    # ReadVariableOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        readVariableOptions = ReadVariableOptions()
+        readVariableOptions.Init(buf, pos)
+        return cls.InitFromObj(readVariableOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, readVariableOptions):
+        x = ReadVariableOptionsT()
+        x._UnPack(readVariableOptions)
+        return x
+
+    # ReadVariableOptionsT
+    def _UnPack(self, readVariableOptions):
+        if readVariableOptions is None:
+            return
+
+    # ReadVariableOptionsT
+    def Pack(self, builder):
+        ReadVariableOptionsStart(builder)
+        readVariableOptions = ReadVariableOptionsEnd(builder)
+        return readVariableOptions
+
+
+class AssignVariableOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = AssignVariableOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsAssignVariableOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def AssignVariableOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # AssignVariableOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def AssignVariableOptionsStart(builder):
+    builder.StartObject(0)
+
+def AssignVariableOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class AssignVariableOptionsT(object):
+
+    # AssignVariableOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        assignVariableOptions = AssignVariableOptions()
+        assignVariableOptions.Init(buf, pos)
+        return cls.InitFromObj(assignVariableOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, assignVariableOptions):
+        x = AssignVariableOptionsT()
+        x._UnPack(assignVariableOptions)
+        return x
+
+    # AssignVariableOptionsT
+    def _UnPack(self, assignVariableOptions):
+        if assignVariableOptions is None:
+            return
+
+    # AssignVariableOptionsT
+    def Pack(self, builder):
+        AssignVariableOptionsStart(builder)
+        assignVariableOptions = AssignVariableOptionsEnd(builder)
+        return assignVariableOptions
+
+
+class RandomOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = RandomOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsRandomOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def RandomOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # RandomOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # RandomOptions
+    def Seed(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
+        return 0
+
+    # RandomOptions
+    def Seed2(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
+        return 0
+
+def RandomOptionsStart(builder):
+    builder.StartObject(2)
+
+def RandomOptionsAddSeed(builder, seed):
+    builder.PrependInt64Slot(0, seed, 0)
+
+def RandomOptionsAddSeed2(builder, seed2):
+    builder.PrependInt64Slot(1, seed2, 0)
+
+def RandomOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class RandomOptionsT(object):
+
+    # RandomOptionsT
+    def __init__(self):
+        self.seed = 0  # type: int
+        self.seed2 = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        randomOptions = RandomOptions()
+        randomOptions.Init(buf, pos)
+        return cls.InitFromObj(randomOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, randomOptions):
+        x = RandomOptionsT()
+        x._UnPack(randomOptions)
+        return x
+
+    # RandomOptionsT
+    def _UnPack(self, randomOptions):
+        if randomOptions is None:
+            return
+        self.seed = randomOptions.Seed()
+        self.seed2 = randomOptions.Seed2()
+
+    # RandomOptionsT
+    def Pack(self, builder):
+        RandomOptionsStart(builder)
+        RandomOptionsAddSeed(builder, self.seed)
+        RandomOptionsAddSeed2(builder, self.seed2)
+        randomOptions = RandomOptionsEnd(builder)
+        return randomOptions
+
+
+class BucketizeOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = BucketizeOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsBucketizeOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def BucketizeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # BucketizeOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # BucketizeOptions
+    def Boundaries(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+        return 0
+
+    # BucketizeOptions
+    def BoundariesAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
+        return 0
+
+    # BucketizeOptions
+    def BoundariesLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # BucketizeOptions
+    def BoundariesIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        return o == 0
+
+def BucketizeOptionsStart(builder):
+    builder.StartObject(1)
+
+def BucketizeOptionsAddBoundaries(builder, boundaries):
+    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(boundaries), 0)
+
+def BucketizeOptionsStartBoundariesVector(builder, numElems):
+    return builder.StartVector(4, numElems, 4)
+
+def BucketizeOptionsEnd(builder):
+    return builder.EndObject()
+
+
+try:
+    from typing import List
+except:
+    pass
+
+class BucketizeOptionsT(object):
+
+    # BucketizeOptionsT
+    def __init__(self):
+        self.boundaries = None  # type: List[float]
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        bucketizeOptions = BucketizeOptions()
+        bucketizeOptions.Init(buf, pos)
+        return cls.InitFromObj(bucketizeOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, bucketizeOptions):
+        x = BucketizeOptionsT()
+        x._UnPack(bucketizeOptions)
+        return x
+
+    # BucketizeOptionsT
+    def _UnPack(self, bucketizeOptions):
+        if bucketizeOptions is None:
+            return
+        if not bucketizeOptions.BoundariesIsNone():
+            if np is None:
+                self.boundaries = []
+                for i in range(bucketizeOptions.BoundariesLength()):
+                    self.boundaries.append(bucketizeOptions.Boundaries(i))
+            else:
+                self.boundaries = bucketizeOptions.BoundariesAsNumpy()
+
+    # BucketizeOptionsT
+    def Pack(self, builder):
+        if self.boundaries is not None:
+            if np is not None and type(self.boundaries) is np.ndarray:
+                boundaries = builder.CreateNumpyVector(self.boundaries)
+            else:
+                BucketizeOptionsStartBoundariesVector(builder, len(self.boundaries))
+                for i in reversed(range(len(self.boundaries))):
+                    builder.PrependFloat32(self.boundaries[i])
+                boundaries = builder.EndVector()
+        BucketizeOptionsStart(builder)
+        if self.boundaries is not None:
+            BucketizeOptionsAddBoundaries(builder, boundaries)
+        bucketizeOptions = BucketizeOptionsEnd(builder)
+        return bucketizeOptions
+
+
+class GeluOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = GeluOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsGeluOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def GeluOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # GeluOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # GeluOptions
+    def Approximate(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+        return False
+
+def GeluOptionsStart(builder):
+    builder.StartObject(1)
+
+def GeluOptionsAddApproximate(builder, approximate):
+    builder.PrependBoolSlot(0, approximate, 0)
+
+def GeluOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class GeluOptionsT(object):
+
+    # GeluOptionsT
+    def __init__(self):
+        self.approximate = False  # type: bool
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        geluOptions = GeluOptions()
+        geluOptions.Init(buf, pos)
+        return cls.InitFromObj(geluOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, geluOptions):
+        x = GeluOptionsT()
+        x._UnPack(geluOptions)
+        return x
+
+    # GeluOptionsT
+    def _UnPack(self, geluOptions):
+        if geluOptions is None:
+            return
+        self.approximate = geluOptions.Approximate()
+
+    # GeluOptionsT
+    def Pack(self, builder):
+        GeluOptionsStart(builder)
+        GeluOptionsAddApproximate(builder, self.approximate)
+        geluOptions = GeluOptionsEnd(builder)
+        return geluOptions
+
+
+class DynamicUpdateSliceOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = DynamicUpdateSliceOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsDynamicUpdateSliceOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def DynamicUpdateSliceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # DynamicUpdateSliceOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def DynamicUpdateSliceOptionsStart(builder):
+    builder.StartObject(0)
+
+def DynamicUpdateSliceOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class DynamicUpdateSliceOptionsT(object):
+
+    # DynamicUpdateSliceOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        dynamicUpdateSliceOptions = DynamicUpdateSliceOptions()
+        dynamicUpdateSliceOptions.Init(buf, pos)
+        return cls.InitFromObj(dynamicUpdateSliceOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, dynamicUpdateSliceOptions):
+        x = DynamicUpdateSliceOptionsT()
+        x._UnPack(dynamicUpdateSliceOptions)
+        return x
+
+    # DynamicUpdateSliceOptionsT
+    def _UnPack(self, dynamicUpdateSliceOptions):
+        if dynamicUpdateSliceOptions is None:
+            return
+
+    # DynamicUpdateSliceOptionsT
+    def Pack(self, builder):
+        DynamicUpdateSliceOptionsStart(builder)
+        dynamicUpdateSliceOptions = DynamicUpdateSliceOptionsEnd(builder)
+        return dynamicUpdateSliceOptions
+
+
+class UnsortedSegmentProdOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = UnsortedSegmentProdOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsUnsortedSegmentProdOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def UnsortedSegmentProdOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # UnsortedSegmentProdOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def UnsortedSegmentProdOptionsStart(builder):
+    builder.StartObject(0)
+
+def UnsortedSegmentProdOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class UnsortedSegmentProdOptionsT(object):
+
+    # UnsortedSegmentProdOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        unsortedSegmentProdOptions = UnsortedSegmentProdOptions()
+        unsortedSegmentProdOptions.Init(buf, pos)
+        return cls.InitFromObj(unsortedSegmentProdOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, unsortedSegmentProdOptions):
+        x = UnsortedSegmentProdOptionsT()
+        x._UnPack(unsortedSegmentProdOptions)
+        return x
+
+    # UnsortedSegmentProdOptionsT
+    def _UnPack(self, unsortedSegmentProdOptions):
+        if unsortedSegmentProdOptions is None:
+            return
+
+    # UnsortedSegmentProdOptionsT
+    def Pack(self, builder):
+        UnsortedSegmentProdOptionsStart(builder)
+        unsortedSegmentProdOptions = UnsortedSegmentProdOptionsEnd(builder)
+        return unsortedSegmentProdOptions
+
+
+class UnsortedSegmentMaxOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = UnsortedSegmentMaxOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsUnsortedSegmentMaxOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def UnsortedSegmentMaxOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # UnsortedSegmentMaxOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def UnsortedSegmentMaxOptionsStart(builder):
+    builder.StartObject(0)
+
+def UnsortedSegmentMaxOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class UnsortedSegmentMaxOptionsT(object):
+
+    # UnsortedSegmentMaxOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        unsortedSegmentMaxOptions = UnsortedSegmentMaxOptions()
+        unsortedSegmentMaxOptions.Init(buf, pos)
+        return cls.InitFromObj(unsortedSegmentMaxOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, unsortedSegmentMaxOptions):
+        x = UnsortedSegmentMaxOptionsT()
+        x._UnPack(unsortedSegmentMaxOptions)
+        return x
+
+    # UnsortedSegmentMaxOptionsT
+    def _UnPack(self, unsortedSegmentMaxOptions):
+        if unsortedSegmentMaxOptions is None:
+            return
+
+    # UnsortedSegmentMaxOptionsT
+    def Pack(self, builder):
+        UnsortedSegmentMaxOptionsStart(builder)
+        unsortedSegmentMaxOptions = UnsortedSegmentMaxOptionsEnd(builder)
+        return unsortedSegmentMaxOptions
+
+
+class UnsortedSegmentSumOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = UnsortedSegmentSumOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsUnsortedSegmentSumOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def UnsortedSegmentSumOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # UnsortedSegmentSumOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def UnsortedSegmentSumOptionsStart(builder):
+    builder.StartObject(0)
+
+def UnsortedSegmentSumOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class UnsortedSegmentSumOptionsT(object):
+
+    # UnsortedSegmentSumOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        unsortedSegmentSumOptions = UnsortedSegmentSumOptions()
+        unsortedSegmentSumOptions.Init(buf, pos)
+        return cls.InitFromObj(unsortedSegmentSumOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, unsortedSegmentSumOptions):
+        x = UnsortedSegmentSumOptionsT()
+        x._UnPack(unsortedSegmentSumOptions)
+        return x
+
+    # UnsortedSegmentSumOptionsT
+    def _UnPack(self, unsortedSegmentSumOptions):
+        if unsortedSegmentSumOptions is None:
+            return
+
+    # UnsortedSegmentSumOptionsT
+    def Pack(self, builder):
+        UnsortedSegmentSumOptionsStart(builder)
+        unsortedSegmentSumOptions = UnsortedSegmentSumOptionsEnd(builder)
+        return unsortedSegmentSumOptions
+
+
+class ATan2Options(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = ATan2Options()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsATan2Options(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def ATan2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # ATan2Options
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def ATan2OptionsStart(builder):
+    builder.StartObject(0)
+
+def ATan2OptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class ATan2OptionsT(object):
+
+    # ATan2OptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        atan2Options = ATan2Options()
+        atan2Options.Init(buf, pos)
+        return cls.InitFromObj(atan2Options)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, atan2Options):
+        x = ATan2OptionsT()
+        x._UnPack(atan2Options)
+        return x
+
+    # ATan2OptionsT
+    def _UnPack(self, atan2Options):
+        if atan2Options is None:
+            return
+
+    # ATan2OptionsT
+    def Pack(self, builder):
+        ATan2OptionsStart(builder)
+        atan2Options = ATan2OptionsEnd(builder)
+        return atan2Options
+
+
+class UnsortedSegmentMinOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = UnsortedSegmentMinOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsUnsortedSegmentMinOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def UnsortedSegmentMinOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # UnsortedSegmentMinOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def UnsortedSegmentMinOptionsStart(builder):
+    builder.StartObject(0)
+
+def UnsortedSegmentMinOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class UnsortedSegmentMinOptionsT(object):
+
+    # UnsortedSegmentMinOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        unsortedSegmentMinOptions = UnsortedSegmentMinOptions()
+        unsortedSegmentMinOptions.Init(buf, pos)
+        return cls.InitFromObj(unsortedSegmentMinOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, unsortedSegmentMinOptions):
+        x = UnsortedSegmentMinOptionsT()
+        x._UnPack(unsortedSegmentMinOptions)
+        return x
+
+    # UnsortedSegmentMinOptionsT
+    def _UnPack(self, unsortedSegmentMinOptions):
+        if unsortedSegmentMinOptions is None:
+            return
+
+    # UnsortedSegmentMinOptionsT
+    def Pack(self, builder):
+        UnsortedSegmentMinOptionsStart(builder)
+        unsortedSegmentMinOptions = UnsortedSegmentMinOptionsEnd(builder)
+        return unsortedSegmentMinOptions
+
+
+class SignOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = SignOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsSignOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def SignOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # SignOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def SignOptionsStart(builder):
+    builder.StartObject(0)
+
+def SignOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class SignOptionsT(object):
+
+    # SignOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        signOptions = SignOptions()
+        signOptions.Init(buf, pos)
+        return cls.InitFromObj(signOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, signOptions):
+        x = SignOptionsT()
+        x._UnPack(signOptions)
+        return x
+
+    # SignOptionsT
+    def _UnPack(self, signOptions):
+        if signOptions is None:
+            return
+
+    # SignOptionsT
+    def Pack(self, builder):
+        SignOptionsStart(builder)
+        signOptions = SignOptionsEnd(builder)
+        return signOptions
+
+
+class BitcastOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = BitcastOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsBitcastOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def BitcastOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # BitcastOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def BitcastOptionsStart(builder):
+    builder.StartObject(0)
+
+def BitcastOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class BitcastOptionsT(object):
+
+    # BitcastOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        bitcastOptions = BitcastOptions()
+        bitcastOptions.Init(buf, pos)
+        return cls.InitFromObj(bitcastOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, bitcastOptions):
+        x = BitcastOptionsT()
+        x._UnPack(bitcastOptions)
+        return x
+
+    # BitcastOptionsT
+    def _UnPack(self, bitcastOptions):
+        if bitcastOptions is None:
+            return
+
+    # BitcastOptionsT
+    def Pack(self, builder):
+        BitcastOptionsStart(builder)
+        bitcastOptions = BitcastOptionsEnd(builder)
+        return bitcastOptions
+
+
+class BitwiseXorOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = BitwiseXorOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsBitwiseXorOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def BitwiseXorOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # BitwiseXorOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def BitwiseXorOptionsStart(builder):
+    builder.StartObject(0)
+
+def BitwiseXorOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class BitwiseXorOptionsT(object):
+
+    # BitwiseXorOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        bitwiseXorOptions = BitwiseXorOptions()
+        bitwiseXorOptions.Init(buf, pos)
+        return cls.InitFromObj(bitwiseXorOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, bitwiseXorOptions):
+        x = BitwiseXorOptionsT()
+        x._UnPack(bitwiseXorOptions)
+        return x
+
+    # BitwiseXorOptionsT
+    def _UnPack(self, bitwiseXorOptions):
+        if bitwiseXorOptions is None:
+            return
+
+    # BitwiseXorOptionsT
+    def Pack(self, builder):
+        BitwiseXorOptionsStart(builder)
+        bitwiseXorOptions = BitwiseXorOptionsEnd(builder)
+        return bitwiseXorOptions
+
+
+class RightShiftOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = RightShiftOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsRightShiftOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def RightShiftOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # RightShiftOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def RightShiftOptionsStart(builder):
+    builder.StartObject(0)
+
+def RightShiftOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class RightShiftOptionsT(object):
+
+    # RightShiftOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        rightShiftOptions = RightShiftOptions()
+        rightShiftOptions.Init(buf, pos)
+        return cls.InitFromObj(rightShiftOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, rightShiftOptions):
+        x = RightShiftOptionsT()
+        x._UnPack(rightShiftOptions)
+        return x
+
+    # RightShiftOptionsT
+    def _UnPack(self, rightShiftOptions):
+        if rightShiftOptions is None:
+            return
+
+    # RightShiftOptionsT
+    def Pack(self, builder):
+        RightShiftOptionsStart(builder)
+        rightShiftOptions = RightShiftOptionsEnd(builder)
+        return rightShiftOptions
+
+
+class DilateOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = DilateOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsDilateOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def DilateOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # DilateOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+def DilateOptionsStart(builder):
+    builder.StartObject(0)
+
+def DilateOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class DilateOptionsT(object):
+
+    # DilateOptionsT
+    def __init__(self):
+        pass
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        dilateOptions = DilateOptions()
+        dilateOptions.Init(buf, pos)
+        return cls.InitFromObj(dilateOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, dilateOptions):
+        x = DilateOptionsT()
+        x._UnPack(dilateOptions)
+        return x
+
+    # DilateOptionsT
+    def _UnPack(self, dilateOptions):
+        if dilateOptions is None:
+            return
+
+    # DilateOptionsT
+    def Pack(self, builder):
+        DilateOptionsStart(builder)
+        dilateOptions = DilateOptionsEnd(builder)
+        return dilateOptions
+
+
+class ReduceWindowOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = ReduceWindowOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsReduceWindowOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def ReduceWindowOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # ReduceWindowOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # ReduceWindowOptions
+    def ReduceFunction(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+def ReduceWindowOptionsStart(builder):
+    builder.StartObject(1)
+
+def ReduceWindowOptionsAddReduceFunction(builder, reduceFunction):
+    builder.PrependInt32Slot(0, reduceFunction, 0)
+
+def ReduceWindowOptionsEnd(builder):
+    return builder.EndObject()
+
+
+
+class ReduceWindowOptionsT(object):
+
+    # ReduceWindowOptionsT
+    def __init__(self):
+        self.reduceFunction = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        reduceWindowOptions = ReduceWindowOptions()
+        reduceWindowOptions.Init(buf, pos)
+        return cls.InitFromObj(reduceWindowOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, reduceWindowOptions):
+        x = ReduceWindowOptionsT()
+        x._UnPack(reduceWindowOptions)
+        return x
+
+    # ReduceWindowOptionsT
+    def _UnPack(self, reduceWindowOptions):
+        if reduceWindowOptions is None:
+            return
+        self.reduceFunction = reduceWindowOptions.ReduceFunction()
+
+    # ReduceWindowOptionsT
+    def Pack(self, builder):
+        ReduceWindowOptionsStart(builder)
+        ReduceWindowOptionsAddReduceFunction(builder, self.reduceFunction)
+        reduceWindowOptions = ReduceWindowOptionsEnd(builder)
+        return reduceWindowOptions
+
+
+class OperatorCode(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = OperatorCode()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsOperatorCode(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def OperatorCodeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # OperatorCode
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # OperatorCode
+    def DeprecatedBuiltinCode(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
+
+    # OperatorCode
+    def CustomCode(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.String(o + self._tab.Pos)
+        return None
+
+    # OperatorCode
+    def Version(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 1
+
+    # OperatorCode
+    def BuiltinCode(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+def OperatorCodeStart(builder):
+    builder.StartObject(4)
+
+def OperatorCodeAddDeprecatedBuiltinCode(builder, deprecatedBuiltinCode):
+    builder.PrependInt8Slot(0, deprecatedBuiltinCode, 0)
+
+def OperatorCodeAddCustomCode(builder, customCode):
+    builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(customCode), 0)
+
+def OperatorCodeAddVersion(builder, version):
+    builder.PrependInt32Slot(2, version, 1)
+
+def OperatorCodeAddBuiltinCode(builder, builtinCode):
+    builder.PrependInt32Slot(3, builtinCode, 0)
+
+def OperatorCodeEnd(builder):
+    return builder.EndObject()
+
+
+
+class OperatorCodeT(object):
+
+    # OperatorCodeT
+    def __init__(self):
+        self.deprecatedBuiltinCode = 0  # type: int
+        self.customCode = None  # type: str
+        self.version = 1  # type: int
+        self.builtinCode = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        operatorCode = OperatorCode()
+        operatorCode.Init(buf, pos)
+        return cls.InitFromObj(operatorCode)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, operatorCode):
+        x = OperatorCodeT()
+        x._UnPack(operatorCode)
+        return x
+
+    # OperatorCodeT
+    def _UnPack(self, operatorCode):
+        if operatorCode is None:
+            return
+        self.deprecatedBuiltinCode = operatorCode.DeprecatedBuiltinCode()
+        self.customCode = operatorCode.CustomCode()
+        self.version = operatorCode.Version()
+        self.builtinCode = operatorCode.BuiltinCode()
+
+    # OperatorCodeT
+    def Pack(self, builder):
+        if self.customCode is not None:
+            customCode = builder.CreateString(self.customCode)
+        OperatorCodeStart(builder)
+        OperatorCodeAddDeprecatedBuiltinCode(builder, self.deprecatedBuiltinCode)
+        if self.customCode is not None:
+            OperatorCodeAddCustomCode(builder, customCode)
+        OperatorCodeAddVersion(builder, self.version)
+        OperatorCodeAddBuiltinCode(builder, self.builtinCode)
+        operatorCode = OperatorCodeEnd(builder)
+        return operatorCode
+
+
+class StableHLOCompositeOptions(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = StableHLOCompositeOptions()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsStableHLOCompositeOptions(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def StableHLOCompositeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # StableHLOCompositeOptions
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # StableHLOCompositeOptions
+    def Name(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.String(o + self._tab.Pos)
+        return None
+
+    # StableHLOCompositeOptions
+    def DecompositionSubgraphIndex(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+    # StableHLOCompositeOptions
+    def CompositeAttributes(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            a = self._tab.Vector(o)
+            return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
+        return 0
+
+    # StableHLOCompositeOptions
+    def CompositeAttributesAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
+        return 0
+
+    # StableHLOCompositeOptions
+    def CompositeAttributesLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # StableHLOCompositeOptions
+    def CompositeAttributesIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        return o == 0
+
+    # StableHLOCompositeOptions
+    def CompositeAttributesFormat(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+        return 0
+
+    # StableHLOCompositeOptions
+    def Version(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+        return 0
+
+def StableHLOCompositeOptionsStart(builder):
+    builder.StartObject(5)
+
+def StableHLOCompositeOptionsAddName(builder, name):
+    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
+
+def StableHLOCompositeOptionsAddDecompositionSubgraphIndex(builder, decompositionSubgraphIndex):
+    builder.PrependInt32Slot(1, decompositionSubgraphIndex, 0)
+
+def StableHLOCompositeOptionsAddCompositeAttributes(builder, compositeAttributes):
+    builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(compositeAttributes), 0)
+
+def StableHLOCompositeOptionsStartCompositeAttributesVector(builder, numElems):
+    return builder.StartVector(1, numElems, 1)
+
+def StableHLOCompositeOptionsAddCompositeAttributesFormat(builder, compositeAttributesFormat):
+    builder.PrependInt8Slot(3, compositeAttributesFormat, 0)
+
+def StableHLOCompositeOptionsAddVersion(builder, version):
+    builder.PrependInt32Slot(4, version, 0)
+
+def StableHLOCompositeOptionsEnd(builder):
+    return builder.EndObject()
+
+
+try:
+    from typing import List
+except:
+    pass
+
+class StableHLOCompositeOptionsT(object):
+
+    # StableHLOCompositeOptionsT
+    def __init__(self):
+        self.name = None  # type: str
+        self.decompositionSubgraphIndex = 0  # type: int
+        self.compositeAttributes = None  # type: List[int]
+        self.compositeAttributesFormat = 0  # type: int
+        self.version = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        stableHlocompositeOptions = StableHLOCompositeOptions()
+        stableHlocompositeOptions.Init(buf, pos)
+        return cls.InitFromObj(stableHlocompositeOptions)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, stableHlocompositeOptions):
+        x = StableHLOCompositeOptionsT()
+        x._UnPack(stableHlocompositeOptions)
+        return x
+
+    # StableHLOCompositeOptionsT
+    def _UnPack(self, stableHlocompositeOptions):
+        if stableHlocompositeOptions is None:
+            return
+        self.name = stableHlocompositeOptions.Name()
+        self.decompositionSubgraphIndex = stableHlocompositeOptions.DecompositionSubgraphIndex()
+        if not stableHlocompositeOptions.CompositeAttributesIsNone():
+            if np is None:
+                self.compositeAttributes = []
+                for i in range(stableHlocompositeOptions.CompositeAttributesLength()):
+                    self.compositeAttributes.append(stableHlocompositeOptions.CompositeAttributes(i))
+            else:
+                self.compositeAttributes = stableHlocompositeOptions.CompositeAttributesAsNumpy()
+        self.compositeAttributesFormat = stableHlocompositeOptions.CompositeAttributesFormat()
+        self.version = stableHlocompositeOptions.Version()
+
+    # StableHLOCompositeOptionsT
+    def Pack(self, builder):
+        if self.name is not None:
+            name = builder.CreateString(self.name)
+        if self.compositeAttributes is not None:
+            if np is not None and type(self.compositeAttributes) is np.ndarray:
+                compositeAttributes = builder.CreateNumpyVector(self.compositeAttributes)
+            else:
+                StableHLOCompositeOptionsStartCompositeAttributesVector(builder, len(self.compositeAttributes))
+                for i in reversed(range(len(self.compositeAttributes))):
+                    builder.PrependUint8(self.compositeAttributes[i])
+                compositeAttributes = builder.EndVector()
+        StableHLOCompositeOptionsStart(builder)
+        if self.name is not None:
+            StableHLOCompositeOptionsAddName(builder, name)
+        StableHLOCompositeOptionsAddDecompositionSubgraphIndex(builder, self.decompositionSubgraphIndex)
+        if self.compositeAttributes is not None:
+            StableHLOCompositeOptionsAddCompositeAttributes(builder, compositeAttributes)
+        StableHLOCompositeOptionsAddCompositeAttributesFormat(builder, self.compositeAttributesFormat)
+        StableHLOCompositeOptionsAddVersion(builder, self.version)
+        stableHlocompositeOptions = StableHLOCompositeOptionsEnd(builder)
+        return stableHlocompositeOptions
+
 
 class Operator(object):
     __slots__ = ['_tab']
@@ -7573,60 +16900,84 @@
             return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
         return 0
 
-def OperatorStart(builder): builder.StartObject(11)
-def Start(builder):
-    return OperatorStart(builder)
-def OperatorAddOpcodeIndex(builder, opcodeIndex): builder.PrependUint32Slot(0, opcodeIndex, 0)
-def AddOpcodeIndex(builder, opcodeIndex):
-    return OperatorAddOpcodeIndex(builder, opcodeIndex)
-def OperatorAddInputs(builder, inputs): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0)
-def AddInputs(builder, inputs):
-    return OperatorAddInputs(builder, inputs)
-def OperatorStartInputsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def StartInputsVector(builder, numElems):
-    return OperatorStartInputsVector(builder, numElems)
-def OperatorAddOutputs(builder, outputs): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0)
-def AddOutputs(builder, outputs):
-    return OperatorAddOutputs(builder, outputs)
-def OperatorStartOutputsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def StartOutputsVector(builder, numElems):
-    return OperatorStartOutputsVector(builder, numElems)
-def OperatorAddBuiltinOptionsType(builder, builtinOptionsType): builder.PrependUint8Slot(3, builtinOptionsType, 0)
-def AddBuiltinOptionsType(builder, builtinOptionsType):
-    return OperatorAddBuiltinOptionsType(builder, builtinOptionsType)
-def OperatorAddBuiltinOptions(builder, builtinOptions): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(builtinOptions), 0)
-def AddBuiltinOptions(builder, builtinOptions):
-    return OperatorAddBuiltinOptions(builder, builtinOptions)
-def OperatorAddCustomOptions(builder, customOptions): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(customOptions), 0)
-def AddCustomOptions(builder, customOptions):
-    return OperatorAddCustomOptions(builder, customOptions)
-def OperatorStartCustomOptionsVector(builder, numElems): return builder.StartVector(1, numElems, 1)
-def StartCustomOptionsVector(builder, numElems):
-    return OperatorStartCustomOptionsVector(builder, numElems)
-def OperatorAddCustomOptionsFormat(builder, customOptionsFormat): builder.PrependInt8Slot(6, customOptionsFormat, 0)
-def AddCustomOptionsFormat(builder, customOptionsFormat):
-    return OperatorAddCustomOptionsFormat(builder, customOptionsFormat)
-def OperatorAddMutatingVariableInputs(builder, mutatingVariableInputs): builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(mutatingVariableInputs), 0)
-def AddMutatingVariableInputs(builder, mutatingVariableInputs):
-    return OperatorAddMutatingVariableInputs(builder, mutatingVariableInputs)
-def OperatorStartMutatingVariableInputsVector(builder, numElems): return builder.StartVector(1, numElems, 1)
-def StartMutatingVariableInputsVector(builder, numElems):
-    return OperatorStartMutatingVariableInputsVector(builder, numElems)
-def OperatorAddIntermediates(builder, intermediates): builder.PrependUOffsetTRelativeSlot(8, flatbuffers.number_types.UOffsetTFlags.py_type(intermediates), 0)
-def AddIntermediates(builder, intermediates):
-    return OperatorAddIntermediates(builder, intermediates)
-def OperatorStartIntermediatesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def StartIntermediatesVector(builder, numElems):
-    return OperatorStartIntermediatesVector(builder, numElems)
-def OperatorAddLargeCustomOptionsOffset(builder, largeCustomOptionsOffset): builder.PrependUint64Slot(9, largeCustomOptionsOffset, 0)
-def AddLargeCustomOptionsOffset(builder, largeCustomOptionsOffset):
-    return OperatorAddLargeCustomOptionsOffset(builder, largeCustomOptionsOffset)
-def OperatorAddLargeCustomOptionsSize(builder, largeCustomOptionsSize): builder.PrependUint64Slot(10, largeCustomOptionsSize, 0)
-def AddLargeCustomOptionsSize(builder, largeCustomOptionsSize):
-    return OperatorAddLargeCustomOptionsSize(builder, largeCustomOptionsSize)
-def OperatorEnd(builder): return builder.EndObject()
-def End(builder):
-    return OperatorEnd(builder)
+    # Operator
+    def BuiltinOptions2Type(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
+        return 0
+
+    # Operator
+    def BuiltinOptions2(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(28))
+        if o != 0:
+            from flatbuffers.table import Table
+            obj = Table(bytearray(), 0)
+            self._tab.Union(obj, o)
+            return obj
+        return None
+
+def OperatorStart(builder):
+    builder.StartObject(13)
+
+def OperatorAddOpcodeIndex(builder, opcodeIndex):
+    builder.PrependUint32Slot(0, opcodeIndex, 0)
+
+def OperatorAddInputs(builder, inputs):
+    builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0)
+
+def OperatorStartInputsVector(builder, numElems):
+    return builder.StartVector(4, numElems, 4)
+
+def OperatorAddOutputs(builder, outputs):
+    builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0)
+
+def OperatorStartOutputsVector(builder, numElems):
+    return builder.StartVector(4, numElems, 4)
+
+def OperatorAddBuiltinOptionsType(builder, builtinOptionsType):
+    builder.PrependUint8Slot(3, builtinOptionsType, 0)
+
+def OperatorAddBuiltinOptions(builder, builtinOptions):
+    builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(builtinOptions), 0)
+
+def OperatorAddCustomOptions(builder, customOptions):
+    builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(customOptions), 0)
+
+def OperatorStartCustomOptionsVector(builder, numElems):
+    return builder.StartVector(1, numElems, 1)
+
+def OperatorAddCustomOptionsFormat(builder, customOptionsFormat):
+    builder.PrependInt8Slot(6, customOptionsFormat, 0)
+
+def OperatorAddMutatingVariableInputs(builder, mutatingVariableInputs):
+    builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(mutatingVariableInputs), 0)
+
+def OperatorStartMutatingVariableInputsVector(builder, numElems):
+    return builder.StartVector(1, numElems, 1)
+
+def OperatorAddIntermediates(builder, intermediates):
+    builder.PrependUOffsetTRelativeSlot(8, flatbuffers.number_types.UOffsetTFlags.py_type(intermediates), 0)
+
+def OperatorStartIntermediatesVector(builder, numElems):
+    return builder.StartVector(4, numElems, 4)
+
+def OperatorAddLargeCustomOptionsOffset(builder, largeCustomOptionsOffset):
+    builder.PrependUint64Slot(9, largeCustomOptionsOffset, 0)
+
+def OperatorAddLargeCustomOptionsSize(builder, largeCustomOptionsSize):
+    builder.PrependUint64Slot(10, largeCustomOptionsSize, 0)
+
+def OperatorAddBuiltinOptions2Type(builder, builtinOptions2Type):
+    builder.PrependUint8Slot(11, builtinOptions2Type, 0)
+
+def OperatorAddBuiltinOptions2(builder, builtinOptions2):
+    builder.PrependUOffsetTRelativeSlot(12, flatbuffers.number_types.UOffsetTFlags.py_type(builtinOptions2), 0)
+
+def OperatorEnd(builder):
+    return builder.EndObject()
+
+
 try:
     from typing import List, Union
 except:
@@ -7647,6 +16998,8 @@
         self.intermediates = None  # type: List[int]
         self.largeCustomOptionsOffset = 0  # type: int
         self.largeCustomOptionsSize = 0  # type: int
+        self.builtinOptions2Type = 0  # type: int
+        self.builtinOptions2 = None  # type: Union[None, StablehloConcatenateOptionsT, StablehloBroadcastInDimOptionsT, StablehloSliceOptionsT, StablehloConvolutionOptionsT, StablehloCustomCallOptionsT, StablehloReduceOptionsT, StablehloScatterOptionsT, StablehloCompareOptionsT, StablehloDynamicSliceOptionsT, StablehloPadOptionsT, StablehloIotaOptionsT, StablehloDotGeneralOptionsT, StablehloReduceWindowOptionsT, StablehloSortOptionsT, StablehloWhileOptionsT, StablehloGatherOptionsT, StablehloTransposeOptionsT, DilateOptionsT, StablehloRngBitGeneratorOptionsT, ReduceWindowOptionsT, StableHLOCompositeOptionsT]
 
     @classmethod
     def InitFromBuf(cls, buf, pos):
@@ -7655,6 +17008,11 @@
         return cls.InitFromObj(operator)
 
     @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
     def InitFromObj(cls, operator):
         x = OperatorT()
         x._UnPack(operator)
@@ -7705,6 +17063,8 @@
                 self.intermediates = operator.IntermediatesAsNumpy()
         self.largeCustomOptionsOffset = operator.LargeCustomOptionsOffset()
         self.largeCustomOptionsSize = operator.LargeCustomOptionsSize()
+        self.builtinOptions2Type = operator.BuiltinOptions2Type()
+        self.builtinOptions2 = BuiltinOptions2Creator(self.builtinOptions2Type, operator.BuiltinOptions2())
 
     # OperatorT
     def Pack(self, builder):
@@ -7750,6 +17110,8 @@
                 for i in reversed(range(len(self.intermediates))):
                     builder.PrependInt32(self.intermediates[i])
                 intermediates = builder.EndVector()
+        if self.builtinOptions2 is not None:
+            builtinOptions2 = self.builtinOptions2.Pack(builder)
         OperatorStart(builder)
         OperatorAddOpcodeIndex(builder, self.opcodeIndex)
         if self.inputs is not None:
@@ -7768,4009 +17130,12 @@
             OperatorAddIntermediates(builder, intermediates)
         OperatorAddLargeCustomOptionsOffset(builder, self.largeCustomOptionsOffset)
         OperatorAddLargeCustomOptionsSize(builder, self.largeCustomOptionsSize)
+        OperatorAddBuiltinOptions2Type(builder, self.builtinOptions2Type)
+        if self.builtinOptions2 is not None:
+            OperatorAddBuiltinOptions2(builder, builtinOptions2)
         operator = OperatorEnd(builder)
         return operator
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class OperatorCode(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = OperatorCode()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsOperatorCode(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def OperatorCodeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # OperatorCode
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # OperatorCode
-    def DeprecatedBuiltinCode(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # OperatorCode
-    def CustomCode(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.String(o + self._tab.Pos)
-        return None
-
-    # OperatorCode
-    def Version(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 1
-
-    # OperatorCode
-    def BuiltinCode(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-def OperatorCodeStart(builder): builder.StartObject(4)
-def Start(builder):
-    return OperatorCodeStart(builder)
-def OperatorCodeAddDeprecatedBuiltinCode(builder, deprecatedBuiltinCode): builder.PrependInt8Slot(0, deprecatedBuiltinCode, 0)
-def AddDeprecatedBuiltinCode(builder, deprecatedBuiltinCode):
-    return OperatorCodeAddDeprecatedBuiltinCode(builder, deprecatedBuiltinCode)
-def OperatorCodeAddCustomCode(builder, customCode): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(customCode), 0)
-def AddCustomCode(builder, customCode):
-    return OperatorCodeAddCustomCode(builder, customCode)
-def OperatorCodeAddVersion(builder, version): builder.PrependInt32Slot(2, version, 1)
-def AddVersion(builder, version):
-    return OperatorCodeAddVersion(builder, version)
-def OperatorCodeAddBuiltinCode(builder, builtinCode): builder.PrependInt32Slot(3, builtinCode, 0)
-def AddBuiltinCode(builder, builtinCode):
-    return OperatorCodeAddBuiltinCode(builder, builtinCode)
-def OperatorCodeEnd(builder): return builder.EndObject()
-def End(builder):
-    return OperatorCodeEnd(builder)
-
-class OperatorCodeT(object):
-
-    # OperatorCodeT
-    def __init__(self):
-        self.deprecatedBuiltinCode = 0  # type: int
-        self.customCode = None  # type: str
-        self.version = 1  # type: int
-        self.builtinCode = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        operatorCode = OperatorCode()
-        operatorCode.Init(buf, pos)
-        return cls.InitFromObj(operatorCode)
-
-    @classmethod
-    def InitFromObj(cls, operatorCode):
-        x = OperatorCodeT()
-        x._UnPack(operatorCode)
-        return x
-
-    # OperatorCodeT
-    def _UnPack(self, operatorCode):
-        if operatorCode is None:
-            return
-        self.deprecatedBuiltinCode = operatorCode.DeprecatedBuiltinCode()
-        self.customCode = operatorCode.CustomCode()
-        self.version = operatorCode.Version()
-        self.builtinCode = operatorCode.BuiltinCode()
-
-    # OperatorCodeT
-    def Pack(self, builder):
-        if self.customCode is not None:
-            customCode = builder.CreateString(self.customCode)
-        OperatorCodeStart(builder)
-        OperatorCodeAddDeprecatedBuiltinCode(builder, self.deprecatedBuiltinCode)
-        if self.customCode is not None:
-            OperatorCodeAddCustomCode(builder, customCode)
-        OperatorCodeAddVersion(builder, self.version)
-        OperatorCodeAddBuiltinCode(builder, self.builtinCode)
-        operatorCode = OperatorCodeEnd(builder)
-        return operatorCode
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class PackOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = PackOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsPackOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def PackOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # PackOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # PackOptions
-    def ValuesCount(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # PackOptions
-    def Axis(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-def PackOptionsStart(builder): builder.StartObject(2)
-def Start(builder):
-    return PackOptionsStart(builder)
-def PackOptionsAddValuesCount(builder, valuesCount): builder.PrependInt32Slot(0, valuesCount, 0)
-def AddValuesCount(builder, valuesCount):
-    return PackOptionsAddValuesCount(builder, valuesCount)
-def PackOptionsAddAxis(builder, axis): builder.PrependInt32Slot(1, axis, 0)
-def AddAxis(builder, axis):
-    return PackOptionsAddAxis(builder, axis)
-def PackOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return PackOptionsEnd(builder)
-
-class PackOptionsT(object):
-
-    # PackOptionsT
-    def __init__(self):
-        self.valuesCount = 0  # type: int
-        self.axis = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        packOptions = PackOptions()
-        packOptions.Init(buf, pos)
-        return cls.InitFromObj(packOptions)
-
-    @classmethod
-    def InitFromObj(cls, packOptions):
-        x = PackOptionsT()
-        x._UnPack(packOptions)
-        return x
-
-    # PackOptionsT
-    def _UnPack(self, packOptions):
-        if packOptions is None:
-            return
-        self.valuesCount = packOptions.ValuesCount()
-        self.axis = packOptions.Axis()
-
-    # PackOptionsT
-    def Pack(self, builder):
-        PackOptionsStart(builder)
-        PackOptionsAddValuesCount(builder, self.valuesCount)
-        PackOptionsAddAxis(builder, self.axis)
-        packOptions = PackOptionsEnd(builder)
-        return packOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class PadOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = PadOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsPadOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def PadOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # PadOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def PadOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return PadOptionsStart(builder)
-def PadOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return PadOptionsEnd(builder)
-
-class PadOptionsT(object):
-
-    # PadOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        padOptions = PadOptions()
-        padOptions.Init(buf, pos)
-        return cls.InitFromObj(padOptions)
-
-    @classmethod
-    def InitFromObj(cls, padOptions):
-        x = PadOptionsT()
-        x._UnPack(padOptions)
-        return x
-
-    # PadOptionsT
-    def _UnPack(self, padOptions):
-        if padOptions is None:
-            return
-
-    # PadOptionsT
-    def Pack(self, builder):
-        PadOptionsStart(builder)
-        padOptions = PadOptionsEnd(builder)
-        return padOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class PadV2Options(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = PadV2Options()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsPadV2Options(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def PadV2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # PadV2Options
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def PadV2OptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return PadV2OptionsStart(builder)
-def PadV2OptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return PadV2OptionsEnd(builder)
-
-class PadV2OptionsT(object):
-
-    # PadV2OptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        padV2options = PadV2Options()
-        padV2options.Init(buf, pos)
-        return cls.InitFromObj(padV2options)
-
-    @classmethod
-    def InitFromObj(cls, padV2options):
-        x = PadV2OptionsT()
-        x._UnPack(padV2options)
-        return x
-
-    # PadV2OptionsT
-    def _UnPack(self, padV2options):
-        if padV2options is None:
-            return
-
-    # PadV2OptionsT
-    def Pack(self, builder):
-        PadV2OptionsStart(builder)
-        padV2options = PadV2OptionsEnd(builder)
-        return padV2options
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-class Padding(object):
-    SAME = 0
-    VALID = 1
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class Pool2DOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = Pool2DOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsPool2DOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def Pool2DOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # Pool2DOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # Pool2DOptions
-    def Padding(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # Pool2DOptions
-    def StrideW(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # Pool2DOptions
-    def StrideH(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # Pool2DOptions
-    def FilterWidth(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # Pool2DOptions
-    def FilterHeight(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # Pool2DOptions
-    def FusedActivationFunction(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-def Pool2DOptionsStart(builder): builder.StartObject(6)
-def Start(builder):
-    return Pool2DOptionsStart(builder)
-def Pool2DOptionsAddPadding(builder, padding): builder.PrependInt8Slot(0, padding, 0)
-def AddPadding(builder, padding):
-    return Pool2DOptionsAddPadding(builder, padding)
-def Pool2DOptionsAddStrideW(builder, strideW): builder.PrependInt32Slot(1, strideW, 0)
-def AddStrideW(builder, strideW):
-    return Pool2DOptionsAddStrideW(builder, strideW)
-def Pool2DOptionsAddStrideH(builder, strideH): builder.PrependInt32Slot(2, strideH, 0)
-def AddStrideH(builder, strideH):
-    return Pool2DOptionsAddStrideH(builder, strideH)
-def Pool2DOptionsAddFilterWidth(builder, filterWidth): builder.PrependInt32Slot(3, filterWidth, 0)
-def AddFilterWidth(builder, filterWidth):
-    return Pool2DOptionsAddFilterWidth(builder, filterWidth)
-def Pool2DOptionsAddFilterHeight(builder, filterHeight): builder.PrependInt32Slot(4, filterHeight, 0)
-def AddFilterHeight(builder, filterHeight):
-    return Pool2DOptionsAddFilterHeight(builder, filterHeight)
-def Pool2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(5, fusedActivationFunction, 0)
-def AddFusedActivationFunction(builder, fusedActivationFunction):
-    return Pool2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction)
-def Pool2DOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return Pool2DOptionsEnd(builder)
-
-class Pool2DOptionsT(object):
-
-    # Pool2DOptionsT
-    def __init__(self):
-        self.padding = 0  # type: int
-        self.strideW = 0  # type: int
-        self.strideH = 0  # type: int
-        self.filterWidth = 0  # type: int
-        self.filterHeight = 0  # type: int
-        self.fusedActivationFunction = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        pool2doptions = Pool2DOptions()
-        pool2doptions.Init(buf, pos)
-        return cls.InitFromObj(pool2doptions)
-
-    @classmethod
-    def InitFromObj(cls, pool2doptions):
-        x = Pool2DOptionsT()
-        x._UnPack(pool2doptions)
-        return x
-
-    # Pool2DOptionsT
-    def _UnPack(self, pool2doptions):
-        if pool2doptions is None:
-            return
-        self.padding = pool2doptions.Padding()
-        self.strideW = pool2doptions.StrideW()
-        self.strideH = pool2doptions.StrideH()
-        self.filterWidth = pool2doptions.FilterWidth()
-        self.filterHeight = pool2doptions.FilterHeight()
-        self.fusedActivationFunction = pool2doptions.FusedActivationFunction()
-
-    # Pool2DOptionsT
-    def Pack(self, builder):
-        Pool2DOptionsStart(builder)
-        Pool2DOptionsAddPadding(builder, self.padding)
-        Pool2DOptionsAddStrideW(builder, self.strideW)
-        Pool2DOptionsAddStrideH(builder, self.strideH)
-        Pool2DOptionsAddFilterWidth(builder, self.filterWidth)
-        Pool2DOptionsAddFilterHeight(builder, self.filterHeight)
-        Pool2DOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        pool2doptions = Pool2DOptionsEnd(builder)
-        return pool2doptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class PowOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = PowOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsPowOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def PowOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # PowOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def PowOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return PowOptionsStart(builder)
-def PowOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return PowOptionsEnd(builder)
-
-class PowOptionsT(object):
-
-    # PowOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        powOptions = PowOptions()
-        powOptions.Init(buf, pos)
-        return cls.InitFromObj(powOptions)
-
-    @classmethod
-    def InitFromObj(cls, powOptions):
-        x = PowOptionsT()
-        x._UnPack(powOptions)
-        return x
-
-    # PowOptionsT
-    def _UnPack(self, powOptions):
-        if powOptions is None:
-            return
-
-    # PowOptionsT
-    def Pack(self, builder):
-        PowOptionsStart(builder)
-        powOptions = PowOptionsEnd(builder)
-        return powOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-class QuantizationDetails(object):
-    NONE = 0
-    CustomQuantization = 1
-
-def QuantizationDetailsCreator(unionType, table):
-    from flatbuffers.table import Table
-    if not isinstance(table, Table):
-        return None
-    if unionType == QuantizationDetails().CustomQuantization:
-        return CustomQuantizationT.InitFromBuf(table.Bytes, table.Pos)
-    return None
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class QuantizationParameters(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = QuantizationParameters()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsQuantizationParameters(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def QuantizationParametersBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # QuantizationParameters
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # QuantizationParameters
-    def Min(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
-        return 0
-
-    # QuantizationParameters
-    def MinAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
-        return 0
-
-    # QuantizationParameters
-    def MinLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # QuantizationParameters
-    def MinIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        return o == 0
-
-    # QuantizationParameters
-    def Max(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
-        return 0
-
-    # QuantizationParameters
-    def MaxAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
-        return 0
-
-    # QuantizationParameters
-    def MaxLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # QuantizationParameters
-    def MaxIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        return o == 0
-
-    # QuantizationParameters
-    def Scale(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
-        return 0
-
-    # QuantizationParameters
-    def ScaleAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
-        return 0
-
-    # QuantizationParameters
-    def ScaleLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # QuantizationParameters
-    def ScaleIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        return o == 0
-
-    # QuantizationParameters
-    def ZeroPoint(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
-        return 0
-
-    # QuantizationParameters
-    def ZeroPointAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
-        return 0
-
-    # QuantizationParameters
-    def ZeroPointLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # QuantizationParameters
-    def ZeroPointIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        return o == 0
-
-    # QuantizationParameters
-    def DetailsType(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
-        return 0
-
-    # QuantizationParameters
-    def Details(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
-        if o != 0:
-            from flatbuffers.table import Table
-            obj = Table(bytearray(), 0)
-            self._tab.Union(obj, o)
-            return obj
-        return None
-
-    # QuantizationParameters
-    def QuantizedDimension(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-def QuantizationParametersStart(builder): builder.StartObject(7)
-def Start(builder):
-    return QuantizationParametersStart(builder)
-def QuantizationParametersAddMin(builder, min): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(min), 0)
-def AddMin(builder, min):
-    return QuantizationParametersAddMin(builder, min)
-def QuantizationParametersStartMinVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def StartMinVector(builder, numElems):
-    return QuantizationParametersStartMinVector(builder, numElems)
-def QuantizationParametersAddMax(builder, max): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(max), 0)
-def AddMax(builder, max):
-    return QuantizationParametersAddMax(builder, max)
-def QuantizationParametersStartMaxVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def StartMaxVector(builder, numElems):
-    return QuantizationParametersStartMaxVector(builder, numElems)
-def QuantizationParametersAddScale(builder, scale): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(scale), 0)
-def AddScale(builder, scale):
-    return QuantizationParametersAddScale(builder, scale)
-def QuantizationParametersStartScaleVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def StartScaleVector(builder, numElems):
-    return QuantizationParametersStartScaleVector(builder, numElems)
-def QuantizationParametersAddZeroPoint(builder, zeroPoint): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(zeroPoint), 0)
-def AddZeroPoint(builder, zeroPoint):
-    return QuantizationParametersAddZeroPoint(builder, zeroPoint)
-def QuantizationParametersStartZeroPointVector(builder, numElems): return builder.StartVector(8, numElems, 8)
-def StartZeroPointVector(builder, numElems):
-    return QuantizationParametersStartZeroPointVector(builder, numElems)
-def QuantizationParametersAddDetailsType(builder, detailsType): builder.PrependUint8Slot(4, detailsType, 0)
-def AddDetailsType(builder, detailsType):
-    return QuantizationParametersAddDetailsType(builder, detailsType)
-def QuantizationParametersAddDetails(builder, details): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(details), 0)
-def AddDetails(builder, details):
-    return QuantizationParametersAddDetails(builder, details)
-def QuantizationParametersAddQuantizedDimension(builder, quantizedDimension): builder.PrependInt32Slot(6, quantizedDimension, 0)
-def AddQuantizedDimension(builder, quantizedDimension):
-    return QuantizationParametersAddQuantizedDimension(builder, quantizedDimension)
-def QuantizationParametersEnd(builder): return builder.EndObject()
-def End(builder):
-    return QuantizationParametersEnd(builder)
-try:
-    from typing import List, Union
-except:
-    pass
-
-class QuantizationParametersT(object):
-
-    # QuantizationParametersT
-    def __init__(self):
-        self.min = None  # type: List[float]
-        self.max = None  # type: List[float]
-        self.scale = None  # type: List[float]
-        self.zeroPoint = None  # type: List[int]
-        self.detailsType = 0  # type: int
-        self.details = None  # type: Union[None, CustomQuantizationT]
-        self.quantizedDimension = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        quantizationParameters = QuantizationParameters()
-        quantizationParameters.Init(buf, pos)
-        return cls.InitFromObj(quantizationParameters)
-
-    @classmethod
-    def InitFromObj(cls, quantizationParameters):
-        x = QuantizationParametersT()
-        x._UnPack(quantizationParameters)
-        return x
-
-    # QuantizationParametersT
-    def _UnPack(self, quantizationParameters):
-        if quantizationParameters is None:
-            return
-        if not quantizationParameters.MinIsNone():
-            if np is None:
-                self.min = []
-                for i in range(quantizationParameters.MinLength()):
-                    self.min.append(quantizationParameters.Min(i))
-            else:
-                self.min = quantizationParameters.MinAsNumpy()
-        if not quantizationParameters.MaxIsNone():
-            if np is None:
-                self.max = []
-                for i in range(quantizationParameters.MaxLength()):
-                    self.max.append(quantizationParameters.Max(i))
-            else:
-                self.max = quantizationParameters.MaxAsNumpy()
-        if not quantizationParameters.ScaleIsNone():
-            if np is None:
-                self.scale = []
-                for i in range(quantizationParameters.ScaleLength()):
-                    self.scale.append(quantizationParameters.Scale(i))
-            else:
-                self.scale = quantizationParameters.ScaleAsNumpy()
-        if not quantizationParameters.ZeroPointIsNone():
-            if np is None:
-                self.zeroPoint = []
-                for i in range(quantizationParameters.ZeroPointLength()):
-                    self.zeroPoint.append(quantizationParameters.ZeroPoint(i))
-            else:
-                self.zeroPoint = quantizationParameters.ZeroPointAsNumpy()
-        self.detailsType = quantizationParameters.DetailsType()
-        self.details = QuantizationDetailsCreator(self.detailsType, quantizationParameters.Details())
-        self.quantizedDimension = quantizationParameters.QuantizedDimension()
-
-    # QuantizationParametersT
-    def Pack(self, builder):
-        if self.min is not None:
-            if np is not None and type(self.min) is np.ndarray:
-                min = builder.CreateNumpyVector(self.min)
-            else:
-                QuantizationParametersStartMinVector(builder, len(self.min))
-                for i in reversed(range(len(self.min))):
-                    builder.PrependFloat32(self.min[i])
-                min = builder.EndVector()
-        if self.max is not None:
-            if np is not None and type(self.max) is np.ndarray:
-                max = builder.CreateNumpyVector(self.max)
-            else:
-                QuantizationParametersStartMaxVector(builder, len(self.max))
-                for i in reversed(range(len(self.max))):
-                    builder.PrependFloat32(self.max[i])
-                max = builder.EndVector()
-        if self.scale is not None:
-            if np is not None and type(self.scale) is np.ndarray:
-                scale = builder.CreateNumpyVector(self.scale)
-            else:
-                QuantizationParametersStartScaleVector(builder, len(self.scale))
-                for i in reversed(range(len(self.scale))):
-                    builder.PrependFloat32(self.scale[i])
-                scale = builder.EndVector()
-        if self.zeroPoint is not None:
-            if np is not None and type(self.zeroPoint) is np.ndarray:
-                zeroPoint = builder.CreateNumpyVector(self.zeroPoint)
-            else:
-                QuantizationParametersStartZeroPointVector(builder, len(self.zeroPoint))
-                for i in reversed(range(len(self.zeroPoint))):
-                    builder.PrependInt64(self.zeroPoint[i])
-                zeroPoint = builder.EndVector()
-        if self.details is not None:
-            details = self.details.Pack(builder)
-        QuantizationParametersStart(builder)
-        if self.min is not None:
-            QuantizationParametersAddMin(builder, min)
-        if self.max is not None:
-            QuantizationParametersAddMax(builder, max)
-        if self.scale is not None:
-            QuantizationParametersAddScale(builder, scale)
-        if self.zeroPoint is not None:
-            QuantizationParametersAddZeroPoint(builder, zeroPoint)
-        QuantizationParametersAddDetailsType(builder, self.detailsType)
-        if self.details is not None:
-            QuantizationParametersAddDetails(builder, details)
-        QuantizationParametersAddQuantizedDimension(builder, self.quantizedDimension)
-        quantizationParameters = QuantizationParametersEnd(builder)
-        return quantizationParameters
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class QuantizeOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = QuantizeOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsQuantizeOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def QuantizeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # QuantizeOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def QuantizeOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return QuantizeOptionsStart(builder)
-def QuantizeOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return QuantizeOptionsEnd(builder)
-
-class QuantizeOptionsT(object):
-
-    # QuantizeOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        quantizeOptions = QuantizeOptions()
-        quantizeOptions.Init(buf, pos)
-        return cls.InitFromObj(quantizeOptions)
-
-    @classmethod
-    def InitFromObj(cls, quantizeOptions):
-        x = QuantizeOptionsT()
-        x._UnPack(quantizeOptions)
-        return x
-
-    # QuantizeOptionsT
-    def _UnPack(self, quantizeOptions):
-        if quantizeOptions is None:
-            return
-
-    # QuantizeOptionsT
-    def Pack(self, builder):
-        QuantizeOptionsStart(builder)
-        quantizeOptions = QuantizeOptionsEnd(builder)
-        return quantizeOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class RNNOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = RNNOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsRNNOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def RNNOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # RNNOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # RNNOptions
-    def FusedActivationFunction(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # RNNOptions
-    def AsymmetricQuantizeInputs(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-def RNNOptionsStart(builder): builder.StartObject(2)
-def Start(builder):
-    return RNNOptionsStart(builder)
-def RNNOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0)
-def AddFusedActivationFunction(builder, fusedActivationFunction):
-    return RNNOptionsAddFusedActivationFunction(builder, fusedActivationFunction)
-def RNNOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): builder.PrependBoolSlot(1, asymmetricQuantizeInputs, 0)
-def AddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs):
-    return RNNOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs)
-def RNNOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return RNNOptionsEnd(builder)
-
-class RNNOptionsT(object):
-
-    # RNNOptionsT
-    def __init__(self):
-        self.fusedActivationFunction = 0  # type: int
-        self.asymmetricQuantizeInputs = False  # type: bool
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        rnnoptions = RNNOptions()
-        rnnoptions.Init(buf, pos)
-        return cls.InitFromObj(rnnoptions)
-
-    @classmethod
-    def InitFromObj(cls, rnnoptions):
-        x = RNNOptionsT()
-        x._UnPack(rnnoptions)
-        return x
-
-    # RNNOptionsT
-    def _UnPack(self, rnnoptions):
-        if rnnoptions is None:
-            return
-        self.fusedActivationFunction = rnnoptions.FusedActivationFunction()
-        self.asymmetricQuantizeInputs = rnnoptions.AsymmetricQuantizeInputs()
-
-    # RNNOptionsT
-    def Pack(self, builder):
-        RNNOptionsStart(builder)
-        RNNOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        RNNOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs)
-        rnnoptions = RNNOptionsEnd(builder)
-        return rnnoptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class RandomOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = RandomOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsRandomOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def RandomOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # RandomOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # RandomOptions
-    def Seed(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
-        return 0
-
-    # RandomOptions
-    def Seed2(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
-        return 0
-
-def RandomOptionsStart(builder): builder.StartObject(2)
-def Start(builder):
-    return RandomOptionsStart(builder)
-def RandomOptionsAddSeed(builder, seed): builder.PrependInt64Slot(0, seed, 0)
-def AddSeed(builder, seed):
-    return RandomOptionsAddSeed(builder, seed)
-def RandomOptionsAddSeed2(builder, seed2): builder.PrependInt64Slot(1, seed2, 0)
-def AddSeed2(builder, seed2):
-    return RandomOptionsAddSeed2(builder, seed2)
-def RandomOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return RandomOptionsEnd(builder)
-
-class RandomOptionsT(object):
-
-    # RandomOptionsT
-    def __init__(self):
-        self.seed = 0  # type: int
-        self.seed2 = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        randomOptions = RandomOptions()
-        randomOptions.Init(buf, pos)
-        return cls.InitFromObj(randomOptions)
-
-    @classmethod
-    def InitFromObj(cls, randomOptions):
-        x = RandomOptionsT()
-        x._UnPack(randomOptions)
-        return x
-
-    # RandomOptionsT
-    def _UnPack(self, randomOptions):
-        if randomOptions is None:
-            return
-        self.seed = randomOptions.Seed()
-        self.seed2 = randomOptions.Seed2()
-
-    # RandomOptionsT
-    def Pack(self, builder):
-        RandomOptionsStart(builder)
-        RandomOptionsAddSeed(builder, self.seed)
-        RandomOptionsAddSeed2(builder, self.seed2)
-        randomOptions = RandomOptionsEnd(builder)
-        return randomOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class RangeOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = RangeOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsRangeOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def RangeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # RangeOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def RangeOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return RangeOptionsStart(builder)
-def RangeOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return RangeOptionsEnd(builder)
-
-class RangeOptionsT(object):
-
-    # RangeOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        rangeOptions = RangeOptions()
-        rangeOptions.Init(buf, pos)
-        return cls.InitFromObj(rangeOptions)
-
-    @classmethod
-    def InitFromObj(cls, rangeOptions):
-        x = RangeOptionsT()
-        x._UnPack(rangeOptions)
-        return x
-
-    # RangeOptionsT
-    def _UnPack(self, rangeOptions):
-        if rangeOptions is None:
-            return
-
-    # RangeOptionsT
-    def Pack(self, builder):
-        RangeOptionsStart(builder)
-        rangeOptions = RangeOptionsEnd(builder)
-        return rangeOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class RankOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = RankOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsRankOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def RankOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # RankOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def RankOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return RankOptionsStart(builder)
-def RankOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return RankOptionsEnd(builder)
-
-class RankOptionsT(object):
-
-    # RankOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        rankOptions = RankOptions()
-        rankOptions.Init(buf, pos)
-        return cls.InitFromObj(rankOptions)
-
-    @classmethod
-    def InitFromObj(cls, rankOptions):
-        x = RankOptionsT()
-        x._UnPack(rankOptions)
-        return x
-
-    # RankOptionsT
-    def _UnPack(self, rankOptions):
-        if rankOptions is None:
-            return
-
-    # RankOptionsT
-    def Pack(self, builder):
-        RankOptionsStart(builder)
-        rankOptions = RankOptionsEnd(builder)
-        return rankOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class ReadVariableOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = ReadVariableOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsReadVariableOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def ReadVariableOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # ReadVariableOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def ReadVariableOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return ReadVariableOptionsStart(builder)
-def ReadVariableOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return ReadVariableOptionsEnd(builder)
-
-class ReadVariableOptionsT(object):
-
-    # ReadVariableOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        readVariableOptions = ReadVariableOptions()
-        readVariableOptions.Init(buf, pos)
-        return cls.InitFromObj(readVariableOptions)
-
-    @classmethod
-    def InitFromObj(cls, readVariableOptions):
-        x = ReadVariableOptionsT()
-        x._UnPack(readVariableOptions)
-        return x
-
-    # ReadVariableOptionsT
-    def _UnPack(self, readVariableOptions):
-        if readVariableOptions is None:
-            return
-
-    # ReadVariableOptionsT
-    def Pack(self, builder):
-        ReadVariableOptionsStart(builder)
-        readVariableOptions = ReadVariableOptionsEnd(builder)
-        return readVariableOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class ReducerOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = ReducerOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsReducerOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def ReducerOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # ReducerOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # ReducerOptions
-    def KeepDims(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-def ReducerOptionsStart(builder): builder.StartObject(1)
-def Start(builder):
-    return ReducerOptionsStart(builder)
-def ReducerOptionsAddKeepDims(builder, keepDims): builder.PrependBoolSlot(0, keepDims, 0)
-def AddKeepDims(builder, keepDims):
-    return ReducerOptionsAddKeepDims(builder, keepDims)
-def ReducerOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return ReducerOptionsEnd(builder)
-
-class ReducerOptionsT(object):
-
-    # ReducerOptionsT
-    def __init__(self):
-        self.keepDims = False  # type: bool
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        reducerOptions = ReducerOptions()
-        reducerOptions.Init(buf, pos)
-        return cls.InitFromObj(reducerOptions)
-
-    @classmethod
-    def InitFromObj(cls, reducerOptions):
-        x = ReducerOptionsT()
-        x._UnPack(reducerOptions)
-        return x
-
-    # ReducerOptionsT
-    def _UnPack(self, reducerOptions):
-        if reducerOptions is None:
-            return
-        self.keepDims = reducerOptions.KeepDims()
-
-    # ReducerOptionsT
-    def Pack(self, builder):
-        ReducerOptionsStart(builder)
-        ReducerOptionsAddKeepDims(builder, self.keepDims)
-        reducerOptions = ReducerOptionsEnd(builder)
-        return reducerOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class ReshapeOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = ReshapeOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsReshapeOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def ReshapeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # ReshapeOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # ReshapeOptions
-    def NewShape(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
-        return 0
-
-    # ReshapeOptions
-    def NewShapeAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
-        return 0
-
-    # ReshapeOptions
-    def NewShapeLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # ReshapeOptions
-    def NewShapeIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        return o == 0
-
-def ReshapeOptionsStart(builder): builder.StartObject(1)
-def Start(builder):
-    return ReshapeOptionsStart(builder)
-def ReshapeOptionsAddNewShape(builder, newShape): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(newShape), 0)
-def AddNewShape(builder, newShape):
-    return ReshapeOptionsAddNewShape(builder, newShape)
-def ReshapeOptionsStartNewShapeVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def StartNewShapeVector(builder, numElems):
-    return ReshapeOptionsStartNewShapeVector(builder, numElems)
-def ReshapeOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return ReshapeOptionsEnd(builder)
-try:
-    from typing import List
-except:
-    pass
-
-class ReshapeOptionsT(object):
-
-    # ReshapeOptionsT
-    def __init__(self):
-        self.newShape = None  # type: List[int]
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        reshapeOptions = ReshapeOptions()
-        reshapeOptions.Init(buf, pos)
-        return cls.InitFromObj(reshapeOptions)
-
-    @classmethod
-    def InitFromObj(cls, reshapeOptions):
-        x = ReshapeOptionsT()
-        x._UnPack(reshapeOptions)
-        return x
-
-    # ReshapeOptionsT
-    def _UnPack(self, reshapeOptions):
-        if reshapeOptions is None:
-            return
-        if not reshapeOptions.NewShapeIsNone():
-            if np is None:
-                self.newShape = []
-                for i in range(reshapeOptions.NewShapeLength()):
-                    self.newShape.append(reshapeOptions.NewShape(i))
-            else:
-                self.newShape = reshapeOptions.NewShapeAsNumpy()
-
-    # ReshapeOptionsT
-    def Pack(self, builder):
-        if self.newShape is not None:
-            if np is not None and type(self.newShape) is np.ndarray:
-                newShape = builder.CreateNumpyVector(self.newShape)
-            else:
-                ReshapeOptionsStartNewShapeVector(builder, len(self.newShape))
-                for i in reversed(range(len(self.newShape))):
-                    builder.PrependInt32(self.newShape[i])
-                newShape = builder.EndVector()
-        ReshapeOptionsStart(builder)
-        if self.newShape is not None:
-            ReshapeOptionsAddNewShape(builder, newShape)
-        reshapeOptions = ReshapeOptionsEnd(builder)
-        return reshapeOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class ResizeBilinearOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = ResizeBilinearOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsResizeBilinearOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def ResizeBilinearOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # ResizeBilinearOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # ResizeBilinearOptions
-    def AlignCorners(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-    # ResizeBilinearOptions
-    def HalfPixelCenters(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-def ResizeBilinearOptionsStart(builder): builder.StartObject(4)
-def Start(builder):
-    return ResizeBilinearOptionsStart(builder)
-def ResizeBilinearOptionsAddAlignCorners(builder, alignCorners): builder.PrependBoolSlot(2, alignCorners, 0)
-def AddAlignCorners(builder, alignCorners):
-    return ResizeBilinearOptionsAddAlignCorners(builder, alignCorners)
-def ResizeBilinearOptionsAddHalfPixelCenters(builder, halfPixelCenters): builder.PrependBoolSlot(3, halfPixelCenters, 0)
-def AddHalfPixelCenters(builder, halfPixelCenters):
-    return ResizeBilinearOptionsAddHalfPixelCenters(builder, halfPixelCenters)
-def ResizeBilinearOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return ResizeBilinearOptionsEnd(builder)
-
-class ResizeBilinearOptionsT(object):
-
-    # ResizeBilinearOptionsT
-    def __init__(self):
-        self.alignCorners = False  # type: bool
-        self.halfPixelCenters = False  # type: bool
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        resizeBilinearOptions = ResizeBilinearOptions()
-        resizeBilinearOptions.Init(buf, pos)
-        return cls.InitFromObj(resizeBilinearOptions)
-
-    @classmethod
-    def InitFromObj(cls, resizeBilinearOptions):
-        x = ResizeBilinearOptionsT()
-        x._UnPack(resizeBilinearOptions)
-        return x
-
-    # ResizeBilinearOptionsT
-    def _UnPack(self, resizeBilinearOptions):
-        if resizeBilinearOptions is None:
-            return
-        self.alignCorners = resizeBilinearOptions.AlignCorners()
-        self.halfPixelCenters = resizeBilinearOptions.HalfPixelCenters()
-
-    # ResizeBilinearOptionsT
-    def Pack(self, builder):
-        ResizeBilinearOptionsStart(builder)
-        ResizeBilinearOptionsAddAlignCorners(builder, self.alignCorners)
-        ResizeBilinearOptionsAddHalfPixelCenters(builder, self.halfPixelCenters)
-        resizeBilinearOptions = ResizeBilinearOptionsEnd(builder)
-        return resizeBilinearOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class ResizeNearestNeighborOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = ResizeNearestNeighborOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsResizeNearestNeighborOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def ResizeNearestNeighborOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # ResizeNearestNeighborOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # ResizeNearestNeighborOptions
-    def AlignCorners(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-    # ResizeNearestNeighborOptions
-    def HalfPixelCenters(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-def ResizeNearestNeighborOptionsStart(builder): builder.StartObject(2)
-def Start(builder):
-    return ResizeNearestNeighborOptionsStart(builder)
-def ResizeNearestNeighborOptionsAddAlignCorners(builder, alignCorners): builder.PrependBoolSlot(0, alignCorners, 0)
-def AddAlignCorners(builder, alignCorners):
-    return ResizeNearestNeighborOptionsAddAlignCorners(builder, alignCorners)
-def ResizeNearestNeighborOptionsAddHalfPixelCenters(builder, halfPixelCenters): builder.PrependBoolSlot(1, halfPixelCenters, 0)
-def AddHalfPixelCenters(builder, halfPixelCenters):
-    return ResizeNearestNeighborOptionsAddHalfPixelCenters(builder, halfPixelCenters)
-def ResizeNearestNeighborOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return ResizeNearestNeighborOptionsEnd(builder)
-
-class ResizeNearestNeighborOptionsT(object):
-
-    # ResizeNearestNeighborOptionsT
-    def __init__(self):
-        self.alignCorners = False  # type: bool
-        self.halfPixelCenters = False  # type: bool
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        resizeNearestNeighborOptions = ResizeNearestNeighborOptions()
-        resizeNearestNeighborOptions.Init(buf, pos)
-        return cls.InitFromObj(resizeNearestNeighborOptions)
-
-    @classmethod
-    def InitFromObj(cls, resizeNearestNeighborOptions):
-        x = ResizeNearestNeighborOptionsT()
-        x._UnPack(resizeNearestNeighborOptions)
-        return x
-
-    # ResizeNearestNeighborOptionsT
-    def _UnPack(self, resizeNearestNeighborOptions):
-        if resizeNearestNeighborOptions is None:
-            return
-        self.alignCorners = resizeNearestNeighborOptions.AlignCorners()
-        self.halfPixelCenters = resizeNearestNeighborOptions.HalfPixelCenters()
-
-    # ResizeNearestNeighborOptionsT
-    def Pack(self, builder):
-        ResizeNearestNeighborOptionsStart(builder)
-        ResizeNearestNeighborOptionsAddAlignCorners(builder, self.alignCorners)
-        ResizeNearestNeighborOptionsAddHalfPixelCenters(builder, self.halfPixelCenters)
-        resizeNearestNeighborOptions = ResizeNearestNeighborOptionsEnd(builder)
-        return resizeNearestNeighborOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class ReverseSequenceOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = ReverseSequenceOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsReverseSequenceOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def ReverseSequenceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # ReverseSequenceOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # ReverseSequenceOptions
-    def SeqDim(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # ReverseSequenceOptions
-    def BatchDim(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-def ReverseSequenceOptionsStart(builder): builder.StartObject(2)
-def Start(builder):
-    return ReverseSequenceOptionsStart(builder)
-def ReverseSequenceOptionsAddSeqDim(builder, seqDim): builder.PrependInt32Slot(0, seqDim, 0)
-def AddSeqDim(builder, seqDim):
-    return ReverseSequenceOptionsAddSeqDim(builder, seqDim)
-def ReverseSequenceOptionsAddBatchDim(builder, batchDim): builder.PrependInt32Slot(1, batchDim, 0)
-def AddBatchDim(builder, batchDim):
-    return ReverseSequenceOptionsAddBatchDim(builder, batchDim)
-def ReverseSequenceOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return ReverseSequenceOptionsEnd(builder)
-
-class ReverseSequenceOptionsT(object):
-
-    # ReverseSequenceOptionsT
-    def __init__(self):
-        self.seqDim = 0  # type: int
-        self.batchDim = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        reverseSequenceOptions = ReverseSequenceOptions()
-        reverseSequenceOptions.Init(buf, pos)
-        return cls.InitFromObj(reverseSequenceOptions)
-
-    @classmethod
-    def InitFromObj(cls, reverseSequenceOptions):
-        x = ReverseSequenceOptionsT()
-        x._UnPack(reverseSequenceOptions)
-        return x
-
-    # ReverseSequenceOptionsT
-    def _UnPack(self, reverseSequenceOptions):
-        if reverseSequenceOptions is None:
-            return
-        self.seqDim = reverseSequenceOptions.SeqDim()
-        self.batchDim = reverseSequenceOptions.BatchDim()
-
-    # ReverseSequenceOptionsT
-    def Pack(self, builder):
-        ReverseSequenceOptionsStart(builder)
-        ReverseSequenceOptionsAddSeqDim(builder, self.seqDim)
-        ReverseSequenceOptionsAddBatchDim(builder, self.batchDim)
-        reverseSequenceOptions = ReverseSequenceOptionsEnd(builder)
-        return reverseSequenceOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class ReverseV2Options(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = ReverseV2Options()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsReverseV2Options(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def ReverseV2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # ReverseV2Options
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def ReverseV2OptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return ReverseV2OptionsStart(builder)
-def ReverseV2OptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return ReverseV2OptionsEnd(builder)
-
-class ReverseV2OptionsT(object):
-
-    # ReverseV2OptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        reverseV2options = ReverseV2Options()
-        reverseV2options.Init(buf, pos)
-        return cls.InitFromObj(reverseV2options)
-
-    @classmethod
-    def InitFromObj(cls, reverseV2options):
-        x = ReverseV2OptionsT()
-        x._UnPack(reverseV2options)
-        return x
-
-    # ReverseV2OptionsT
-    def _UnPack(self, reverseV2options):
-        if reverseV2options is None:
-            return
-
-    # ReverseV2OptionsT
-    def Pack(self, builder):
-        ReverseV2OptionsStart(builder)
-        reverseV2options = ReverseV2OptionsEnd(builder)
-        return reverseV2options
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class Rfft2dOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = Rfft2dOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsRfft2dOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def Rfft2dOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # Rfft2dOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def Rfft2dOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return Rfft2dOptionsStart(builder)
-def Rfft2dOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return Rfft2dOptionsEnd(builder)
-
-class Rfft2dOptionsT(object):
-
-    # Rfft2dOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        rfft2dOptions = Rfft2dOptions()
-        rfft2dOptions.Init(buf, pos)
-        return cls.InitFromObj(rfft2dOptions)
-
-    @classmethod
-    def InitFromObj(cls, rfft2dOptions):
-        x = Rfft2dOptionsT()
-        x._UnPack(rfft2dOptions)
-        return x
-
-    # Rfft2dOptionsT
-    def _UnPack(self, rfft2dOptions):
-        if rfft2dOptions is None:
-            return
-
-    # Rfft2dOptionsT
-    def Pack(self, builder):
-        Rfft2dOptionsStart(builder)
-        rfft2dOptions = Rfft2dOptionsEnd(builder)
-        return rfft2dOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class RightShiftOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = RightShiftOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsRightShiftOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def RightShiftOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # RightShiftOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def RightShiftOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return RightShiftOptionsStart(builder)
-def RightShiftOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return RightShiftOptionsEnd(builder)
-
-class RightShiftOptionsT(object):
-
-    # RightShiftOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        rightShiftOptions = RightShiftOptions()
-        rightShiftOptions.Init(buf, pos)
-        return cls.InitFromObj(rightShiftOptions)
-
-    @classmethod
-    def InitFromObj(cls, rightShiftOptions):
-        x = RightShiftOptionsT()
-        x._UnPack(rightShiftOptions)
-        return x
-
-    # RightShiftOptionsT
-    def _UnPack(self, rightShiftOptions):
-        if rightShiftOptions is None:
-            return
-
-    # RightShiftOptionsT
-    def Pack(self, builder):
-        RightShiftOptionsStart(builder)
-        rightShiftOptions = RightShiftOptionsEnd(builder)
-        return rightShiftOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class SVDFOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SVDFOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsSVDFOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def SVDFOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # SVDFOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # SVDFOptions
-    def Rank(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # SVDFOptions
-    def FusedActivationFunction(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # SVDFOptions
-    def AsymmetricQuantizeInputs(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-def SVDFOptionsStart(builder): builder.StartObject(3)
-def Start(builder):
-    return SVDFOptionsStart(builder)
-def SVDFOptionsAddRank(builder, rank): builder.PrependInt32Slot(0, rank, 0)
-def AddRank(builder, rank):
-    return SVDFOptionsAddRank(builder, rank)
-def SVDFOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(1, fusedActivationFunction, 0)
-def AddFusedActivationFunction(builder, fusedActivationFunction):
-    return SVDFOptionsAddFusedActivationFunction(builder, fusedActivationFunction)
-def SVDFOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): builder.PrependBoolSlot(2, asymmetricQuantizeInputs, 0)
-def AddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs):
-    return SVDFOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs)
-def SVDFOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return SVDFOptionsEnd(builder)
-
-class SVDFOptionsT(object):
-
-    # SVDFOptionsT
-    def __init__(self):
-        self.rank = 0  # type: int
-        self.fusedActivationFunction = 0  # type: int
-        self.asymmetricQuantizeInputs = False  # type: bool
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        svdfoptions = SVDFOptions()
-        svdfoptions.Init(buf, pos)
-        return cls.InitFromObj(svdfoptions)
-
-    @classmethod
-    def InitFromObj(cls, svdfoptions):
-        x = SVDFOptionsT()
-        x._UnPack(svdfoptions)
-        return x
-
-    # SVDFOptionsT
-    def _UnPack(self, svdfoptions):
-        if svdfoptions is None:
-            return
-        self.rank = svdfoptions.Rank()
-        self.fusedActivationFunction = svdfoptions.FusedActivationFunction()
-        self.asymmetricQuantizeInputs = svdfoptions.AsymmetricQuantizeInputs()
-
-    # SVDFOptionsT
-    def Pack(self, builder):
-        SVDFOptionsStart(builder)
-        SVDFOptionsAddRank(builder, self.rank)
-        SVDFOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        SVDFOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs)
-        svdfoptions = SVDFOptionsEnd(builder)
-        return svdfoptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class ScatterNdOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = ScatterNdOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsScatterNdOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def ScatterNdOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # ScatterNdOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def ScatterNdOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return ScatterNdOptionsStart(builder)
-def ScatterNdOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return ScatterNdOptionsEnd(builder)
-
-class ScatterNdOptionsT(object):
-
-    # ScatterNdOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        scatterNdOptions = ScatterNdOptions()
-        scatterNdOptions.Init(buf, pos)
-        return cls.InitFromObj(scatterNdOptions)
-
-    @classmethod
-    def InitFromObj(cls, scatterNdOptions):
-        x = ScatterNdOptionsT()
-        x._UnPack(scatterNdOptions)
-        return x
-
-    # ScatterNdOptionsT
-    def _UnPack(self, scatterNdOptions):
-        if scatterNdOptions is None:
-            return
-
-    # ScatterNdOptionsT
-    def Pack(self, builder):
-        ScatterNdOptionsStart(builder)
-        scatterNdOptions = ScatterNdOptionsEnd(builder)
-        return scatterNdOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class SegmentSumOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SegmentSumOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsSegmentSumOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def SegmentSumOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # SegmentSumOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def SegmentSumOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return SegmentSumOptionsStart(builder)
-def SegmentSumOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return SegmentSumOptionsEnd(builder)
-
-class SegmentSumOptionsT(object):
-
-    # SegmentSumOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        segmentSumOptions = SegmentSumOptions()
-        segmentSumOptions.Init(buf, pos)
-        return cls.InitFromObj(segmentSumOptions)
-
-    @classmethod
-    def InitFromObj(cls, segmentSumOptions):
-        x = SegmentSumOptionsT()
-        x._UnPack(segmentSumOptions)
-        return x
-
-    # SegmentSumOptionsT
-    def _UnPack(self, segmentSumOptions):
-        if segmentSumOptions is None:
-            return
-
-    # SegmentSumOptionsT
-    def Pack(self, builder):
-        SegmentSumOptionsStart(builder)
-        segmentSumOptions = SegmentSumOptionsEnd(builder)
-        return segmentSumOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class SelectOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SelectOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsSelectOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def SelectOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # SelectOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def SelectOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return SelectOptionsStart(builder)
-def SelectOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return SelectOptionsEnd(builder)
-
-class SelectOptionsT(object):
-
-    # SelectOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        selectOptions = SelectOptions()
-        selectOptions.Init(buf, pos)
-        return cls.InitFromObj(selectOptions)
-
-    @classmethod
-    def InitFromObj(cls, selectOptions):
-        x = SelectOptionsT()
-        x._UnPack(selectOptions)
-        return x
-
-    # SelectOptionsT
-    def _UnPack(self, selectOptions):
-        if selectOptions is None:
-            return
-
-    # SelectOptionsT
-    def Pack(self, builder):
-        SelectOptionsStart(builder)
-        selectOptions = SelectOptionsEnd(builder)
-        return selectOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class SelectV2Options(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SelectV2Options()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsSelectV2Options(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def SelectV2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # SelectV2Options
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def SelectV2OptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return SelectV2OptionsStart(builder)
-def SelectV2OptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return SelectV2OptionsEnd(builder)
-
-class SelectV2OptionsT(object):
-
-    # SelectV2OptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        selectV2options = SelectV2Options()
-        selectV2options.Init(buf, pos)
-        return cls.InitFromObj(selectV2options)
-
-    @classmethod
-    def InitFromObj(cls, selectV2options):
-        x = SelectV2OptionsT()
-        x._UnPack(selectV2options)
-        return x
-
-    # SelectV2OptionsT
-    def _UnPack(self, selectV2options):
-        if selectV2options is None:
-            return
-
-    # SelectV2OptionsT
-    def Pack(self, builder):
-        SelectV2OptionsStart(builder)
-        selectV2options = SelectV2OptionsEnd(builder)
-        return selectV2options
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class SequenceRNNOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SequenceRNNOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsSequenceRNNOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def SequenceRNNOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # SequenceRNNOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # SequenceRNNOptions
-    def TimeMajor(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-    # SequenceRNNOptions
-    def FusedActivationFunction(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # SequenceRNNOptions
-    def AsymmetricQuantizeInputs(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-def SequenceRNNOptionsStart(builder): builder.StartObject(3)
-def Start(builder):
-    return SequenceRNNOptionsStart(builder)
-def SequenceRNNOptionsAddTimeMajor(builder, timeMajor): builder.PrependBoolSlot(0, timeMajor, 0)
-def AddTimeMajor(builder, timeMajor):
-    return SequenceRNNOptionsAddTimeMajor(builder, timeMajor)
-def SequenceRNNOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(1, fusedActivationFunction, 0)
-def AddFusedActivationFunction(builder, fusedActivationFunction):
-    return SequenceRNNOptionsAddFusedActivationFunction(builder, fusedActivationFunction)
-def SequenceRNNOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): builder.PrependBoolSlot(2, asymmetricQuantizeInputs, 0)
-def AddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs):
-    return SequenceRNNOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs)
-def SequenceRNNOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return SequenceRNNOptionsEnd(builder)
-
-class SequenceRNNOptionsT(object):
-
-    # SequenceRNNOptionsT
-    def __init__(self):
-        self.timeMajor = False  # type: bool
-        self.fusedActivationFunction = 0  # type: int
-        self.asymmetricQuantizeInputs = False  # type: bool
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        sequenceRnnoptions = SequenceRNNOptions()
-        sequenceRnnoptions.Init(buf, pos)
-        return cls.InitFromObj(sequenceRnnoptions)
-
-    @classmethod
-    def InitFromObj(cls, sequenceRnnoptions):
-        x = SequenceRNNOptionsT()
-        x._UnPack(sequenceRnnoptions)
-        return x
-
-    # SequenceRNNOptionsT
-    def _UnPack(self, sequenceRnnoptions):
-        if sequenceRnnoptions is None:
-            return
-        self.timeMajor = sequenceRnnoptions.TimeMajor()
-        self.fusedActivationFunction = sequenceRnnoptions.FusedActivationFunction()
-        self.asymmetricQuantizeInputs = sequenceRnnoptions.AsymmetricQuantizeInputs()
-
-    # SequenceRNNOptionsT
-    def Pack(self, builder):
-        SequenceRNNOptionsStart(builder)
-        SequenceRNNOptionsAddTimeMajor(builder, self.timeMajor)
-        SequenceRNNOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        SequenceRNNOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs)
-        sequenceRnnoptions = SequenceRNNOptionsEnd(builder)
-        return sequenceRnnoptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class ShapeOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = ShapeOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsShapeOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def ShapeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # ShapeOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # ShapeOptions
-    def OutType(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-def ShapeOptionsStart(builder): builder.StartObject(1)
-def Start(builder):
-    return ShapeOptionsStart(builder)
-def ShapeOptionsAddOutType(builder, outType): builder.PrependInt8Slot(0, outType, 0)
-def AddOutType(builder, outType):
-    return ShapeOptionsAddOutType(builder, outType)
-def ShapeOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return ShapeOptionsEnd(builder)
-
-class ShapeOptionsT(object):
-
-    # ShapeOptionsT
-    def __init__(self):
-        self.outType = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        shapeOptions = ShapeOptions()
-        shapeOptions.Init(buf, pos)
-        return cls.InitFromObj(shapeOptions)
-
-    @classmethod
-    def InitFromObj(cls, shapeOptions):
-        x = ShapeOptionsT()
-        x._UnPack(shapeOptions)
-        return x
-
-    # ShapeOptionsT
-    def _UnPack(self, shapeOptions):
-        if shapeOptions is None:
-            return
-        self.outType = shapeOptions.OutType()
-
-    # ShapeOptionsT
-    def Pack(self, builder):
-        ShapeOptionsStart(builder)
-        ShapeOptionsAddOutType(builder, self.outType)
-        shapeOptions = ShapeOptionsEnd(builder)
-        return shapeOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class SignOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SignOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsSignOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def SignOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # SignOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def SignOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return SignOptionsStart(builder)
-def SignOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return SignOptionsEnd(builder)
-
-class SignOptionsT(object):
-
-    # SignOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        signOptions = SignOptions()
-        signOptions.Init(buf, pos)
-        return cls.InitFromObj(signOptions)
-
-    @classmethod
-    def InitFromObj(cls, signOptions):
-        x = SignOptionsT()
-        x._UnPack(signOptions)
-        return x
-
-    # SignOptionsT
-    def _UnPack(self, signOptions):
-        if signOptions is None:
-            return
-
-    # SignOptionsT
-    def Pack(self, builder):
-        SignOptionsStart(builder)
-        signOptions = SignOptionsEnd(builder)
-        return signOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class SignatureDef(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SignatureDef()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsSignatureDef(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def SignatureDefBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # SignatureDef
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # SignatureDef
-    def Inputs(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            x = self._tab.Vector(o)
-            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
-            x = self._tab.Indirect(x)
-            obj = TensorMap()
-            obj.Init(self._tab.Bytes, x)
-            return obj
-        return None
-
-    # SignatureDef
-    def InputsLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # SignatureDef
-    def InputsIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        return o == 0
-
-    # SignatureDef
-    def Outputs(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            x = self._tab.Vector(o)
-            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
-            x = self._tab.Indirect(x)
-            obj = TensorMap()
-            obj.Init(self._tab.Bytes, x)
-            return obj
-        return None
-
-    # SignatureDef
-    def OutputsLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # SignatureDef
-    def OutputsIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        return o == 0
-
-    # SignatureDef
-    def SignatureKey(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.String(o + self._tab.Pos)
-        return None
-
-    # SignatureDef
-    def SubgraphIndex(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
-        return 0
-
-def SignatureDefStart(builder): builder.StartObject(5)
-def Start(builder):
-    return SignatureDefStart(builder)
-def SignatureDefAddInputs(builder, inputs): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0)
-def AddInputs(builder, inputs):
-    return SignatureDefAddInputs(builder, inputs)
-def SignatureDefStartInputsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def StartInputsVector(builder, numElems):
-    return SignatureDefStartInputsVector(builder, numElems)
-def SignatureDefAddOutputs(builder, outputs): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0)
-def AddOutputs(builder, outputs):
-    return SignatureDefAddOutputs(builder, outputs)
-def SignatureDefStartOutputsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def StartOutputsVector(builder, numElems):
-    return SignatureDefStartOutputsVector(builder, numElems)
-def SignatureDefAddSignatureKey(builder, signatureKey): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(signatureKey), 0)
-def AddSignatureKey(builder, signatureKey):
-    return SignatureDefAddSignatureKey(builder, signatureKey)
-def SignatureDefAddSubgraphIndex(builder, subgraphIndex): builder.PrependUint32Slot(4, subgraphIndex, 0)
-def AddSubgraphIndex(builder, subgraphIndex):
-    return SignatureDefAddSubgraphIndex(builder, subgraphIndex)
-def SignatureDefEnd(builder): return builder.EndObject()
-def End(builder):
-    return SignatureDefEnd(builder)
-try:
-    from typing import List
-except:
-    pass
-
-class SignatureDefT(object):
-
-    # SignatureDefT
-    def __init__(self):
-        self.inputs = None  # type: List[TensorMapT]
-        self.outputs = None  # type: List[TensorMapT]
-        self.signatureKey = None  # type: str
-        self.subgraphIndex = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        signatureDef = SignatureDef()
-        signatureDef.Init(buf, pos)
-        return cls.InitFromObj(signatureDef)
-
-    @classmethod
-    def InitFromObj(cls, signatureDef):
-        x = SignatureDefT()
-        x._UnPack(signatureDef)
-        return x
-
-    # SignatureDefT
-    def _UnPack(self, signatureDef):
-        if signatureDef is None:
-            return
-        if not signatureDef.InputsIsNone():
-            self.inputs = []
-            for i in range(signatureDef.InputsLength()):
-                if signatureDef.Inputs(i) is None:
-                    self.inputs.append(None)
-                else:
-                    tensorMap_ = TensorMapT.InitFromObj(signatureDef.Inputs(i))
-                    self.inputs.append(tensorMap_)
-        if not signatureDef.OutputsIsNone():
-            self.outputs = []
-            for i in range(signatureDef.OutputsLength()):
-                if signatureDef.Outputs(i) is None:
-                    self.outputs.append(None)
-                else:
-                    tensorMap_ = TensorMapT.InitFromObj(signatureDef.Outputs(i))
-                    self.outputs.append(tensorMap_)
-        self.signatureKey = signatureDef.SignatureKey()
-        self.subgraphIndex = signatureDef.SubgraphIndex()
-
-    # SignatureDefT
-    def Pack(self, builder):
-        if self.inputs is not None:
-            inputslist = []
-            for i in range(len(self.inputs)):
-                inputslist.append(self.inputs[i].Pack(builder))
-            SignatureDefStartInputsVector(builder, len(self.inputs))
-            for i in reversed(range(len(self.inputs))):
-                builder.PrependUOffsetTRelative(inputslist[i])
-            inputs = builder.EndVector()
-        if self.outputs is not None:
-            outputslist = []
-            for i in range(len(self.outputs)):
-                outputslist.append(self.outputs[i].Pack(builder))
-            SignatureDefStartOutputsVector(builder, len(self.outputs))
-            for i in reversed(range(len(self.outputs))):
-                builder.PrependUOffsetTRelative(outputslist[i])
-            outputs = builder.EndVector()
-        if self.signatureKey is not None:
-            signatureKey = builder.CreateString(self.signatureKey)
-        SignatureDefStart(builder)
-        if self.inputs is not None:
-            SignatureDefAddInputs(builder, inputs)
-        if self.outputs is not None:
-            SignatureDefAddOutputs(builder, outputs)
-        if self.signatureKey is not None:
-            SignatureDefAddSignatureKey(builder, signatureKey)
-        SignatureDefAddSubgraphIndex(builder, self.subgraphIndex)
-        signatureDef = SignatureDefEnd(builder)
-        return signatureDef
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class SkipGramOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SkipGramOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsSkipGramOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def SkipGramOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # SkipGramOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # SkipGramOptions
-    def NgramSize(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # SkipGramOptions
-    def MaxSkipSize(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # SkipGramOptions
-    def IncludeAllNgrams(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-def SkipGramOptionsStart(builder): builder.StartObject(3)
-def Start(builder):
-    return SkipGramOptionsStart(builder)
-def SkipGramOptionsAddNgramSize(builder, ngramSize): builder.PrependInt32Slot(0, ngramSize, 0)
-def AddNgramSize(builder, ngramSize):
-    return SkipGramOptionsAddNgramSize(builder, ngramSize)
-def SkipGramOptionsAddMaxSkipSize(builder, maxSkipSize): builder.PrependInt32Slot(1, maxSkipSize, 0)
-def AddMaxSkipSize(builder, maxSkipSize):
-    return SkipGramOptionsAddMaxSkipSize(builder, maxSkipSize)
-def SkipGramOptionsAddIncludeAllNgrams(builder, includeAllNgrams): builder.PrependBoolSlot(2, includeAllNgrams, 0)
-def AddIncludeAllNgrams(builder, includeAllNgrams):
-    return SkipGramOptionsAddIncludeAllNgrams(builder, includeAllNgrams)
-def SkipGramOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return SkipGramOptionsEnd(builder)
-
-class SkipGramOptionsT(object):
-
-    # SkipGramOptionsT
-    def __init__(self):
-        self.ngramSize = 0  # type: int
-        self.maxSkipSize = 0  # type: int
-        self.includeAllNgrams = False  # type: bool
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        skipGramOptions = SkipGramOptions()
-        skipGramOptions.Init(buf, pos)
-        return cls.InitFromObj(skipGramOptions)
-
-    @classmethod
-    def InitFromObj(cls, skipGramOptions):
-        x = SkipGramOptionsT()
-        x._UnPack(skipGramOptions)
-        return x
-
-    # SkipGramOptionsT
-    def _UnPack(self, skipGramOptions):
-        if skipGramOptions is None:
-            return
-        self.ngramSize = skipGramOptions.NgramSize()
-        self.maxSkipSize = skipGramOptions.MaxSkipSize()
-        self.includeAllNgrams = skipGramOptions.IncludeAllNgrams()
-
-    # SkipGramOptionsT
-    def Pack(self, builder):
-        SkipGramOptionsStart(builder)
-        SkipGramOptionsAddNgramSize(builder, self.ngramSize)
-        SkipGramOptionsAddMaxSkipSize(builder, self.maxSkipSize)
-        SkipGramOptionsAddIncludeAllNgrams(builder, self.includeAllNgrams)
-        skipGramOptions = SkipGramOptionsEnd(builder)
-        return skipGramOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class SliceOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SliceOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsSliceOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def SliceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # SliceOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def SliceOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return SliceOptionsStart(builder)
-def SliceOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return SliceOptionsEnd(builder)
-
-class SliceOptionsT(object):
-
-    # SliceOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        sliceOptions = SliceOptions()
-        sliceOptions.Init(buf, pos)
-        return cls.InitFromObj(sliceOptions)
-
-    @classmethod
-    def InitFromObj(cls, sliceOptions):
-        x = SliceOptionsT()
-        x._UnPack(sliceOptions)
-        return x
-
-    # SliceOptionsT
-    def _UnPack(self, sliceOptions):
-        if sliceOptions is None:
-            return
-
-    # SliceOptionsT
-    def Pack(self, builder):
-        SliceOptionsStart(builder)
-        sliceOptions = SliceOptionsEnd(builder)
-        return sliceOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class SoftmaxOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SoftmaxOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsSoftmaxOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def SoftmaxOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # SoftmaxOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # SoftmaxOptions
-    def Beta(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
-        return 0.0
-
-def SoftmaxOptionsStart(builder): builder.StartObject(1)
-def Start(builder):
-    return SoftmaxOptionsStart(builder)
-def SoftmaxOptionsAddBeta(builder, beta): builder.PrependFloat32Slot(0, beta, 0.0)
-def AddBeta(builder, beta):
-    return SoftmaxOptionsAddBeta(builder, beta)
-def SoftmaxOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return SoftmaxOptionsEnd(builder)
-
-class SoftmaxOptionsT(object):
-
-    # SoftmaxOptionsT
-    def __init__(self):
-        self.beta = 0.0  # type: float
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        softmaxOptions = SoftmaxOptions()
-        softmaxOptions.Init(buf, pos)
-        return cls.InitFromObj(softmaxOptions)
-
-    @classmethod
-    def InitFromObj(cls, softmaxOptions):
-        x = SoftmaxOptionsT()
-        x._UnPack(softmaxOptions)
-        return x
-
-    # SoftmaxOptionsT
-    def _UnPack(self, softmaxOptions):
-        if softmaxOptions is None:
-            return
-        self.beta = softmaxOptions.Beta()
-
-    # SoftmaxOptionsT
-    def Pack(self, builder):
-        SoftmaxOptionsStart(builder)
-        SoftmaxOptionsAddBeta(builder, self.beta)
-        softmaxOptions = SoftmaxOptionsEnd(builder)
-        return softmaxOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class SpaceToBatchNDOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SpaceToBatchNDOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsSpaceToBatchNDOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def SpaceToBatchNDOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # SpaceToBatchNDOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def SpaceToBatchNDOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return SpaceToBatchNDOptionsStart(builder)
-def SpaceToBatchNDOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return SpaceToBatchNDOptionsEnd(builder)
-
-class SpaceToBatchNDOptionsT(object):
-
-    # SpaceToBatchNDOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        spaceToBatchNdoptions = SpaceToBatchNDOptions()
-        spaceToBatchNdoptions.Init(buf, pos)
-        return cls.InitFromObj(spaceToBatchNdoptions)
-
-    @classmethod
-    def InitFromObj(cls, spaceToBatchNdoptions):
-        x = SpaceToBatchNDOptionsT()
-        x._UnPack(spaceToBatchNdoptions)
-        return x
-
-    # SpaceToBatchNDOptionsT
-    def _UnPack(self, spaceToBatchNdoptions):
-        if spaceToBatchNdoptions is None:
-            return
-
-    # SpaceToBatchNDOptionsT
-    def Pack(self, builder):
-        SpaceToBatchNDOptionsStart(builder)
-        spaceToBatchNdoptions = SpaceToBatchNDOptionsEnd(builder)
-        return spaceToBatchNdoptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class SpaceToDepthOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SpaceToDepthOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsSpaceToDepthOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def SpaceToDepthOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # SpaceToDepthOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # SpaceToDepthOptions
-    def BlockSize(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-def SpaceToDepthOptionsStart(builder): builder.StartObject(1)
-def Start(builder):
-    return SpaceToDepthOptionsStart(builder)
-def SpaceToDepthOptionsAddBlockSize(builder, blockSize): builder.PrependInt32Slot(0, blockSize, 0)
-def AddBlockSize(builder, blockSize):
-    return SpaceToDepthOptionsAddBlockSize(builder, blockSize)
-def SpaceToDepthOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return SpaceToDepthOptionsEnd(builder)
-
-class SpaceToDepthOptionsT(object):
-
-    # SpaceToDepthOptionsT
-    def __init__(self):
-        self.blockSize = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        spaceToDepthOptions = SpaceToDepthOptions()
-        spaceToDepthOptions.Init(buf, pos)
-        return cls.InitFromObj(spaceToDepthOptions)
-
-    @classmethod
-    def InitFromObj(cls, spaceToDepthOptions):
-        x = SpaceToDepthOptionsT()
-        x._UnPack(spaceToDepthOptions)
-        return x
-
-    # SpaceToDepthOptionsT
-    def _UnPack(self, spaceToDepthOptions):
-        if spaceToDepthOptions is None:
-            return
-        self.blockSize = spaceToDepthOptions.BlockSize()
-
-    # SpaceToDepthOptionsT
-    def Pack(self, builder):
-        SpaceToDepthOptionsStart(builder)
-        SpaceToDepthOptionsAddBlockSize(builder, self.blockSize)
-        spaceToDepthOptions = SpaceToDepthOptionsEnd(builder)
-        return spaceToDepthOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-class SparseIndexVector(object):
-    NONE = 0
-    Int32Vector = 1
-    Uint16Vector = 2
-    Uint8Vector = 3
-
-def SparseIndexVectorCreator(unionType, table):
-    from flatbuffers.table import Table
-    if not isinstance(table, Table):
-        return None
-    if unionType == SparseIndexVector().Int32Vector:
-        return Int32VectorT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == SparseIndexVector().Uint16Vector:
-        return Uint16VectorT.InitFromBuf(table.Bytes, table.Pos)
-    if unionType == SparseIndexVector().Uint8Vector:
-        return Uint8VectorT.InitFromBuf(table.Bytes, table.Pos)
-    return None
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class SparseToDenseOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SparseToDenseOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsSparseToDenseOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def SparseToDenseOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # SparseToDenseOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # SparseToDenseOptions
-    def ValidateIndices(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-def SparseToDenseOptionsStart(builder): builder.StartObject(1)
-def Start(builder):
-    return SparseToDenseOptionsStart(builder)
-def SparseToDenseOptionsAddValidateIndices(builder, validateIndices): builder.PrependBoolSlot(0, validateIndices, 0)
-def AddValidateIndices(builder, validateIndices):
-    return SparseToDenseOptionsAddValidateIndices(builder, validateIndices)
-def SparseToDenseOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return SparseToDenseOptionsEnd(builder)
-
-class SparseToDenseOptionsT(object):
-
-    # SparseToDenseOptionsT
-    def __init__(self):
-        self.validateIndices = False  # type: bool
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        sparseToDenseOptions = SparseToDenseOptions()
-        sparseToDenseOptions.Init(buf, pos)
-        return cls.InitFromObj(sparseToDenseOptions)
-
-    @classmethod
-    def InitFromObj(cls, sparseToDenseOptions):
-        x = SparseToDenseOptionsT()
-        x._UnPack(sparseToDenseOptions)
-        return x
-
-    # SparseToDenseOptionsT
-    def _UnPack(self, sparseToDenseOptions):
-        if sparseToDenseOptions is None:
-            return
-        self.validateIndices = sparseToDenseOptions.ValidateIndices()
-
-    # SparseToDenseOptionsT
-    def Pack(self, builder):
-        SparseToDenseOptionsStart(builder)
-        SparseToDenseOptionsAddValidateIndices(builder, self.validateIndices)
-        sparseToDenseOptions = SparseToDenseOptionsEnd(builder)
-        return sparseToDenseOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class SparsityParameters(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SparsityParameters()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsSparsityParameters(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def SparsityParametersBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # SparsityParameters
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # SparsityParameters
-    def TraversalOrder(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
-        return 0
-
-    # SparsityParameters
-    def TraversalOrderAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
-        return 0
-
-    # SparsityParameters
-    def TraversalOrderLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # SparsityParameters
-    def TraversalOrderIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        return o == 0
-
-    # SparsityParameters
-    def BlockMap(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
-        return 0
-
-    # SparsityParameters
-    def BlockMapAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
-        return 0
-
-    # SparsityParameters
-    def BlockMapLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # SparsityParameters
-    def BlockMapIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        return o == 0
-
-    # SparsityParameters
-    def DimMetadata(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            x = self._tab.Vector(o)
-            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
-            x = self._tab.Indirect(x)
-            obj = DimensionMetadata()
-            obj.Init(self._tab.Bytes, x)
-            return obj
-        return None
-
-    # SparsityParameters
-    def DimMetadataLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # SparsityParameters
-    def DimMetadataIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        return o == 0
-
-def SparsityParametersStart(builder): builder.StartObject(3)
-def Start(builder):
-    return SparsityParametersStart(builder)
-def SparsityParametersAddTraversalOrder(builder, traversalOrder): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(traversalOrder), 0)
-def AddTraversalOrder(builder, traversalOrder):
-    return SparsityParametersAddTraversalOrder(builder, traversalOrder)
-def SparsityParametersStartTraversalOrderVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def StartTraversalOrderVector(builder, numElems):
-    return SparsityParametersStartTraversalOrderVector(builder, numElems)
-def SparsityParametersAddBlockMap(builder, blockMap): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(blockMap), 0)
-def AddBlockMap(builder, blockMap):
-    return SparsityParametersAddBlockMap(builder, blockMap)
-def SparsityParametersStartBlockMapVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def StartBlockMapVector(builder, numElems):
-    return SparsityParametersStartBlockMapVector(builder, numElems)
-def SparsityParametersAddDimMetadata(builder, dimMetadata): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(dimMetadata), 0)
-def AddDimMetadata(builder, dimMetadata):
-    return SparsityParametersAddDimMetadata(builder, dimMetadata)
-def SparsityParametersStartDimMetadataVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def StartDimMetadataVector(builder, numElems):
-    return SparsityParametersStartDimMetadataVector(builder, numElems)
-def SparsityParametersEnd(builder): return builder.EndObject()
-def End(builder):
-    return SparsityParametersEnd(builder)
-try:
-    from typing import List
-except:
-    pass
-
-class SparsityParametersT(object):
-
-    # SparsityParametersT
-    def __init__(self):
-        self.traversalOrder = None  # type: List[int]
-        self.blockMap = None  # type: List[int]
-        self.dimMetadata = None  # type: List[DimensionMetadataT]
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        sparsityParameters = SparsityParameters()
-        sparsityParameters.Init(buf, pos)
-        return cls.InitFromObj(sparsityParameters)
-
-    @classmethod
-    def InitFromObj(cls, sparsityParameters):
-        x = SparsityParametersT()
-        x._UnPack(sparsityParameters)
-        return x
-
-    # SparsityParametersT
-    def _UnPack(self, sparsityParameters):
-        if sparsityParameters is None:
-            return
-        if not sparsityParameters.TraversalOrderIsNone():
-            if np is None:
-                self.traversalOrder = []
-                for i in range(sparsityParameters.TraversalOrderLength()):
-                    self.traversalOrder.append(sparsityParameters.TraversalOrder(i))
-            else:
-                self.traversalOrder = sparsityParameters.TraversalOrderAsNumpy()
-        if not sparsityParameters.BlockMapIsNone():
-            if np is None:
-                self.blockMap = []
-                for i in range(sparsityParameters.BlockMapLength()):
-                    self.blockMap.append(sparsityParameters.BlockMap(i))
-            else:
-                self.blockMap = sparsityParameters.BlockMapAsNumpy()
-        if not sparsityParameters.DimMetadataIsNone():
-            self.dimMetadata = []
-            for i in range(sparsityParameters.DimMetadataLength()):
-                if sparsityParameters.DimMetadata(i) is None:
-                    self.dimMetadata.append(None)
-                else:
-                    dimensionMetadata_ = DimensionMetadataT.InitFromObj(sparsityParameters.DimMetadata(i))
-                    self.dimMetadata.append(dimensionMetadata_)
-
-    # SparsityParametersT
-    def Pack(self, builder):
-        if self.traversalOrder is not None:
-            if np is not None and type(self.traversalOrder) is np.ndarray:
-                traversalOrder = builder.CreateNumpyVector(self.traversalOrder)
-            else:
-                SparsityParametersStartTraversalOrderVector(builder, len(self.traversalOrder))
-                for i in reversed(range(len(self.traversalOrder))):
-                    builder.PrependInt32(self.traversalOrder[i])
-                traversalOrder = builder.EndVector()
-        if self.blockMap is not None:
-            if np is not None and type(self.blockMap) is np.ndarray:
-                blockMap = builder.CreateNumpyVector(self.blockMap)
-            else:
-                SparsityParametersStartBlockMapVector(builder, len(self.blockMap))
-                for i in reversed(range(len(self.blockMap))):
-                    builder.PrependInt32(self.blockMap[i])
-                blockMap = builder.EndVector()
-        if self.dimMetadata is not None:
-            dimMetadatalist = []
-            for i in range(len(self.dimMetadata)):
-                dimMetadatalist.append(self.dimMetadata[i].Pack(builder))
-            SparsityParametersStartDimMetadataVector(builder, len(self.dimMetadata))
-            for i in reversed(range(len(self.dimMetadata))):
-                builder.PrependUOffsetTRelative(dimMetadatalist[i])
-            dimMetadata = builder.EndVector()
-        SparsityParametersStart(builder)
-        if self.traversalOrder is not None:
-            SparsityParametersAddTraversalOrder(builder, traversalOrder)
-        if self.blockMap is not None:
-            SparsityParametersAddBlockMap(builder, blockMap)
-        if self.dimMetadata is not None:
-            SparsityParametersAddDimMetadata(builder, dimMetadata)
-        sparsityParameters = SparsityParametersEnd(builder)
-        return sparsityParameters
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class SplitOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SplitOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsSplitOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def SplitOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # SplitOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # SplitOptions
-    def NumSplits(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-def SplitOptionsStart(builder): builder.StartObject(1)
-def Start(builder):
-    return SplitOptionsStart(builder)
-def SplitOptionsAddNumSplits(builder, numSplits): builder.PrependInt32Slot(0, numSplits, 0)
-def AddNumSplits(builder, numSplits):
-    return SplitOptionsAddNumSplits(builder, numSplits)
-def SplitOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return SplitOptionsEnd(builder)
-
-class SplitOptionsT(object):
-
-    # SplitOptionsT
-    def __init__(self):
-        self.numSplits = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        splitOptions = SplitOptions()
-        splitOptions.Init(buf, pos)
-        return cls.InitFromObj(splitOptions)
-
-    @classmethod
-    def InitFromObj(cls, splitOptions):
-        x = SplitOptionsT()
-        x._UnPack(splitOptions)
-        return x
-
-    # SplitOptionsT
-    def _UnPack(self, splitOptions):
-        if splitOptions is None:
-            return
-        self.numSplits = splitOptions.NumSplits()
-
-    # SplitOptionsT
-    def Pack(self, builder):
-        SplitOptionsStart(builder)
-        SplitOptionsAddNumSplits(builder, self.numSplits)
-        splitOptions = SplitOptionsEnd(builder)
-        return splitOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class SplitVOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SplitVOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsSplitVOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def SplitVOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # SplitVOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # SplitVOptions
-    def NumSplits(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-def SplitVOptionsStart(builder): builder.StartObject(1)
-def Start(builder):
-    return SplitVOptionsStart(builder)
-def SplitVOptionsAddNumSplits(builder, numSplits): builder.PrependInt32Slot(0, numSplits, 0)
-def AddNumSplits(builder, numSplits):
-    return SplitVOptionsAddNumSplits(builder, numSplits)
-def SplitVOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return SplitVOptionsEnd(builder)
-
-class SplitVOptionsT(object):
-
-    # SplitVOptionsT
-    def __init__(self):
-        self.numSplits = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        splitVoptions = SplitVOptions()
-        splitVoptions.Init(buf, pos)
-        return cls.InitFromObj(splitVoptions)
-
-    @classmethod
-    def InitFromObj(cls, splitVoptions):
-        x = SplitVOptionsT()
-        x._UnPack(splitVoptions)
-        return x
-
-    # SplitVOptionsT
-    def _UnPack(self, splitVoptions):
-        if splitVoptions is None:
-            return
-        self.numSplits = splitVoptions.NumSplits()
-
-    # SplitVOptionsT
-    def Pack(self, builder):
-        SplitVOptionsStart(builder)
-        SplitVOptionsAddNumSplits(builder, self.numSplits)
-        splitVoptions = SplitVOptionsEnd(builder)
-        return splitVoptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class SquareOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SquareOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsSquareOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def SquareOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # SquareOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def SquareOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return SquareOptionsStart(builder)
-def SquareOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return SquareOptionsEnd(builder)
-
-class SquareOptionsT(object):
-
-    # SquareOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        squareOptions = SquareOptions()
-        squareOptions.Init(buf, pos)
-        return cls.InitFromObj(squareOptions)
-
-    @classmethod
-    def InitFromObj(cls, squareOptions):
-        x = SquareOptionsT()
-        x._UnPack(squareOptions)
-        return x
-
-    # SquareOptionsT
-    def _UnPack(self, squareOptions):
-        if squareOptions is None:
-            return
-
-    # SquareOptionsT
-    def Pack(self, builder):
-        SquareOptionsStart(builder)
-        squareOptions = SquareOptionsEnd(builder)
-        return squareOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class SquaredDifferenceOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SquaredDifferenceOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsSquaredDifferenceOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def SquaredDifferenceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # SquaredDifferenceOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def SquaredDifferenceOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return SquaredDifferenceOptionsStart(builder)
-def SquaredDifferenceOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return SquaredDifferenceOptionsEnd(builder)
-
-class SquaredDifferenceOptionsT(object):
-
-    # SquaredDifferenceOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        squaredDifferenceOptions = SquaredDifferenceOptions()
-        squaredDifferenceOptions.Init(buf, pos)
-        return cls.InitFromObj(squaredDifferenceOptions)
-
-    @classmethod
-    def InitFromObj(cls, squaredDifferenceOptions):
-        x = SquaredDifferenceOptionsT()
-        x._UnPack(squaredDifferenceOptions)
-        return x
-
-    # SquaredDifferenceOptionsT
-    def _UnPack(self, squaredDifferenceOptions):
-        if squaredDifferenceOptions is None:
-            return
-
-    # SquaredDifferenceOptionsT
-    def Pack(self, builder):
-        SquaredDifferenceOptionsStart(builder)
-        squaredDifferenceOptions = SquaredDifferenceOptionsEnd(builder)
-        return squaredDifferenceOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class SqueezeOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SqueezeOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsSqueezeOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def SqueezeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # SqueezeOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # SqueezeOptions
-    def SqueezeDims(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
-        return 0
-
-    # SqueezeOptions
-    def SqueezeDimsAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
-        return 0
-
-    # SqueezeOptions
-    def SqueezeDimsLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
-
-    # SqueezeOptions
-    def SqueezeDimsIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        return o == 0
-
-def SqueezeOptionsStart(builder): builder.StartObject(1)
-def Start(builder):
-    return SqueezeOptionsStart(builder)
-def SqueezeOptionsAddSqueezeDims(builder, squeezeDims): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(squeezeDims), 0)
-def AddSqueezeDims(builder, squeezeDims):
-    return SqueezeOptionsAddSqueezeDims(builder, squeezeDims)
-def SqueezeOptionsStartSqueezeDimsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def StartSqueezeDimsVector(builder, numElems):
-    return SqueezeOptionsStartSqueezeDimsVector(builder, numElems)
-def SqueezeOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return SqueezeOptionsEnd(builder)
-try:
-    from typing import List
-except:
-    pass
-
-class SqueezeOptionsT(object):
-
-    # SqueezeOptionsT
-    def __init__(self):
-        self.squeezeDims = None  # type: List[int]
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        squeezeOptions = SqueezeOptions()
-        squeezeOptions.Init(buf, pos)
-        return cls.InitFromObj(squeezeOptions)
-
-    @classmethod
-    def InitFromObj(cls, squeezeOptions):
-        x = SqueezeOptionsT()
-        x._UnPack(squeezeOptions)
-        return x
-
-    # SqueezeOptionsT
-    def _UnPack(self, squeezeOptions):
-        if squeezeOptions is None:
-            return
-        if not squeezeOptions.SqueezeDimsIsNone():
-            if np is None:
-                self.squeezeDims = []
-                for i in range(squeezeOptions.SqueezeDimsLength()):
-                    self.squeezeDims.append(squeezeOptions.SqueezeDims(i))
-            else:
-                self.squeezeDims = squeezeOptions.SqueezeDimsAsNumpy()
-
-    # SqueezeOptionsT
-    def Pack(self, builder):
-        if self.squeezeDims is not None:
-            if np is not None and type(self.squeezeDims) is np.ndarray:
-                squeezeDims = builder.CreateNumpyVector(self.squeezeDims)
-            else:
-                SqueezeOptionsStartSqueezeDimsVector(builder, len(self.squeezeDims))
-                for i in reversed(range(len(self.squeezeDims))):
-                    builder.PrependInt32(self.squeezeDims[i])
-                squeezeDims = builder.EndVector()
-        SqueezeOptionsStart(builder)
-        if self.squeezeDims is not None:
-            SqueezeOptionsAddSqueezeDims(builder, squeezeDims)
-        squeezeOptions = SqueezeOptionsEnd(builder)
-        return squeezeOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class StridedSliceOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = StridedSliceOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsStridedSliceOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def StridedSliceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # StridedSliceOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # StridedSliceOptions
-    def BeginMask(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # StridedSliceOptions
-    def EndMask(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # StridedSliceOptions
-    def EllipsisMask(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # StridedSliceOptions
-    def NewAxisMask(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # StridedSliceOptions
-    def ShrinkAxisMask(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # StridedSliceOptions
-    def Offset(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-def StridedSliceOptionsStart(builder): builder.StartObject(6)
-def Start(builder):
-    return StridedSliceOptionsStart(builder)
-def StridedSliceOptionsAddBeginMask(builder, beginMask): builder.PrependInt32Slot(0, beginMask, 0)
-def AddBeginMask(builder, beginMask):
-    return StridedSliceOptionsAddBeginMask(builder, beginMask)
-def StridedSliceOptionsAddEndMask(builder, endMask): builder.PrependInt32Slot(1, endMask, 0)
-def AddEndMask(builder, endMask):
-    return StridedSliceOptionsAddEndMask(builder, endMask)
-def StridedSliceOptionsAddEllipsisMask(builder, ellipsisMask): builder.PrependInt32Slot(2, ellipsisMask, 0)
-def AddEllipsisMask(builder, ellipsisMask):
-    return StridedSliceOptionsAddEllipsisMask(builder, ellipsisMask)
-def StridedSliceOptionsAddNewAxisMask(builder, newAxisMask): builder.PrependInt32Slot(3, newAxisMask, 0)
-def AddNewAxisMask(builder, newAxisMask):
-    return StridedSliceOptionsAddNewAxisMask(builder, newAxisMask)
-def StridedSliceOptionsAddShrinkAxisMask(builder, shrinkAxisMask): builder.PrependInt32Slot(4, shrinkAxisMask, 0)
-def AddShrinkAxisMask(builder, shrinkAxisMask):
-    return StridedSliceOptionsAddShrinkAxisMask(builder, shrinkAxisMask)
-def StridedSliceOptionsAddOffset(builder, offset): builder.PrependBoolSlot(5, offset, 0)
-def AddOffset(builder, offset):
-    return StridedSliceOptionsAddOffset(builder, offset)
-def StridedSliceOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return StridedSliceOptionsEnd(builder)
-
-class StridedSliceOptionsT(object):
-
-    # StridedSliceOptionsT
-    def __init__(self):
-        self.beginMask = 0  # type: int
-        self.endMask = 0  # type: int
-        self.ellipsisMask = 0  # type: int
-        self.newAxisMask = 0  # type: int
-        self.shrinkAxisMask = 0  # type: int
-        self.offset = False  # type: bool
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        stridedSliceOptions = StridedSliceOptions()
-        stridedSliceOptions.Init(buf, pos)
-        return cls.InitFromObj(stridedSliceOptions)
-
-    @classmethod
-    def InitFromObj(cls, stridedSliceOptions):
-        x = StridedSliceOptionsT()
-        x._UnPack(stridedSliceOptions)
-        return x
-
-    # StridedSliceOptionsT
-    def _UnPack(self, stridedSliceOptions):
-        if stridedSliceOptions is None:
-            return
-        self.beginMask = stridedSliceOptions.BeginMask()
-        self.endMask = stridedSliceOptions.EndMask()
-        self.ellipsisMask = stridedSliceOptions.EllipsisMask()
-        self.newAxisMask = stridedSliceOptions.NewAxisMask()
-        self.shrinkAxisMask = stridedSliceOptions.ShrinkAxisMask()
-        self.offset = stridedSliceOptions.Offset()
-
-    # StridedSliceOptionsT
-    def Pack(self, builder):
-        StridedSliceOptionsStart(builder)
-        StridedSliceOptionsAddBeginMask(builder, self.beginMask)
-        StridedSliceOptionsAddEndMask(builder, self.endMask)
-        StridedSliceOptionsAddEllipsisMask(builder, self.ellipsisMask)
-        StridedSliceOptionsAddNewAxisMask(builder, self.newAxisMask)
-        StridedSliceOptionsAddShrinkAxisMask(builder, self.shrinkAxisMask)
-        StridedSliceOptionsAddOffset(builder, self.offset)
-        stridedSliceOptions = StridedSliceOptionsEnd(builder)
-        return stridedSliceOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
 
-from flatbuffers.compat import import_numpy
-np = import_numpy()
 
 class SubGraph(object):
     __slots__ = ['_tab']
@@ -11903,39 +17268,40 @@
             return self._tab.String(o + self._tab.Pos)
         return None
 
-def SubGraphStart(builder): builder.StartObject(5)
-def Start(builder):
-    return SubGraphStart(builder)
-def SubGraphAddTensors(builder, tensors): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(tensors), 0)
-def AddTensors(builder, tensors):
-    return SubGraphAddTensors(builder, tensors)
-def SubGraphStartTensorsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def StartTensorsVector(builder, numElems):
-    return SubGraphStartTensorsVector(builder, numElems)
-def SubGraphAddInputs(builder, inputs): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0)
-def AddInputs(builder, inputs):
-    return SubGraphAddInputs(builder, inputs)
-def SubGraphStartInputsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def StartInputsVector(builder, numElems):
-    return SubGraphStartInputsVector(builder, numElems)
-def SubGraphAddOutputs(builder, outputs): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0)
-def AddOutputs(builder, outputs):
-    return SubGraphAddOutputs(builder, outputs)
-def SubGraphStartOutputsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def StartOutputsVector(builder, numElems):
-    return SubGraphStartOutputsVector(builder, numElems)
-def SubGraphAddOperators(builder, operators): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(operators), 0)
-def AddOperators(builder, operators):
-    return SubGraphAddOperators(builder, operators)
-def SubGraphStartOperatorsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def StartOperatorsVector(builder, numElems):
-    return SubGraphStartOperatorsVector(builder, numElems)
-def SubGraphAddName(builder, name): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
-def AddName(builder, name):
-    return SubGraphAddName(builder, name)
-def SubGraphEnd(builder): return builder.EndObject()
-def End(builder):
-    return SubGraphEnd(builder)
+def SubGraphStart(builder):
+    builder.StartObject(5)
+
+def SubGraphAddTensors(builder, tensors):
+    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(tensors), 0)
+
+def SubGraphStartTensorsVector(builder, numElems):
+    return builder.StartVector(4, numElems, 4)
+
+def SubGraphAddInputs(builder, inputs):
+    builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0)
+
+def SubGraphStartInputsVector(builder, numElems):
+    return builder.StartVector(4, numElems, 4)
+
+def SubGraphAddOutputs(builder, outputs):
+    builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0)
+
+def SubGraphStartOutputsVector(builder, numElems):
+    return builder.StartVector(4, numElems, 4)
+
+def SubGraphAddOperators(builder, operators):
+    builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(operators), 0)
+
+def SubGraphStartOperatorsVector(builder, numElems):
+    return builder.StartVector(4, numElems, 4)
+
+def SubGraphAddName(builder, name):
+    builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
+
+def SubGraphEnd(builder):
+    return builder.EndObject()
+
+
 try:
     from typing import List
 except:
@@ -11958,6 +17324,11 @@
         return cls.InitFromObj(subGraph)
 
     @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
     def InitFromObj(cls, subGraph):
         x = SubGraphT()
         x._UnPack(subGraph)
@@ -12048,427 +17419,245 @@
             SubGraphAddName(builder, name)
         subGraph = SubGraphEnd(builder)
         return subGraph
-# automatically generated by the FlatBuffers compiler, do not modify
 
-# namespace: tflite
 
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class SubOptions(object):
+class Buffer(object):
     __slots__ = ['_tab']
 
     @classmethod
     def GetRootAs(cls, buf, offset=0):
         n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = SubOptions()
+        x = Buffer()
         x.Init(buf, n + offset)
         return x
 
     @classmethod
-    def GetRootAsSubOptions(cls, buf, offset=0):
+    def GetRootAsBuffer(cls, buf, offset=0):
         """This method is deprecated. Please switch to GetRootAs."""
         return cls.GetRootAs(buf, offset)
     @classmethod
-    def SubOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+    def BufferBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
         return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
 
-    # SubOptions
+    # Buffer
     def Init(self, buf, pos):
         self._tab = flatbuffers.table.Table(buf, pos)
 
-    # SubOptions
-    def FusedActivationFunction(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # SubOptions
-    def PotScaleInt16(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return True
-
-def SubOptionsStart(builder): builder.StartObject(2)
-def Start(builder):
-    return SubOptionsStart(builder)
-def SubOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0)
-def AddFusedActivationFunction(builder, fusedActivationFunction):
-    return SubOptionsAddFusedActivationFunction(builder, fusedActivationFunction)
-def SubOptionsAddPotScaleInt16(builder, potScaleInt16): builder.PrependBoolSlot(1, potScaleInt16, 1)
-def AddPotScaleInt16(builder, potScaleInt16):
-    return SubOptionsAddPotScaleInt16(builder, potScaleInt16)
-def SubOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return SubOptionsEnd(builder)
-
-class SubOptionsT(object):
-
-    # SubOptionsT
-    def __init__(self):
-        self.fusedActivationFunction = 0  # type: int
-        self.potScaleInt16 = True  # type: bool
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        subOptions = SubOptions()
-        subOptions.Init(buf, pos)
-        return cls.InitFromObj(subOptions)
-
-    @classmethod
-    def InitFromObj(cls, subOptions):
-        x = SubOptionsT()
-        x._UnPack(subOptions)
-        return x
-
-    # SubOptionsT
-    def _UnPack(self, subOptions):
-        if subOptions is None:
-            return
-        self.fusedActivationFunction = subOptions.FusedActivationFunction()
-        self.potScaleInt16 = subOptions.PotScaleInt16()
-
-    # SubOptionsT
-    def Pack(self, builder):
-        SubOptionsStart(builder)
-        SubOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        SubOptionsAddPotScaleInt16(builder, self.potScaleInt16)
-        subOptions = SubOptionsEnd(builder)
-        return subOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class Tensor(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = Tensor()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsTensor(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def TensorBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # Tensor
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # Tensor
-    def Shape(self, j):
+    # Buffer
+    def Data(self, j):
         o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
         if o != 0:
             a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+            return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
         return 0
 
-    # Tensor
-    def ShapeAsNumpy(self):
+    # Buffer
+    def DataAsNumpy(self):
         o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
         if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
+            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
         return 0
 
-    # Tensor
-    def ShapeLength(self):
+    # Buffer
+    def DataLength(self):
         o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
         if o != 0:
             return self._tab.VectorLen(o)
         return 0
 
-    # Tensor
-    def ShapeIsNone(self):
+    # Buffer
+    def DataIsNone(self):
         o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
         return o == 0
 
-    # Tensor
-    def Type(self):
+    # Buffer
+    def Offset(self):
         o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
         if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+            return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
         return 0
 
-    # Tensor
-    def Buffer(self):
+    # Buffer
+    def Size(self):
         o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
         if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
+            return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
         return 0
 
-    # Tensor
+def BufferStart(builder):
+    builder.StartObject(3)
+
+def BufferAddData(builder, data):
+    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(data), 0)
+
+def BufferStartDataVector(builder, numElems):
+    return builder.StartVector(1, numElems, 1)
+
+def BufferAddOffset(builder, offset):
+    builder.PrependUint64Slot(1, offset, 0)
+
+def BufferAddSize(builder, size):
+    builder.PrependUint64Slot(2, size, 0)
+
+def BufferEnd(builder):
+    return builder.EndObject()
+
+
+try:
+    from typing import List
+except:
+    pass
+
+class BufferT(object):
+
+    # BufferT
+    def __init__(self):
+        self.data = None  # type: List[int]
+        self.offset = 0  # type: int
+        self.size = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        buffer = Buffer()
+        buffer.Init(buf, pos)
+        return cls.InitFromObj(buffer)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, buffer):
+        x = BufferT()
+        x._UnPack(buffer)
+        return x
+
+    # BufferT
+    def _UnPack(self, buffer):
+        if buffer is None:
+            return
+        if not buffer.DataIsNone():
+            if np is None:
+                self.data = []
+                for i in range(buffer.DataLength()):
+                    self.data.append(buffer.Data(i))
+            else:
+                self.data = buffer.DataAsNumpy()
+        self.offset = buffer.Offset()
+        self.size = buffer.Size()
+
+    # BufferT
+    def Pack(self, builder):
+        if self.data is not None:
+            if np is not None and type(self.data) is np.ndarray:
+                data = builder.CreateNumpyVector(self.data)
+            else:
+                BufferStartDataVector(builder, len(self.data))
+                for i in reversed(range(len(self.data))):
+                    builder.PrependUint8(self.data[i])
+                data = builder.EndVector()
+        BufferStart(builder)
+        if self.data is not None:
+            BufferAddData(builder, data)
+        BufferAddOffset(builder, self.offset)
+        BufferAddSize(builder, self.size)
+        buffer = BufferEnd(builder)
+        return buffer
+
+
+class Metadata(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = Metadata()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsMetadata(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def MetadataBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # Metadata
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # Metadata
     def Name(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
         if o != 0:
             return self._tab.String(o + self._tab.Pos)
         return None
 
-    # Tensor
-    def Quantization(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+    # Metadata
+    def Buffer(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
         if o != 0:
-            x = self._tab.Indirect(o + self._tab.Pos)
-            obj = QuantizationParameters()
-            obj.Init(self._tab.Bytes, x)
-            return obj
-        return None
-
-    # Tensor
-    def IsVariable(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-    # Tensor
-    def Sparsity(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
-        if o != 0:
-            x = self._tab.Indirect(o + self._tab.Pos)
-            obj = SparsityParameters()
-            obj.Init(self._tab.Bytes, x)
-            return obj
-        return None
-
-    # Tensor
-    def ShapeSignature(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+            return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
         return 0
 
-    # Tensor
-    def ShapeSignatureAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
-        return 0
+def MetadataStart(builder):
+    builder.StartObject(2)
 
-    # Tensor
-    def ShapeSignatureLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
+def MetadataAddName(builder, name):
+    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
 
-    # Tensor
-    def ShapeSignatureIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
-        return o == 0
+def MetadataAddBuffer(builder, buffer):
+    builder.PrependUint32Slot(1, buffer, 0)
 
-    # Tensor
-    def HasRank(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
+def MetadataEnd(builder):
+    return builder.EndObject()
 
-    # Tensor
-    def VariantTensors(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
-        if o != 0:
-            x = self._tab.Vector(o)
-            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
-            x = self._tab.Indirect(x)
-            obj = VariantSubType()
-            obj.Init(self._tab.Bytes, x)
-            return obj
-        return None
 
-    # Tensor
-    def VariantTensorsLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
-        if o != 0:
-            return self._tab.VectorLen(o)
-        return 0
 
-    # Tensor
-    def VariantTensorsIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
-        return o == 0
+class MetadataT(object):
 
-def TensorStart(builder): builder.StartObject(10)
-def Start(builder):
-    return TensorStart(builder)
-def TensorAddShape(builder, shape): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(shape), 0)
-def AddShape(builder, shape):
-    return TensorAddShape(builder, shape)
-def TensorStartShapeVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def StartShapeVector(builder, numElems):
-    return TensorStartShapeVector(builder, numElems)
-def TensorAddType(builder, type): builder.PrependInt8Slot(1, type, 0)
-def AddType(builder, type):
-    return TensorAddType(builder, type)
-def TensorAddBuffer(builder, buffer): builder.PrependUint32Slot(2, buffer, 0)
-def AddBuffer(builder, buffer):
-    return TensorAddBuffer(builder, buffer)
-def TensorAddName(builder, name): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
-def AddName(builder, name):
-    return TensorAddName(builder, name)
-def TensorAddQuantization(builder, quantization): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(quantization), 0)
-def AddQuantization(builder, quantization):
-    return TensorAddQuantization(builder, quantization)
-def TensorAddIsVariable(builder, isVariable): builder.PrependBoolSlot(5, isVariable, 0)
-def AddIsVariable(builder, isVariable):
-    return TensorAddIsVariable(builder, isVariable)
-def TensorAddSparsity(builder, sparsity): builder.PrependUOffsetTRelativeSlot(6, flatbuffers.number_types.UOffsetTFlags.py_type(sparsity), 0)
-def AddSparsity(builder, sparsity):
-    return TensorAddSparsity(builder, sparsity)
-def TensorAddShapeSignature(builder, shapeSignature): builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(shapeSignature), 0)
-def AddShapeSignature(builder, shapeSignature):
-    return TensorAddShapeSignature(builder, shapeSignature)
-def TensorStartShapeSignatureVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def StartShapeSignatureVector(builder, numElems):
-    return TensorStartShapeSignatureVector(builder, numElems)
-def TensorAddHasRank(builder, hasRank): builder.PrependBoolSlot(8, hasRank, 0)
-def AddHasRank(builder, hasRank):
-    return TensorAddHasRank(builder, hasRank)
-def TensorAddVariantTensors(builder, variantTensors): builder.PrependUOffsetTRelativeSlot(9, flatbuffers.number_types.UOffsetTFlags.py_type(variantTensors), 0)
-def AddVariantTensors(builder, variantTensors):
-    return TensorAddVariantTensors(builder, variantTensors)
-def TensorStartVariantTensorsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def StartVariantTensorsVector(builder, numElems):
-    return TensorStartVariantTensorsVector(builder, numElems)
-def TensorEnd(builder): return builder.EndObject()
-def End(builder):
-    return TensorEnd(builder)
-try:
-    from typing import List, Optional
-except:
-    pass
-
-class TensorT(object):
-
-    # TensorT
+    # MetadataT
     def __init__(self):
-        self.shape = None  # type: List[int]
-        self.type = 0  # type: int
-        self.buffer = 0  # type: int
         self.name = None  # type: str
-        self.quantization = None  # type: Optional[QuantizationParametersT]
-        self.isVariable = False  # type: bool
-        self.sparsity = None  # type: Optional[SparsityParametersT]
-        self.shapeSignature = None  # type: List[int]
-        self.hasRank = False  # type: bool
-        self.variantTensors = None  # type: List[VariantSubTypeT]
+        self.buffer = 0  # type: int
 
     @classmethod
     def InitFromBuf(cls, buf, pos):
-        tensor = Tensor()
-        tensor.Init(buf, pos)
-        return cls.InitFromObj(tensor)
+        metadata = Metadata()
+        metadata.Init(buf, pos)
+        return cls.InitFromObj(metadata)
 
     @classmethod
-    def InitFromObj(cls, tensor):
-        x = TensorT()
-        x._UnPack(tensor)
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, metadata):
+        x = MetadataT()
+        x._UnPack(metadata)
         return x
 
-    # TensorT
-    def _UnPack(self, tensor):
-        if tensor is None:
+    # MetadataT
+    def _UnPack(self, metadata):
+        if metadata is None:
             return
-        if not tensor.ShapeIsNone():
-            if np is None:
-                self.shape = []
-                for i in range(tensor.ShapeLength()):
-                    self.shape.append(tensor.Shape(i))
-            else:
-                self.shape = tensor.ShapeAsNumpy()
-        self.type = tensor.Type()
-        self.buffer = tensor.Buffer()
-        self.name = tensor.Name()
-        if tensor.Quantization() is not None:
-            self.quantization = QuantizationParametersT.InitFromObj(tensor.Quantization())
-        self.isVariable = tensor.IsVariable()
-        if tensor.Sparsity() is not None:
-            self.sparsity = SparsityParametersT.InitFromObj(tensor.Sparsity())
-        if not tensor.ShapeSignatureIsNone():
-            if np is None:
-                self.shapeSignature = []
-                for i in range(tensor.ShapeSignatureLength()):
-                    self.shapeSignature.append(tensor.ShapeSignature(i))
-            else:
-                self.shapeSignature = tensor.ShapeSignatureAsNumpy()
-        self.hasRank = tensor.HasRank()
-        if not tensor.VariantTensorsIsNone():
-            self.variantTensors = []
-            for i in range(tensor.VariantTensorsLength()):
-                if tensor.VariantTensors(i) is None:
-                    self.variantTensors.append(None)
-                else:
-                    variantSubType_ = VariantSubTypeT.InitFromObj(tensor.VariantTensors(i))
-                    self.variantTensors.append(variantSubType_)
+        self.name = metadata.Name()
+        self.buffer = metadata.Buffer()
 
-    # TensorT
+    # MetadataT
     def Pack(self, builder):
-        if self.shape is not None:
-            if np is not None and type(self.shape) is np.ndarray:
-                shape = builder.CreateNumpyVector(self.shape)
-            else:
-                TensorStartShapeVector(builder, len(self.shape))
-                for i in reversed(range(len(self.shape))):
-                    builder.PrependInt32(self.shape[i])
-                shape = builder.EndVector()
         if self.name is not None:
             name = builder.CreateString(self.name)
-        if self.quantization is not None:
-            quantization = self.quantization.Pack(builder)
-        if self.sparsity is not None:
-            sparsity = self.sparsity.Pack(builder)
-        if self.shapeSignature is not None:
-            if np is not None and type(self.shapeSignature) is np.ndarray:
-                shapeSignature = builder.CreateNumpyVector(self.shapeSignature)
-            else:
-                TensorStartShapeSignatureVector(builder, len(self.shapeSignature))
-                for i in reversed(range(len(self.shapeSignature))):
-                    builder.PrependInt32(self.shapeSignature[i])
-                shapeSignature = builder.EndVector()
-        if self.variantTensors is not None:
-            variantTensorslist = []
-            for i in range(len(self.variantTensors)):
-                variantTensorslist.append(self.variantTensors[i].Pack(builder))
-            TensorStartVariantTensorsVector(builder, len(self.variantTensors))
-            for i in reversed(range(len(self.variantTensors))):
-                builder.PrependUOffsetTRelative(variantTensorslist[i])
-            variantTensors = builder.EndVector()
-        TensorStart(builder)
-        if self.shape is not None:
-            TensorAddShape(builder, shape)
-        TensorAddType(builder, self.type)
-        TensorAddBuffer(builder, self.buffer)
+        MetadataStart(builder)
         if self.name is not None:
-            TensorAddName(builder, name)
-        if self.quantization is not None:
-            TensorAddQuantization(builder, quantization)
-        TensorAddIsVariable(builder, self.isVariable)
-        if self.sparsity is not None:
-            TensorAddSparsity(builder, sparsity)
-        if self.shapeSignature is not None:
-            TensorAddShapeSignature(builder, shapeSignature)
-        TensorAddHasRank(builder, self.hasRank)
-        if self.variantTensors is not None:
-            TensorAddVariantTensors(builder, variantTensors)
-        tensor = TensorEnd(builder)
-        return tensor
-# automatically generated by the FlatBuffers compiler, do not modify
+            MetadataAddName(builder, name)
+        MetadataAddBuffer(builder, self.buffer)
+        metadata = MetadataEnd(builder)
+        return metadata
 
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
 
 class TensorMap(object):
     __slots__ = ['_tab']
@@ -12506,18 +17695,19 @@
             return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
         return 0
 
-def TensorMapStart(builder): builder.StartObject(2)
-def Start(builder):
-    return TensorMapStart(builder)
-def TensorMapAddName(builder, name): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
-def AddName(builder, name):
-    return TensorMapAddName(builder, name)
-def TensorMapAddTensorIndex(builder, tensorIndex): builder.PrependUint32Slot(1, tensorIndex, 0)
-def AddTensorIndex(builder, tensorIndex):
-    return TensorMapAddTensorIndex(builder, tensorIndex)
-def TensorMapEnd(builder): return builder.EndObject()
-def End(builder):
-    return TensorMapEnd(builder)
+def TensorMapStart(builder):
+    builder.StartObject(2)
+
+def TensorMapAddName(builder, name):
+    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
+
+def TensorMapAddTensorIndex(builder, tensorIndex):
+    builder.PrependUint32Slot(1, tensorIndex, 0)
+
+def TensorMapEnd(builder):
+    return builder.EndObject()
+
+
 
 class TensorMapT(object):
 
@@ -12533,6 +17723,11 @@
         return cls.InitFromObj(tensorMap)
 
     @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
     def InitFromObj(cls, tensorMap):
         x = TensorMapT()
         x._UnPack(tensorMap)
@@ -12555,1587 +17750,593 @@
         TensorMapAddTensorIndex(builder, self.tensorIndex)
         tensorMap = TensorMapEnd(builder)
         return tensorMap
-# automatically generated by the FlatBuffers compiler, do not modify
 
-# namespace: tflite
 
-class TensorType(object):
-    FLOAT32 = 0
-    FLOAT16 = 1
-    INT32 = 2
-    UINT8 = 3
-    INT64 = 4
-    STRING = 5
-    BOOL = 6
-    INT16 = 7
-    COMPLEX64 = 8
-    INT8 = 9
-    FLOAT64 = 10
-    COMPLEX128 = 11
-    UINT64 = 12
-    RESOURCE = 13
-    VARIANT = 14
-    UINT32 = 15
-    UINT16 = 16
-    INT4 = 17
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class TileOptions(object):
+class SignatureDef(object):
     __slots__ = ['_tab']
 
     @classmethod
     def GetRootAs(cls, buf, offset=0):
         n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = TileOptions()
+        x = SignatureDef()
         x.Init(buf, n + offset)
         return x
 
     @classmethod
-    def GetRootAsTileOptions(cls, buf, offset=0):
+    def GetRootAsSignatureDef(cls, buf, offset=0):
         """This method is deprecated. Please switch to GetRootAs."""
         return cls.GetRootAs(buf, offset)
     @classmethod
-    def TileOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+    def SignatureDefBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
         return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
 
-    # TileOptions
+    # SignatureDef
     def Init(self, buf, pos):
         self._tab = flatbuffers.table.Table(buf, pos)
 
-def TileOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return TileOptionsStart(builder)
-def TileOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return TileOptionsEnd(builder)
-
-class TileOptionsT(object):
-
-    # TileOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        tileOptions = TileOptions()
-        tileOptions.Init(buf, pos)
-        return cls.InitFromObj(tileOptions)
-
-    @classmethod
-    def InitFromObj(cls, tileOptions):
-        x = TileOptionsT()
-        x._UnPack(tileOptions)
-        return x
-
-    # TileOptionsT
-    def _UnPack(self, tileOptions):
-        if tileOptions is None:
-            return
-
-    # TileOptionsT
-    def Pack(self, builder):
-        TileOptionsStart(builder)
-        tileOptions = TileOptionsEnd(builder)
-        return tileOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class TopKV2Options(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = TopKV2Options()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsTopKV2Options(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def TopKV2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # TopKV2Options
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def TopKV2OptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return TopKV2OptionsStart(builder)
-def TopKV2OptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return TopKV2OptionsEnd(builder)
-
-class TopKV2OptionsT(object):
-
-    # TopKV2OptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        topKv2options = TopKV2Options()
-        topKv2options.Init(buf, pos)
-        return cls.InitFromObj(topKv2options)
-
-    @classmethod
-    def InitFromObj(cls, topKv2options):
-        x = TopKV2OptionsT()
-        x._UnPack(topKv2options)
-        return x
-
-    # TopKV2OptionsT
-    def _UnPack(self, topKv2options):
-        if topKv2options is None:
-            return
-
-    # TopKV2OptionsT
-    def Pack(self, builder):
-        TopKV2OptionsStart(builder)
-        topKv2options = TopKV2OptionsEnd(builder)
-        return topKv2options
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class TransposeConvOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = TransposeConvOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsTransposeConvOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def TransposeConvOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # TransposeConvOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # TransposeConvOptions
-    def Padding(self):
+    # SignatureDef
+    def Inputs(self, j):
         o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
         if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
+            x = self._tab.Vector(o)
+            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
+            x = self._tab.Indirect(x)
+            obj = TensorMap()
+            obj.Init(self._tab.Bytes, x)
+            return obj
+        return None
 
-    # TransposeConvOptions
-    def StrideW(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # TransposeConvOptions
-    def StrideH(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # TransposeConvOptions
-    def FusedActivationFunction(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-def TransposeConvOptionsStart(builder): builder.StartObject(4)
-def Start(builder):
-    return TransposeConvOptionsStart(builder)
-def TransposeConvOptionsAddPadding(builder, padding): builder.PrependInt8Slot(0, padding, 0)
-def AddPadding(builder, padding):
-    return TransposeConvOptionsAddPadding(builder, padding)
-def TransposeConvOptionsAddStrideW(builder, strideW): builder.PrependInt32Slot(1, strideW, 0)
-def AddStrideW(builder, strideW):
-    return TransposeConvOptionsAddStrideW(builder, strideW)
-def TransposeConvOptionsAddStrideH(builder, strideH): builder.PrependInt32Slot(2, strideH, 0)
-def AddStrideH(builder, strideH):
-    return TransposeConvOptionsAddStrideH(builder, strideH)
-def TransposeConvOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(3, fusedActivationFunction, 0)
-def AddFusedActivationFunction(builder, fusedActivationFunction):
-    return TransposeConvOptionsAddFusedActivationFunction(builder, fusedActivationFunction)
-def TransposeConvOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return TransposeConvOptionsEnd(builder)
-
-class TransposeConvOptionsT(object):
-
-    # TransposeConvOptionsT
-    def __init__(self):
-        self.padding = 0  # type: int
-        self.strideW = 0  # type: int
-        self.strideH = 0  # type: int
-        self.fusedActivationFunction = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        transposeConvOptions = TransposeConvOptions()
-        transposeConvOptions.Init(buf, pos)
-        return cls.InitFromObj(transposeConvOptions)
-
-    @classmethod
-    def InitFromObj(cls, transposeConvOptions):
-        x = TransposeConvOptionsT()
-        x._UnPack(transposeConvOptions)
-        return x
-
-    # TransposeConvOptionsT
-    def _UnPack(self, transposeConvOptions):
-        if transposeConvOptions is None:
-            return
-        self.padding = transposeConvOptions.Padding()
-        self.strideW = transposeConvOptions.StrideW()
-        self.strideH = transposeConvOptions.StrideH()
-        self.fusedActivationFunction = transposeConvOptions.FusedActivationFunction()
-
-    # TransposeConvOptionsT
-    def Pack(self, builder):
-        TransposeConvOptionsStart(builder)
-        TransposeConvOptionsAddPadding(builder, self.padding)
-        TransposeConvOptionsAddStrideW(builder, self.strideW)
-        TransposeConvOptionsAddStrideH(builder, self.strideH)
-        TransposeConvOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        transposeConvOptions = TransposeConvOptionsEnd(builder)
-        return transposeConvOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class TransposeOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = TransposeOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsTransposeOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def TransposeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # TransposeOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def TransposeOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return TransposeOptionsStart(builder)
-def TransposeOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return TransposeOptionsEnd(builder)
-
-class TransposeOptionsT(object):
-
-    # TransposeOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        transposeOptions = TransposeOptions()
-        transposeOptions.Init(buf, pos)
-        return cls.InitFromObj(transposeOptions)
-
-    @classmethod
-    def InitFromObj(cls, transposeOptions):
-        x = TransposeOptionsT()
-        x._UnPack(transposeOptions)
-        return x
-
-    # TransposeOptionsT
-    def _UnPack(self, transposeOptions):
-        if transposeOptions is None:
-            return
-
-    # TransposeOptionsT
-    def Pack(self, builder):
-        TransposeOptionsStart(builder)
-        transposeOptions = TransposeOptionsEnd(builder)
-        return transposeOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class Uint16Vector(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = Uint16Vector()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsUint16Vector(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def Uint16VectorBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # Uint16Vector
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # Uint16Vector
-    def Values(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Uint16Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 2))
-        return 0
-
-    # Uint16Vector
-    def ValuesAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint16Flags, o)
-        return 0
-
-    # Uint16Vector
-    def ValuesLength(self):
+    # SignatureDef
+    def InputsLength(self):
         o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
         if o != 0:
             return self._tab.VectorLen(o)
         return 0
 
-    # Uint16Vector
-    def ValuesIsNone(self):
+    # SignatureDef
+    def InputsIsNone(self):
         o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
         return o == 0
 
-def Uint16VectorStart(builder): builder.StartObject(1)
-def Start(builder):
-    return Uint16VectorStart(builder)
-def Uint16VectorAddValues(builder, values): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(values), 0)
-def AddValues(builder, values):
-    return Uint16VectorAddValues(builder, values)
-def Uint16VectorStartValuesVector(builder, numElems): return builder.StartVector(2, numElems, 2)
-def StartValuesVector(builder, numElems):
-    return Uint16VectorStartValuesVector(builder, numElems)
-def Uint16VectorEnd(builder): return builder.EndObject()
-def End(builder):
-    return Uint16VectorEnd(builder)
-try:
-    from typing import List
-except:
-    pass
-
-class Uint16VectorT(object):
-
-    # Uint16VectorT
-    def __init__(self):
-        self.values = None  # type: List[int]
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        uint16vector = Uint16Vector()
-        uint16vector.Init(buf, pos)
-        return cls.InitFromObj(uint16vector)
-
-    @classmethod
-    def InitFromObj(cls, uint16vector):
-        x = Uint16VectorT()
-        x._UnPack(uint16vector)
-        return x
-
-    # Uint16VectorT
-    def _UnPack(self, uint16vector):
-        if uint16vector is None:
-            return
-        if not uint16vector.ValuesIsNone():
-            if np is None:
-                self.values = []
-                for i in range(uint16vector.ValuesLength()):
-                    self.values.append(uint16vector.Values(i))
-            else:
-                self.values = uint16vector.ValuesAsNumpy()
-
-    # Uint16VectorT
-    def Pack(self, builder):
-        if self.values is not None:
-            if np is not None and type(self.values) is np.ndarray:
-                values = builder.CreateNumpyVector(self.values)
-            else:
-                Uint16VectorStartValuesVector(builder, len(self.values))
-                for i in reversed(range(len(self.values))):
-                    builder.PrependUint16(self.values[i])
-                values = builder.EndVector()
-        Uint16VectorStart(builder)
-        if self.values is not None:
-            Uint16VectorAddValues(builder, values)
-        uint16vector = Uint16VectorEnd(builder)
-        return uint16vector
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class Uint8Vector(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = Uint8Vector()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsUint8Vector(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def Uint8VectorBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # Uint8Vector
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # Uint8Vector
-    def Values(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+    # SignatureDef
+    def Outputs(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
         if o != 0:
-            a = self._tab.Vector(o)
-            return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
-        return 0
+            x = self._tab.Vector(o)
+            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
+            x = self._tab.Indirect(x)
+            obj = TensorMap()
+            obj.Init(self._tab.Bytes, x)
+            return obj
+        return None
 
-    # Uint8Vector
-    def ValuesAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
-        return 0
-
-    # Uint8Vector
-    def ValuesLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+    # SignatureDef
+    def OutputsLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
         if o != 0:
             return self._tab.VectorLen(o)
         return 0
 
-    # Uint8Vector
-    def ValuesIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+    # SignatureDef
+    def OutputsIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
         return o == 0
 
-def Uint8VectorStart(builder): builder.StartObject(1)
-def Start(builder):
-    return Uint8VectorStart(builder)
-def Uint8VectorAddValues(builder, values): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(values), 0)
-def AddValues(builder, values):
-    return Uint8VectorAddValues(builder, values)
-def Uint8VectorStartValuesVector(builder, numElems): return builder.StartVector(1, numElems, 1)
-def StartValuesVector(builder, numElems):
-    return Uint8VectorStartValuesVector(builder, numElems)
-def Uint8VectorEnd(builder): return builder.EndObject()
-def End(builder):
-    return Uint8VectorEnd(builder)
-try:
-    from typing import List
-except:
-    pass
-
-class Uint8VectorT(object):
-
-    # Uint8VectorT
-    def __init__(self):
-        self.values = None  # type: List[int]
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        uint8vector = Uint8Vector()
-        uint8vector.Init(buf, pos)
-        return cls.InitFromObj(uint8vector)
-
-    @classmethod
-    def InitFromObj(cls, uint8vector):
-        x = Uint8VectorT()
-        x._UnPack(uint8vector)
-        return x
-
-    # Uint8VectorT
-    def _UnPack(self, uint8vector):
-        if uint8vector is None:
-            return
-        if not uint8vector.ValuesIsNone():
-            if np is None:
-                self.values = []
-                for i in range(uint8vector.ValuesLength()):
-                    self.values.append(uint8vector.Values(i))
-            else:
-                self.values = uint8vector.ValuesAsNumpy()
-
-    # Uint8VectorT
-    def Pack(self, builder):
-        if self.values is not None:
-            if np is not None and type(self.values) is np.ndarray:
-                values = builder.CreateNumpyVector(self.values)
-            else:
-                Uint8VectorStartValuesVector(builder, len(self.values))
-                for i in reversed(range(len(self.values))):
-                    builder.PrependUint8(self.values[i])
-                values = builder.EndVector()
-        Uint8VectorStart(builder)
-        if self.values is not None:
-            Uint8VectorAddValues(builder, values)
-        uint8vector = Uint8VectorEnd(builder)
-        return uint8vector
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class UnidirectionalSequenceLSTMOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = UnidirectionalSequenceLSTMOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsUnidirectionalSequenceLSTMOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def UnidirectionalSequenceLSTMOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # UnidirectionalSequenceLSTMOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # UnidirectionalSequenceLSTMOptions
-    def FusedActivationFunction(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 0
-
-    # UnidirectionalSequenceLSTMOptions
-    def CellClip(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
-        return 0.0
-
-    # UnidirectionalSequenceLSTMOptions
-    def ProjClip(self):
+    # SignatureDef
+    def SignatureKey(self):
         o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
         if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
-        return 0.0
+            return self._tab.String(o + self._tab.Pos)
+        return None
 
-    # UnidirectionalSequenceLSTMOptions
-    def TimeMajor(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-    # UnidirectionalSequenceLSTMOptions
-    def AsymmetricQuantizeInputs(self):
+    # SignatureDef
+    def SubgraphIndex(self):
         o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
         if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
+            return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
+        return 0
 
-    # UnidirectionalSequenceLSTMOptions
-    def DiagonalRecurrentTensors(self):
+def SignatureDefStart(builder):
+    builder.StartObject(5)
+
+def SignatureDefAddInputs(builder, inputs):
+    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0)
+
+def SignatureDefStartInputsVector(builder, numElems):
+    return builder.StartVector(4, numElems, 4)
+
+def SignatureDefAddOutputs(builder, outputs):
+    builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0)
+
+def SignatureDefStartOutputsVector(builder, numElems):
+    return builder.StartVector(4, numElems, 4)
+
+def SignatureDefAddSignatureKey(builder, signatureKey):
+    builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(signatureKey), 0)
+
+def SignatureDefAddSubgraphIndex(builder, subgraphIndex):
+    builder.PrependUint32Slot(4, subgraphIndex, 0)
+
+def SignatureDefEnd(builder):
+    return builder.EndObject()
+
+
+try:
+    from typing import List
+except:
+    pass
+
+class SignatureDefT(object):
+
+    # SignatureDefT
+    def __init__(self):
+        self.inputs = None  # type: List[TensorMapT]
+        self.outputs = None  # type: List[TensorMapT]
+        self.signatureKey = None  # type: str
+        self.subgraphIndex = 0  # type: int
+
+    @classmethod
+    def InitFromBuf(cls, buf, pos):
+        signatureDef = SignatureDef()
+        signatureDef.Init(buf, pos)
+        return cls.InitFromObj(signatureDef)
+
+    @classmethod
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, signatureDef):
+        x = SignatureDefT()
+        x._UnPack(signatureDef)
+        return x
+
+    # SignatureDefT
+    def _UnPack(self, signatureDef):
+        if signatureDef is None:
+            return
+        if not signatureDef.InputsIsNone():
+            self.inputs = []
+            for i in range(signatureDef.InputsLength()):
+                if signatureDef.Inputs(i) is None:
+                    self.inputs.append(None)
+                else:
+                    tensorMap_ = TensorMapT.InitFromObj(signatureDef.Inputs(i))
+                    self.inputs.append(tensorMap_)
+        if not signatureDef.OutputsIsNone():
+            self.outputs = []
+            for i in range(signatureDef.OutputsLength()):
+                if signatureDef.Outputs(i) is None:
+                    self.outputs.append(None)
+                else:
+                    tensorMap_ = TensorMapT.InitFromObj(signatureDef.Outputs(i))
+                    self.outputs.append(tensorMap_)
+        self.signatureKey = signatureDef.SignatureKey()
+        self.subgraphIndex = signatureDef.SubgraphIndex()
+
+    # SignatureDefT
+    def Pack(self, builder):
+        if self.inputs is not None:
+            inputslist = []
+            for i in range(len(self.inputs)):
+                inputslist.append(self.inputs[i].Pack(builder))
+            SignatureDefStartInputsVector(builder, len(self.inputs))
+            for i in reversed(range(len(self.inputs))):
+                builder.PrependUOffsetTRelative(inputslist[i])
+            inputs = builder.EndVector()
+        if self.outputs is not None:
+            outputslist = []
+            for i in range(len(self.outputs)):
+                outputslist.append(self.outputs[i].Pack(builder))
+            SignatureDefStartOutputsVector(builder, len(self.outputs))
+            for i in reversed(range(len(self.outputs))):
+                builder.PrependUOffsetTRelative(outputslist[i])
+            outputs = builder.EndVector()
+        if self.signatureKey is not None:
+            signatureKey = builder.CreateString(self.signatureKey)
+        SignatureDefStart(builder)
+        if self.inputs is not None:
+            SignatureDefAddInputs(builder, inputs)
+        if self.outputs is not None:
+            SignatureDefAddOutputs(builder, outputs)
+        if self.signatureKey is not None:
+            SignatureDefAddSignatureKey(builder, signatureKey)
+        SignatureDefAddSubgraphIndex(builder, self.subgraphIndex)
+        signatureDef = SignatureDefEnd(builder)
+        return signatureDef
+
+
+class Model(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = Model()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsModel(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    @classmethod
+    def ModelBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
+
+    # Model
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # Model
+    def Version(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
+        return 0
+
+    # Model
+    def OperatorCodes(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            x = self._tab.Vector(o)
+            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
+            x = self._tab.Indirect(x)
+            obj = OperatorCode()
+            obj.Init(self._tab.Bytes, x)
+            return obj
+        return None
+
+    # Model
+    def OperatorCodesLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # Model
+    def OperatorCodesIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        return o == 0
+
+    # Model
+    def Subgraphs(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            x = self._tab.Vector(o)
+            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
+            x = self._tab.Indirect(x)
+            obj = SubGraph()
+            obj.Init(self._tab.Bytes, x)
+            return obj
+        return None
+
+    # Model
+    def SubgraphsLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # Model
+    def SubgraphsIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+        return o == 0
+
+    # Model
+    def Description(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+        if o != 0:
+            return self._tab.String(o + self._tab.Pos)
+        return None
+
+    # Model
+    def Buffers(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+        if o != 0:
+            x = self._tab.Vector(o)
+            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
+            x = self._tab.Indirect(x)
+            obj = Buffer()
+            obj.Init(self._tab.Bytes, x)
+            return obj
+        return None
+
+    # Model
+    def BuffersLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # Model
+    def BuffersIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+        return o == 0
+
+    # Model
+    def MetadataBuffer(self, j):
         o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
         if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
-
-def UnidirectionalSequenceLSTMOptionsStart(builder): builder.StartObject(6)
-def Start(builder):
-    return UnidirectionalSequenceLSTMOptionsStart(builder)
-def UnidirectionalSequenceLSTMOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0)
-def AddFusedActivationFunction(builder, fusedActivationFunction):
-    return UnidirectionalSequenceLSTMOptionsAddFusedActivationFunction(builder, fusedActivationFunction)
-def UnidirectionalSequenceLSTMOptionsAddCellClip(builder, cellClip): builder.PrependFloat32Slot(1, cellClip, 0.0)
-def AddCellClip(builder, cellClip):
-    return UnidirectionalSequenceLSTMOptionsAddCellClip(builder, cellClip)
-def UnidirectionalSequenceLSTMOptionsAddProjClip(builder, projClip): builder.PrependFloat32Slot(2, projClip, 0.0)
-def AddProjClip(builder, projClip):
-    return UnidirectionalSequenceLSTMOptionsAddProjClip(builder, projClip)
-def UnidirectionalSequenceLSTMOptionsAddTimeMajor(builder, timeMajor): builder.PrependBoolSlot(3, timeMajor, 0)
-def AddTimeMajor(builder, timeMajor):
-    return UnidirectionalSequenceLSTMOptionsAddTimeMajor(builder, timeMajor)
-def UnidirectionalSequenceLSTMOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): builder.PrependBoolSlot(4, asymmetricQuantizeInputs, 0)
-def AddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs):
-    return UnidirectionalSequenceLSTMOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs)
-def UnidirectionalSequenceLSTMOptionsAddDiagonalRecurrentTensors(builder, diagonalRecurrentTensors): builder.PrependBoolSlot(5, diagonalRecurrentTensors, 0)
-def AddDiagonalRecurrentTensors(builder, diagonalRecurrentTensors):
-    return UnidirectionalSequenceLSTMOptionsAddDiagonalRecurrentTensors(builder, diagonalRecurrentTensors)
-def UnidirectionalSequenceLSTMOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return UnidirectionalSequenceLSTMOptionsEnd(builder)
-
-class UnidirectionalSequenceLSTMOptionsT(object):
-
-    # UnidirectionalSequenceLSTMOptionsT
-    def __init__(self):
-        self.fusedActivationFunction = 0  # type: int
-        self.cellClip = 0.0  # type: float
-        self.projClip = 0.0  # type: float
-        self.timeMajor = False  # type: bool
-        self.asymmetricQuantizeInputs = False  # type: bool
-        self.diagonalRecurrentTensors = False  # type: bool
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        unidirectionalSequenceLstmoptions = UnidirectionalSequenceLSTMOptions()
-        unidirectionalSequenceLstmoptions.Init(buf, pos)
-        return cls.InitFromObj(unidirectionalSequenceLstmoptions)
-
-    @classmethod
-    def InitFromObj(cls, unidirectionalSequenceLstmoptions):
-        x = UnidirectionalSequenceLSTMOptionsT()
-        x._UnPack(unidirectionalSequenceLstmoptions)
-        return x
-
-    # UnidirectionalSequenceLSTMOptionsT
-    def _UnPack(self, unidirectionalSequenceLstmoptions):
-        if unidirectionalSequenceLstmoptions is None:
-            return
-        self.fusedActivationFunction = unidirectionalSequenceLstmoptions.FusedActivationFunction()
-        self.cellClip = unidirectionalSequenceLstmoptions.CellClip()
-        self.projClip = unidirectionalSequenceLstmoptions.ProjClip()
-        self.timeMajor = unidirectionalSequenceLstmoptions.TimeMajor()
-        self.asymmetricQuantizeInputs = unidirectionalSequenceLstmoptions.AsymmetricQuantizeInputs()
-        self.diagonalRecurrentTensors = unidirectionalSequenceLstmoptions.DiagonalRecurrentTensors()
-
-    # UnidirectionalSequenceLSTMOptionsT
-    def Pack(self, builder):
-        UnidirectionalSequenceLSTMOptionsStart(builder)
-        UnidirectionalSequenceLSTMOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
-        UnidirectionalSequenceLSTMOptionsAddCellClip(builder, self.cellClip)
-        UnidirectionalSequenceLSTMOptionsAddProjClip(builder, self.projClip)
-        UnidirectionalSequenceLSTMOptionsAddTimeMajor(builder, self.timeMajor)
-        UnidirectionalSequenceLSTMOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs)
-        UnidirectionalSequenceLSTMOptionsAddDiagonalRecurrentTensors(builder, self.diagonalRecurrentTensors)
-        unidirectionalSequenceLstmoptions = UnidirectionalSequenceLSTMOptionsEnd(builder)
-        return unidirectionalSequenceLstmoptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class UniqueOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = UniqueOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsUniqueOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def UniqueOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # UniqueOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # UniqueOptions
-    def IdxOutType(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
-        return 2
-
-def UniqueOptionsStart(builder): builder.StartObject(1)
-def Start(builder):
-    return UniqueOptionsStart(builder)
-def UniqueOptionsAddIdxOutType(builder, idxOutType): builder.PrependInt8Slot(0, idxOutType, 2)
-def AddIdxOutType(builder, idxOutType):
-    return UniqueOptionsAddIdxOutType(builder, idxOutType)
-def UniqueOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return UniqueOptionsEnd(builder)
-
-class UniqueOptionsT(object):
-
-    # UniqueOptionsT
-    def __init__(self):
-        self.idxOutType = 2  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        uniqueOptions = UniqueOptions()
-        uniqueOptions.Init(buf, pos)
-        return cls.InitFromObj(uniqueOptions)
-
-    @classmethod
-    def InitFromObj(cls, uniqueOptions):
-        x = UniqueOptionsT()
-        x._UnPack(uniqueOptions)
-        return x
-
-    # UniqueOptionsT
-    def _UnPack(self, uniqueOptions):
-        if uniqueOptions is None:
-            return
-        self.idxOutType = uniqueOptions.IdxOutType()
-
-    # UniqueOptionsT
-    def Pack(self, builder):
-        UniqueOptionsStart(builder)
-        UniqueOptionsAddIdxOutType(builder, self.idxOutType)
-        uniqueOptions = UniqueOptionsEnd(builder)
-        return uniqueOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class UnpackOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = UnpackOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsUnpackOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def UnpackOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # UnpackOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # UnpackOptions
-    def Num(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # UnpackOptions
-    def Axis(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-def UnpackOptionsStart(builder): builder.StartObject(2)
-def Start(builder):
-    return UnpackOptionsStart(builder)
-def UnpackOptionsAddNum(builder, num): builder.PrependInt32Slot(0, num, 0)
-def AddNum(builder, num):
-    return UnpackOptionsAddNum(builder, num)
-def UnpackOptionsAddAxis(builder, axis): builder.PrependInt32Slot(1, axis, 0)
-def AddAxis(builder, axis):
-    return UnpackOptionsAddAxis(builder, axis)
-def UnpackOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return UnpackOptionsEnd(builder)
-
-class UnpackOptionsT(object):
-
-    # UnpackOptionsT
-    def __init__(self):
-        self.num = 0  # type: int
-        self.axis = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        unpackOptions = UnpackOptions()
-        unpackOptions.Init(buf, pos)
-        return cls.InitFromObj(unpackOptions)
-
-    @classmethod
-    def InitFromObj(cls, unpackOptions):
-        x = UnpackOptionsT()
-        x._UnPack(unpackOptions)
-        return x
-
-    # UnpackOptionsT
-    def _UnPack(self, unpackOptions):
-        if unpackOptions is None:
-            return
-        self.num = unpackOptions.Num()
-        self.axis = unpackOptions.Axis()
-
-    # UnpackOptionsT
-    def Pack(self, builder):
-        UnpackOptionsStart(builder)
-        UnpackOptionsAddNum(builder, self.num)
-        UnpackOptionsAddAxis(builder, self.axis)
-        unpackOptions = UnpackOptionsEnd(builder)
-        return unpackOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class UnsortedSegmentMaxOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = UnsortedSegmentMaxOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsUnsortedSegmentMaxOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def UnsortedSegmentMaxOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # UnsortedSegmentMaxOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def UnsortedSegmentMaxOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return UnsortedSegmentMaxOptionsStart(builder)
-def UnsortedSegmentMaxOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return UnsortedSegmentMaxOptionsEnd(builder)
-
-class UnsortedSegmentMaxOptionsT(object):
-
-    # UnsortedSegmentMaxOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        unsortedSegmentMaxOptions = UnsortedSegmentMaxOptions()
-        unsortedSegmentMaxOptions.Init(buf, pos)
-        return cls.InitFromObj(unsortedSegmentMaxOptions)
-
-    @classmethod
-    def InitFromObj(cls, unsortedSegmentMaxOptions):
-        x = UnsortedSegmentMaxOptionsT()
-        x._UnPack(unsortedSegmentMaxOptions)
-        return x
-
-    # UnsortedSegmentMaxOptionsT
-    def _UnPack(self, unsortedSegmentMaxOptions):
-        if unsortedSegmentMaxOptions is None:
-            return
-
-    # UnsortedSegmentMaxOptionsT
-    def Pack(self, builder):
-        UnsortedSegmentMaxOptionsStart(builder)
-        unsortedSegmentMaxOptions = UnsortedSegmentMaxOptionsEnd(builder)
-        return unsortedSegmentMaxOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class UnsortedSegmentMinOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = UnsortedSegmentMinOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsUnsortedSegmentMinOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def UnsortedSegmentMinOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # UnsortedSegmentMinOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def UnsortedSegmentMinOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return UnsortedSegmentMinOptionsStart(builder)
-def UnsortedSegmentMinOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return UnsortedSegmentMinOptionsEnd(builder)
-
-class UnsortedSegmentMinOptionsT(object):
-
-    # UnsortedSegmentMinOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        unsortedSegmentMinOptions = UnsortedSegmentMinOptions()
-        unsortedSegmentMinOptions.Init(buf, pos)
-        return cls.InitFromObj(unsortedSegmentMinOptions)
-
-    @classmethod
-    def InitFromObj(cls, unsortedSegmentMinOptions):
-        x = UnsortedSegmentMinOptionsT()
-        x._UnPack(unsortedSegmentMinOptions)
-        return x
-
-    # UnsortedSegmentMinOptionsT
-    def _UnPack(self, unsortedSegmentMinOptions):
-        if unsortedSegmentMinOptions is None:
-            return
-
-    # UnsortedSegmentMinOptionsT
-    def Pack(self, builder):
-        UnsortedSegmentMinOptionsStart(builder)
-        unsortedSegmentMinOptions = UnsortedSegmentMinOptionsEnd(builder)
-        return unsortedSegmentMinOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class UnsortedSegmentProdOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = UnsortedSegmentProdOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsUnsortedSegmentProdOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def UnsortedSegmentProdOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # UnsortedSegmentProdOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def UnsortedSegmentProdOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return UnsortedSegmentProdOptionsStart(builder)
-def UnsortedSegmentProdOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return UnsortedSegmentProdOptionsEnd(builder)
-
-class UnsortedSegmentProdOptionsT(object):
-
-    # UnsortedSegmentProdOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        unsortedSegmentProdOptions = UnsortedSegmentProdOptions()
-        unsortedSegmentProdOptions.Init(buf, pos)
-        return cls.InitFromObj(unsortedSegmentProdOptions)
-
-    @classmethod
-    def InitFromObj(cls, unsortedSegmentProdOptions):
-        x = UnsortedSegmentProdOptionsT()
-        x._UnPack(unsortedSegmentProdOptions)
-        return x
-
-    # UnsortedSegmentProdOptionsT
-    def _UnPack(self, unsortedSegmentProdOptions):
-        if unsortedSegmentProdOptions is None:
-            return
-
-    # UnsortedSegmentProdOptionsT
-    def Pack(self, builder):
-        UnsortedSegmentProdOptionsStart(builder)
-        unsortedSegmentProdOptions = UnsortedSegmentProdOptionsEnd(builder)
-        return unsortedSegmentProdOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class UnsortedSegmentSumOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = UnsortedSegmentSumOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsUnsortedSegmentSumOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def UnsortedSegmentSumOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # UnsortedSegmentSumOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def UnsortedSegmentSumOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return UnsortedSegmentSumOptionsStart(builder)
-def UnsortedSegmentSumOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return UnsortedSegmentSumOptionsEnd(builder)
-
-class UnsortedSegmentSumOptionsT(object):
-
-    # UnsortedSegmentSumOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        unsortedSegmentSumOptions = UnsortedSegmentSumOptions()
-        unsortedSegmentSumOptions.Init(buf, pos)
-        return cls.InitFromObj(unsortedSegmentSumOptions)
-
-    @classmethod
-    def InitFromObj(cls, unsortedSegmentSumOptions):
-        x = UnsortedSegmentSumOptionsT()
-        x._UnPack(unsortedSegmentSumOptions)
-        return x
-
-    # UnsortedSegmentSumOptionsT
-    def _UnPack(self, unsortedSegmentSumOptions):
-        if unsortedSegmentSumOptions is None:
-            return
-
-    # UnsortedSegmentSumOptionsT
-    def Pack(self, builder):
-        UnsortedSegmentSumOptionsStart(builder)
-        unsortedSegmentSumOptions = UnsortedSegmentSumOptionsEnd(builder)
-        return unsortedSegmentSumOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class VarHandleOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = VarHandleOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsVarHandleOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def VarHandleOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # VarHandleOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # VarHandleOptions
-    def Container(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.String(o + self._tab.Pos)
-        return None
-
-    # VarHandleOptions
-    def SharedName(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.String(o + self._tab.Pos)
-        return None
-
-def VarHandleOptionsStart(builder): builder.StartObject(2)
-def Start(builder):
-    return VarHandleOptionsStart(builder)
-def VarHandleOptionsAddContainer(builder, container): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(container), 0)
-def AddContainer(builder, container):
-    return VarHandleOptionsAddContainer(builder, container)
-def VarHandleOptionsAddSharedName(builder, sharedName): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(sharedName), 0)
-def AddSharedName(builder, sharedName):
-    return VarHandleOptionsAddSharedName(builder, sharedName)
-def VarHandleOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return VarHandleOptionsEnd(builder)
-
-class VarHandleOptionsT(object):
-
-    # VarHandleOptionsT
-    def __init__(self):
-        self.container = None  # type: str
-        self.sharedName = None  # type: str
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        varHandleOptions = VarHandleOptions()
-        varHandleOptions.Init(buf, pos)
-        return cls.InitFromObj(varHandleOptions)
-
-    @classmethod
-    def InitFromObj(cls, varHandleOptions):
-        x = VarHandleOptionsT()
-        x._UnPack(varHandleOptions)
-        return x
-
-    # VarHandleOptionsT
-    def _UnPack(self, varHandleOptions):
-        if varHandleOptions is None:
-            return
-        self.container = varHandleOptions.Container()
-        self.sharedName = varHandleOptions.SharedName()
-
-    # VarHandleOptionsT
-    def Pack(self, builder):
-        if self.container is not None:
-            container = builder.CreateString(self.container)
-        if self.sharedName is not None:
-            sharedName = builder.CreateString(self.sharedName)
-        VarHandleOptionsStart(builder)
-        if self.container is not None:
-            VarHandleOptionsAddContainer(builder, container)
-        if self.sharedName is not None:
-            VarHandleOptionsAddSharedName(builder, sharedName)
-        varHandleOptions = VarHandleOptionsEnd(builder)
-        return varHandleOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class VariantSubType(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = VariantSubType()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsVariantSubType(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def VariantSubTypeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # VariantSubType
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # VariantSubType
-    def Shape(self, j):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
             a = self._tab.Vector(o)
             return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
         return 0
 
-    # VariantSubType
-    def ShapeAsNumpy(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+    # Model
+    def MetadataBufferAsNumpy(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
         if o != 0:
             return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
         return 0
 
-    # VariantSubType
-    def ShapeLength(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+    # Model
+    def MetadataBufferLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
         if o != 0:
             return self._tab.VectorLen(o)
         return 0
 
-    # VariantSubType
-    def ShapeIsNone(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+    # Model
+    def MetadataBufferIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
         return o == 0
 
-    # VariantSubType
-    def Type(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+    # Model
+    def Metadata(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
         if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+            x = self._tab.Vector(o)
+            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
+            x = self._tab.Indirect(x)
+            obj = Metadata()
+            obj.Init(self._tab.Bytes, x)
+            return obj
+        return None
+
+    # Model
+    def MetadataLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
+        if o != 0:
+            return self._tab.VectorLen(o)
         return 0
 
-    # VariantSubType
-    def HasRank(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
-        if o != 0:
-            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
-        return False
+    # Model
+    def MetadataIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
+        return o == 0
 
-def VariantSubTypeStart(builder): builder.StartObject(3)
-def Start(builder):
-    return VariantSubTypeStart(builder)
-def VariantSubTypeAddShape(builder, shape): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(shape), 0)
-def AddShape(builder, shape):
-    return VariantSubTypeAddShape(builder, shape)
-def VariantSubTypeStartShapeVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def StartShapeVector(builder, numElems):
-    return VariantSubTypeStartShapeVector(builder, numElems)
-def VariantSubTypeAddType(builder, type): builder.PrependInt8Slot(1, type, 0)
-def AddType(builder, type):
-    return VariantSubTypeAddType(builder, type)
-def VariantSubTypeAddHasRank(builder, hasRank): builder.PrependBoolSlot(2, hasRank, 0)
-def AddHasRank(builder, hasRank):
-    return VariantSubTypeAddHasRank(builder, hasRank)
-def VariantSubTypeEnd(builder): return builder.EndObject()
-def End(builder):
-    return VariantSubTypeEnd(builder)
+    # Model
+    def SignatureDefs(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
+        if o != 0:
+            x = self._tab.Vector(o)
+            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
+            x = self._tab.Indirect(x)
+            obj = SignatureDef()
+            obj.Init(self._tab.Bytes, x)
+            return obj
+        return None
+
+    # Model
+    def SignatureDefsLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # Model
+    def SignatureDefsIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
+        return o == 0
+
+def ModelStart(builder):
+    builder.StartObject(8)
+
+def ModelAddVersion(builder, version):
+    builder.PrependUint32Slot(0, version, 0)
+
+def ModelAddOperatorCodes(builder, operatorCodes):
+    builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(operatorCodes), 0)
+
+def ModelStartOperatorCodesVector(builder, numElems):
+    return builder.StartVector(4, numElems, 4)
+
+def ModelAddSubgraphs(builder, subgraphs):
+    builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(subgraphs), 0)
+
+def ModelStartSubgraphsVector(builder, numElems):
+    return builder.StartVector(4, numElems, 4)
+
+def ModelAddDescription(builder, description):
+    builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(description), 0)
+
+def ModelAddBuffers(builder, buffers):
+    builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(buffers), 0)
+
+def ModelStartBuffersVector(builder, numElems):
+    return builder.StartVector(4, numElems, 4)
+
+def ModelAddMetadataBuffer(builder, metadataBuffer):
+    builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(metadataBuffer), 0)
+
+def ModelStartMetadataBufferVector(builder, numElems):
+    return builder.StartVector(4, numElems, 4)
+
+def ModelAddMetadata(builder, metadata):
+    builder.PrependUOffsetTRelativeSlot(6, flatbuffers.number_types.UOffsetTFlags.py_type(metadata), 0)
+
+def ModelStartMetadataVector(builder, numElems):
+    return builder.StartVector(4, numElems, 4)
+
+def ModelAddSignatureDefs(builder, signatureDefs):
+    builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(signatureDefs), 0)
+
+def ModelStartSignatureDefsVector(builder, numElems):
+    return builder.StartVector(4, numElems, 4)
+
+def ModelEnd(builder):
+    return builder.EndObject()
+
+
 try:
     from typing import List
 except:
     pass
 
-class VariantSubTypeT(object):
+class ModelT(object):
 
-    # VariantSubTypeT
+    # ModelT
     def __init__(self):
-        self.shape = None  # type: List[int]
-        self.type = 0  # type: int
-        self.hasRank = False  # type: bool
+        self.version = 0  # type: int
+        self.operatorCodes = None  # type: List[OperatorCodeT]
+        self.subgraphs = None  # type: List[SubGraphT]
+        self.description = None  # type: str
+        self.buffers = None  # type: List[BufferT]
+        self.metadataBuffer = None  # type: List[int]
+        self.metadata = None  # type: List[MetadataT]
+        self.signatureDefs = None  # type: List[SignatureDefT]
 
     @classmethod
     def InitFromBuf(cls, buf, pos):
-        variantSubType = VariantSubType()
-        variantSubType.Init(buf, pos)
-        return cls.InitFromObj(variantSubType)
+        model = Model()
+        model.Init(buf, pos)
+        return cls.InitFromObj(model)
 
     @classmethod
-    def InitFromObj(cls, variantSubType):
-        x = VariantSubTypeT()
-        x._UnPack(variantSubType)
+    def InitFromPackedBuf(cls, buf, pos=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+        return cls.InitFromBuf(buf, pos+n)
+
+    @classmethod
+    def InitFromObj(cls, model):
+        x = ModelT()
+        x._UnPack(model)
         return x
 
-    # VariantSubTypeT
-    def _UnPack(self, variantSubType):
-        if variantSubType is None:
+    # ModelT
+    def _UnPack(self, model):
+        if model is None:
             return
-        if not variantSubType.ShapeIsNone():
+        self.version = model.Version()
+        if not model.OperatorCodesIsNone():
+            self.operatorCodes = []
+            for i in range(model.OperatorCodesLength()):
+                if model.OperatorCodes(i) is None:
+                    self.operatorCodes.append(None)
+                else:
+                    operatorCode_ = OperatorCodeT.InitFromObj(model.OperatorCodes(i))
+                    self.operatorCodes.append(operatorCode_)
+        if not model.SubgraphsIsNone():
+            self.subgraphs = []
+            for i in range(model.SubgraphsLength()):
+                if model.Subgraphs(i) is None:
+                    self.subgraphs.append(None)
+                else:
+                    subGraph_ = SubGraphT.InitFromObj(model.Subgraphs(i))
+                    self.subgraphs.append(subGraph_)
+        self.description = model.Description()
+        if not model.BuffersIsNone():
+            self.buffers = []
+            for i in range(model.BuffersLength()):
+                if model.Buffers(i) is None:
+                    self.buffers.append(None)
+                else:
+                    buffer_ = BufferT.InitFromObj(model.Buffers(i))
+                    self.buffers.append(buffer_)
+        if not model.MetadataBufferIsNone():
             if np is None:
-                self.shape = []
-                for i in range(variantSubType.ShapeLength()):
-                    self.shape.append(variantSubType.Shape(i))
+                self.metadataBuffer = []
+                for i in range(model.MetadataBufferLength()):
+                    self.metadataBuffer.append(model.MetadataBuffer(i))
             else:
-                self.shape = variantSubType.ShapeAsNumpy()
-        self.type = variantSubType.Type()
-        self.hasRank = variantSubType.HasRank()
+                self.metadataBuffer = model.MetadataBufferAsNumpy()
+        if not model.MetadataIsNone():
+            self.metadata = []
+            for i in range(model.MetadataLength()):
+                if model.Metadata(i) is None:
+                    self.metadata.append(None)
+                else:
+                    metadata_ = MetadataT.InitFromObj(model.Metadata(i))
+                    self.metadata.append(metadata_)
+        if not model.SignatureDefsIsNone():
+            self.signatureDefs = []
+            for i in range(model.SignatureDefsLength()):
+                if model.SignatureDefs(i) is None:
+                    self.signatureDefs.append(None)
+                else:
+                    signatureDef_ = SignatureDefT.InitFromObj(model.SignatureDefs(i))
+                    self.signatureDefs.append(signatureDef_)
 
-    # VariantSubTypeT
+    # ModelT
     def Pack(self, builder):
-        if self.shape is not None:
-            if np is not None and type(self.shape) is np.ndarray:
-                shape = builder.CreateNumpyVector(self.shape)
+        if self.operatorCodes is not None:
+            operatorCodeslist = []
+            for i in range(len(self.operatorCodes)):
+                operatorCodeslist.append(self.operatorCodes[i].Pack(builder))
+            ModelStartOperatorCodesVector(builder, len(self.operatorCodes))
+            for i in reversed(range(len(self.operatorCodes))):
+                builder.PrependUOffsetTRelative(operatorCodeslist[i])
+            operatorCodes = builder.EndVector()
+        if self.subgraphs is not None:
+            subgraphslist = []
+            for i in range(len(self.subgraphs)):
+                subgraphslist.append(self.subgraphs[i].Pack(builder))
+            ModelStartSubgraphsVector(builder, len(self.subgraphs))
+            for i in reversed(range(len(self.subgraphs))):
+                builder.PrependUOffsetTRelative(subgraphslist[i])
+            subgraphs = builder.EndVector()
+        if self.description is not None:
+            description = builder.CreateString(self.description)
+        if self.buffers is not None:
+            bufferslist = []
+            for i in range(len(self.buffers)):
+                bufferslist.append(self.buffers[i].Pack(builder))
+            ModelStartBuffersVector(builder, len(self.buffers))
+            for i in reversed(range(len(self.buffers))):
+                builder.PrependUOffsetTRelative(bufferslist[i])
+            buffers = builder.EndVector()
+        if self.metadataBuffer is not None:
+            if np is not None and type(self.metadataBuffer) is np.ndarray:
+                metadataBuffer = builder.CreateNumpyVector(self.metadataBuffer)
             else:
-                VariantSubTypeStartShapeVector(builder, len(self.shape))
-                for i in reversed(range(len(self.shape))):
-                    builder.PrependInt32(self.shape[i])
-                shape = builder.EndVector()
-        VariantSubTypeStart(builder)
-        if self.shape is not None:
-            VariantSubTypeAddShape(builder, shape)
-        VariantSubTypeAddType(builder, self.type)
-        VariantSubTypeAddHasRank(builder, self.hasRank)
-        variantSubType = VariantSubTypeEnd(builder)
-        return variantSubType
-# automatically generated by the FlatBuffers compiler, do not modify
+                ModelStartMetadataBufferVector(builder, len(self.metadataBuffer))
+                for i in reversed(range(len(self.metadataBuffer))):
+                    builder.PrependInt32(self.metadataBuffer[i])
+                metadataBuffer = builder.EndVector()
+        if self.metadata is not None:
+            metadatalist = []
+            for i in range(len(self.metadata)):
+                metadatalist.append(self.metadata[i].Pack(builder))
+            ModelStartMetadataVector(builder, len(self.metadata))
+            for i in reversed(range(len(self.metadata))):
+                builder.PrependUOffsetTRelative(metadatalist[i])
+            metadata = builder.EndVector()
+        if self.signatureDefs is not None:
+            signatureDefslist = []
+            for i in range(len(self.signatureDefs)):
+                signatureDefslist.append(self.signatureDefs[i].Pack(builder))
+            ModelStartSignatureDefsVector(builder, len(self.signatureDefs))
+            for i in reversed(range(len(self.signatureDefs))):
+                builder.PrependUOffsetTRelative(signatureDefslist[i])
+            signatureDefs = builder.EndVector()
+        ModelStart(builder)
+        ModelAddVersion(builder, self.version)
+        if self.operatorCodes is not None:
+            ModelAddOperatorCodes(builder, operatorCodes)
+        if self.subgraphs is not None:
+            ModelAddSubgraphs(builder, subgraphs)
+        if self.description is not None:
+            ModelAddDescription(builder, description)
+        if self.buffers is not None:
+            ModelAddBuffers(builder, buffers)
+        if self.metadataBuffer is not None:
+            ModelAddMetadataBuffer(builder, metadataBuffer)
+        if self.metadata is not None:
+            ModelAddMetadata(builder, metadata)
+        if self.signatureDefs is not None:
+            ModelAddSignatureDefs(builder, signatureDefs)
+        model = ModelEnd(builder)
+        return model
 
-# namespace: tflite
 
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class WhereOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = WhereOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsWhereOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def WhereOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # WhereOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def WhereOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return WhereOptionsStart(builder)
-def WhereOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return WhereOptionsEnd(builder)
-
-class WhereOptionsT(object):
-
-    # WhereOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        whereOptions = WhereOptions()
-        whereOptions.Init(buf, pos)
-        return cls.InitFromObj(whereOptions)
-
-    @classmethod
-    def InitFromObj(cls, whereOptions):
-        x = WhereOptionsT()
-        x._UnPack(whereOptions)
-        return x
-
-    # WhereOptionsT
-    def _UnPack(self, whereOptions):
-        if whereOptions is None:
-            return
-
-    # WhereOptionsT
-    def Pack(self, builder):
-        WhereOptionsStart(builder)
-        whereOptions = WhereOptionsEnd(builder)
-        return whereOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class WhileOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = WhileOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsWhileOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def WhileOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # WhileOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-    # WhileOptions
-    def CondSubgraphIndex(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-    # WhileOptions
-    def BodySubgraphIndex(self):
-        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
-        if o != 0:
-            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
-        return 0
-
-def WhileOptionsStart(builder): builder.StartObject(2)
-def Start(builder):
-    return WhileOptionsStart(builder)
-def WhileOptionsAddCondSubgraphIndex(builder, condSubgraphIndex): builder.PrependInt32Slot(0, condSubgraphIndex, 0)
-def AddCondSubgraphIndex(builder, condSubgraphIndex):
-    return WhileOptionsAddCondSubgraphIndex(builder, condSubgraphIndex)
-def WhileOptionsAddBodySubgraphIndex(builder, bodySubgraphIndex): builder.PrependInt32Slot(1, bodySubgraphIndex, 0)
-def AddBodySubgraphIndex(builder, bodySubgraphIndex):
-    return WhileOptionsAddBodySubgraphIndex(builder, bodySubgraphIndex)
-def WhileOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return WhileOptionsEnd(builder)
-
-class WhileOptionsT(object):
-
-    # WhileOptionsT
-    def __init__(self):
-        self.condSubgraphIndex = 0  # type: int
-        self.bodySubgraphIndex = 0  # type: int
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        whileOptions = WhileOptions()
-        whileOptions.Init(buf, pos)
-        return cls.InitFromObj(whileOptions)
-
-    @classmethod
-    def InitFromObj(cls, whileOptions):
-        x = WhileOptionsT()
-        x._UnPack(whileOptions)
-        return x
-
-    # WhileOptionsT
-    def _UnPack(self, whileOptions):
-        if whileOptions is None:
-            return
-        self.condSubgraphIndex = whileOptions.CondSubgraphIndex()
-        self.bodySubgraphIndex = whileOptions.BodySubgraphIndex()
-
-    # WhileOptionsT
-    def Pack(self, builder):
-        WhileOptionsStart(builder)
-        WhileOptionsAddCondSubgraphIndex(builder, self.condSubgraphIndex)
-        WhileOptionsAddBodySubgraphIndex(builder, self.bodySubgraphIndex)
-        whileOptions = WhileOptionsEnd(builder)
-        return whileOptions
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tflite
-
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class ZerosLikeOptions(object):
-    __slots__ = ['_tab']
-
-    @classmethod
-    def GetRootAs(cls, buf, offset=0):
-        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
-        x = ZerosLikeOptions()
-        x.Init(buf, n + offset)
-        return x
-
-    @classmethod
-    def GetRootAsZerosLikeOptions(cls, buf, offset=0):
-        """This method is deprecated. Please switch to GetRootAs."""
-        return cls.GetRootAs(buf, offset)
-    @classmethod
-    def ZerosLikeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
-        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
-
-    # ZerosLikeOptions
-    def Init(self, buf, pos):
-        self._tab = flatbuffers.table.Table(buf, pos)
-
-def ZerosLikeOptionsStart(builder): builder.StartObject(0)
-def Start(builder):
-    return ZerosLikeOptionsStart(builder)
-def ZerosLikeOptionsEnd(builder): return builder.EndObject()
-def End(builder):
-    return ZerosLikeOptionsEnd(builder)
-
-class ZerosLikeOptionsT(object):
-
-    # ZerosLikeOptionsT
-    def __init__(self):
-        pass
-
-    @classmethod
-    def InitFromBuf(cls, buf, pos):
-        zerosLikeOptions = ZerosLikeOptions()
-        zerosLikeOptions.Init(buf, pos)
-        return cls.InitFromObj(zerosLikeOptions)
-
-    @classmethod
-    def InitFromObj(cls, zerosLikeOptions):
-        x = ZerosLikeOptionsT()
-        x._UnPack(zerosLikeOptions)
-        return x
-
-    # ZerosLikeOptionsT
-    def _UnPack(self, zerosLikeOptions):
-        if zerosLikeOptions is None:
-            return
-
-    # ZerosLikeOptionsT
-    def Pack(self, builder):
-        ZerosLikeOptionsStart(builder)
-        zerosLikeOptions = ZerosLikeOptionsEnd(builder)
-        return zerosLikeOptions
diff --git a/tensorflow/lite/schema/BUILD b/tensorflow/lite/schema/BUILD
index e87375a..199c105 100644
--- a/tensorflow/lite/schema/BUILD
+++ b/tensorflow/lite/schema/BUILD
@@ -11,13 +11,13 @@
 # bazel build schema_fbs_srcs.
 flatbuffer_cc_library(
     name = "schema_fbs",
-    srcs = ["schema.fbs"],
+    srcs = ["//tensorflow/compiler/mlir/lite/schema:schema.fbs"],
 )
 
 # Generic schema for inference on device (but with reflections makes bigger).
 flatbuffer_cc_library(
     name = "schema_fbs_with_reflection",
-    srcs = ["schema.fbs"],
+    srcs = ["//tensorflow/compiler/mlir/lite/schema:schema.fbs"],
     flatc_args = [
         "--reflect-types",
         "--reflect-names",
@@ -29,11 +29,8 @@
 
 cc_library(
     name = "schema_utils",
-    srcs = ["schema_utils.cc"],
     hdrs = ["schema_utils.h"],
     deps = [
-        ":schema_fbs",
-        "//tensorflow/lite/kernels/internal:compatibility",
-        "@flatbuffers//:runtime_cc",
+        "//tensorflow/compiler/mlir/lite/schema:schema_utils",
     ],
 )
diff --git a/tensorflow/lite/schema/schema_generated.h b/tensorflow/lite/schema/schema_generated.h
index c9d92f8..d728617 100755
--- a/tensorflow/lite/schema/schema_generated.h
+++ b/tensorflow/lite/schema/schema_generated.h
@@ -8,9 +8,9 @@
 
 // Ensure the included flatbuffers.h is the same version as when this file was
 // generated, otherwise it may not be compatible.
-static_assert(FLATBUFFERS_VERSION_MAJOR == 2 &&
-              FLATBUFFERS_VERSION_MINOR == 0 &&
-              FLATBUFFERS_VERSION_REVISION == 6,
+static_assert(FLATBUFFERS_VERSION_MAJOR == 23 &&
+              FLATBUFFERS_VERSION_MINOR == 5 &&
+              FLATBUFFERS_VERSION_REVISION == 26,
              "Non-compatible flatbuffers version included");
 
 namespace tflite {
@@ -51,6 +51,78 @@
 struct TensorBuilder;
 struct TensorT;
 
+struct StablehloGatherOptions;
+struct StablehloGatherOptionsBuilder;
+struct StablehloGatherOptionsT;
+
+struct StablehloTransposeOptions;
+struct StablehloTransposeOptionsBuilder;
+struct StablehloTransposeOptionsT;
+
+struct StablehloDotGeneralOptions;
+struct StablehloDotGeneralOptionsBuilder;
+struct StablehloDotGeneralOptionsT;
+
+struct StablehloReduceWindowOptions;
+struct StablehloReduceWindowOptionsBuilder;
+struct StablehloReduceWindowOptionsT;
+
+struct StablehloWhileOptions;
+struct StablehloWhileOptionsBuilder;
+struct StablehloWhileOptionsT;
+
+struct StablehloSortOptions;
+struct StablehloSortOptionsBuilder;
+struct StablehloSortOptionsT;
+
+struct StablehloConcatenateOptions;
+struct StablehloConcatenateOptionsBuilder;
+struct StablehloConcatenateOptionsT;
+
+struct StablehloBroadcastInDimOptions;
+struct StablehloBroadcastInDimOptionsBuilder;
+struct StablehloBroadcastInDimOptionsT;
+
+struct StablehloCompareOptions;
+struct StablehloCompareOptionsBuilder;
+struct StablehloCompareOptionsT;
+
+struct StablehloDynamicSliceOptions;
+struct StablehloDynamicSliceOptionsBuilder;
+struct StablehloDynamicSliceOptionsT;
+
+struct StablehloPadOptions;
+struct StablehloPadOptionsBuilder;
+struct StablehloPadOptionsT;
+
+struct StablehloIotaOptions;
+struct StablehloIotaOptionsBuilder;
+struct StablehloIotaOptionsT;
+
+struct StablehloCustomCallOptions;
+struct StablehloCustomCallOptionsBuilder;
+struct StablehloCustomCallOptionsT;
+
+struct StablehloReduceOptions;
+struct StablehloReduceOptionsBuilder;
+struct StablehloReduceOptionsT;
+
+struct StablehloSliceOptions;
+struct StablehloSliceOptionsBuilder;
+struct StablehloSliceOptionsT;
+
+struct StablehloConvolutionOptions;
+struct StablehloConvolutionOptionsBuilder;
+struct StablehloConvolutionOptionsT;
+
+struct StablehloScatterOptions;
+struct StablehloScatterOptionsBuilder;
+struct StablehloScatterOptionsT;
+
+struct StablehloRngBitGeneratorOptions;
+struct StablehloRngBitGeneratorOptionsBuilder;
+struct StablehloRngBitGeneratorOptionsT;
+
 struct Conv2DOptions;
 struct Conv2DOptionsBuilder;
 struct Conv2DOptionsT;
@@ -555,10 +627,22 @@
 struct RightShiftOptionsBuilder;
 struct RightShiftOptionsT;
 
+struct DilateOptions;
+struct DilateOptionsBuilder;
+struct DilateOptionsT;
+
+struct ReduceWindowOptions;
+struct ReduceWindowOptionsBuilder;
+struct ReduceWindowOptionsT;
+
 struct OperatorCode;
 struct OperatorCodeBuilder;
 struct OperatorCodeT;
 
+struct StableHLOCompositeOptions;
+struct StableHLOCompositeOptionsBuilder;
+struct StableHLOCompositeOptionsT;
+
 struct Operator;
 struct OperatorBuilder;
 struct OperatorT;
@@ -606,11 +690,12 @@
   TensorType_UINT32 = 15,
   TensorType_UINT16 = 16,
   TensorType_INT4 = 17,
+  TensorType_BFLOAT16 = 18,
   TensorType_MIN = TensorType_FLOAT32,
-  TensorType_MAX = TensorType_INT4
+  TensorType_MAX = TensorType_BFLOAT16
 };
 
-inline const TensorType (&EnumValuesTensorType())[18] {
+inline const TensorType (&EnumValuesTensorType())[19] {
   static const TensorType values[] = {
     TensorType_FLOAT32,
     TensorType_FLOAT16,
@@ -629,13 +714,14 @@
     TensorType_VARIANT,
     TensorType_UINT32,
     TensorType_UINT16,
-    TensorType_INT4
+    TensorType_INT4,
+    TensorType_BFLOAT16
   };
   return values;
 }
 
 inline const char * const *EnumNamesTensorType() {
-  static const char * const names[19] = {
+  static const char * const names[20] = {
     "FLOAT32",
     "FLOAT16",
     "INT32",
@@ -654,13 +740,14 @@
     "UINT32",
     "UINT16",
     "INT4",
+    "BFLOAT16",
     nullptr
   };
   return names;
 }
 
 inline const char *EnumNameTensorType(TensorType e) {
-  if (flatbuffers::IsOutRange(e, TensorType_FLOAT32, TensorType_INT4)) return "";
+  if (::flatbuffers::IsOutRange(e, TensorType_FLOAT32, TensorType_BFLOAT16)) return "";
   const size_t index = static_cast<size_t>(e);
   return EnumNamesTensorType()[index];
 }
@@ -690,7 +777,7 @@
 }
 
 inline const char *EnumNameQuantizationDetails(QuantizationDetails e) {
-  if (flatbuffers::IsOutRange(e, QuantizationDetails_NONE, QuantizationDetails_CustomQuantization)) return "";
+  if (::flatbuffers::IsOutRange(e, QuantizationDetails_NONE, QuantizationDetails_CustomQuantization)) return "";
   const size_t index = static_cast<size_t>(e);
   return EnumNamesQuantizationDetails()[index];
 }
@@ -738,8 +825,8 @@
     }
   }
 
-  static void *UnPack(const void *obj, QuantizationDetails type, const flatbuffers::resolver_function_t *resolver);
-  flatbuffers::Offset<void> Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher = nullptr) const;
+  static void *UnPack(const void *obj, QuantizationDetails type, const ::flatbuffers::resolver_function_t *resolver);
+  ::flatbuffers::Offset<void> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr) const;
 
   tflite::CustomQuantizationT *AsCustomQuantization() {
     return type == QuantizationDetails_CustomQuantization ?
@@ -751,8 +838,8 @@
   }
 };
 
-bool VerifyQuantizationDetails(flatbuffers::Verifier &verifier, const void *obj, QuantizationDetails type);
-bool VerifyQuantizationDetailsVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types);
+bool VerifyQuantizationDetails(::flatbuffers::Verifier &verifier, const void *obj, QuantizationDetails type);
+bool VerifyQuantizationDetailsVector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset<void>> *values, const ::flatbuffers::Vector<uint8_t> *types);
 
 enum DimensionType : int8_t {
   DimensionType_DENSE = 0,
@@ -779,7 +866,7 @@
 }
 
 inline const char *EnumNameDimensionType(DimensionType e) {
-  if (flatbuffers::IsOutRange(e, DimensionType_DENSE, DimensionType_SPARSE_CSR)) return "";
+  if (::flatbuffers::IsOutRange(e, DimensionType_DENSE, DimensionType_SPARSE_CSR)) return "";
   const size_t index = static_cast<size_t>(e);
   return EnumNamesDimensionType()[index];
 }
@@ -815,7 +902,7 @@
 }
 
 inline const char *EnumNameSparseIndexVector(SparseIndexVector e) {
-  if (flatbuffers::IsOutRange(e, SparseIndexVector_NONE, SparseIndexVector_Uint8Vector)) return "";
+  if (::flatbuffers::IsOutRange(e, SparseIndexVector_NONE, SparseIndexVector_Uint8Vector)) return "";
   const size_t index = static_cast<size_t>(e);
   return EnumNamesSparseIndexVector()[index];
 }
@@ -879,8 +966,8 @@
     }
   }
 
-  static void *UnPack(const void *obj, SparseIndexVector type, const flatbuffers::resolver_function_t *resolver);
-  flatbuffers::Offset<void> Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher = nullptr) const;
+  static void *UnPack(const void *obj, SparseIndexVector type, const ::flatbuffers::resolver_function_t *resolver);
+  ::flatbuffers::Offset<void> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr) const;
 
   tflite::Int32VectorT *AsInt32Vector() {
     return type == SparseIndexVector_Int32Vector ?
@@ -908,8 +995,8 @@
   }
 };
 
-bool VerifySparseIndexVector(flatbuffers::Verifier &verifier, const void *obj, SparseIndexVector type);
-bool VerifySparseIndexVectorVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types);
+bool VerifySparseIndexVector(::flatbuffers::Verifier &verifier, const void *obj, SparseIndexVector type);
+bool VerifySparseIndexVectorVector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset<void>> *values, const ::flatbuffers::Vector<uint8_t> *types);
 
 enum BuiltinOperator : int32_t {
   BuiltinOperator_ADD = 0,
@@ -1074,11 +1161,56 @@
   BuiltinOperator_BITCAST = 159,
   BuiltinOperator_BITWISE_XOR = 160,
   BuiltinOperator_RIGHT_SHIFT = 161,
+  BuiltinOperator_STABLEHLO_LOGISTIC = 162,
+  BuiltinOperator_STABLEHLO_ADD = 163,
+  BuiltinOperator_STABLEHLO_DIVIDE = 164,
+  BuiltinOperator_STABLEHLO_MULTIPLY = 165,
+  BuiltinOperator_STABLEHLO_MAXIMUM = 166,
+  BuiltinOperator_STABLEHLO_RESHAPE = 167,
+  BuiltinOperator_STABLEHLO_CLAMP = 168,
+  BuiltinOperator_STABLEHLO_CONCATENATE = 169,
+  BuiltinOperator_STABLEHLO_BROADCAST_IN_DIM = 170,
+  BuiltinOperator_STABLEHLO_CONVOLUTION = 171,
+  BuiltinOperator_STABLEHLO_SLICE = 172,
+  BuiltinOperator_STABLEHLO_CUSTOM_CALL = 173,
+  BuiltinOperator_STABLEHLO_REDUCE = 174,
+  BuiltinOperator_STABLEHLO_ABS = 175,
+  BuiltinOperator_STABLEHLO_AND = 176,
+  BuiltinOperator_STABLEHLO_COSINE = 177,
+  BuiltinOperator_STABLEHLO_EXPONENTIAL = 178,
+  BuiltinOperator_STABLEHLO_FLOOR = 179,
+  BuiltinOperator_STABLEHLO_LOG = 180,
+  BuiltinOperator_STABLEHLO_MINIMUM = 181,
+  BuiltinOperator_STABLEHLO_NEGATE = 182,
+  BuiltinOperator_STABLEHLO_OR = 183,
+  BuiltinOperator_STABLEHLO_POWER = 184,
+  BuiltinOperator_STABLEHLO_REMAINDER = 185,
+  BuiltinOperator_STABLEHLO_RSQRT = 186,
+  BuiltinOperator_STABLEHLO_SELECT = 187,
+  BuiltinOperator_STABLEHLO_SUBTRACT = 188,
+  BuiltinOperator_STABLEHLO_TANH = 189,
+  BuiltinOperator_STABLEHLO_SCATTER = 190,
+  BuiltinOperator_STABLEHLO_COMPARE = 191,
+  BuiltinOperator_STABLEHLO_CONVERT = 192,
+  BuiltinOperator_STABLEHLO_DYNAMIC_SLICE = 193,
+  BuiltinOperator_STABLEHLO_DYNAMIC_UPDATE_SLICE = 194,
+  BuiltinOperator_STABLEHLO_PAD = 195,
+  BuiltinOperator_STABLEHLO_IOTA = 196,
+  BuiltinOperator_STABLEHLO_DOT_GENERAL = 197,
+  BuiltinOperator_STABLEHLO_REDUCE_WINDOW = 198,
+  BuiltinOperator_STABLEHLO_SORT = 199,
+  BuiltinOperator_STABLEHLO_WHILE = 200,
+  BuiltinOperator_STABLEHLO_GATHER = 201,
+  BuiltinOperator_STABLEHLO_TRANSPOSE = 202,
+  BuiltinOperator_DILATE = 203,
+  BuiltinOperator_STABLEHLO_RNG_BIT_GENERATOR = 204,
+  BuiltinOperator_REDUCE_WINDOW = 205,
+  BuiltinOperator_STABLEHLO_COMPOSITE = 206,
   BuiltinOperator_MIN = BuiltinOperator_ADD,
-  BuiltinOperator_MAX = BuiltinOperator_RIGHT_SHIFT
+  BuiltinOperator_MAX = BuiltinOperator_STABLEHLO_COMPOSITE
 };
 
-inline const BuiltinOperator (&EnumValuesBuiltinOperator())[162] {
+inline const BuiltinOperator (&EnumValuesBuiltinOperator())[207] {
   static const BuiltinOperator values[] = {
     BuiltinOperator_ADD,
     BuiltinOperator_AVERAGE_POOL_2D,
@@ -1241,13 +1373,58 @@
     BuiltinOperator_SIGN,
     BuiltinOperator_BITCAST,
     BuiltinOperator_BITWISE_XOR,
-    BuiltinOperator_RIGHT_SHIFT
+    BuiltinOperator_RIGHT_SHIFT,
+    BuiltinOperator_STABLEHLO_LOGISTIC,
+    BuiltinOperator_STABLEHLO_ADD,
+    BuiltinOperator_STABLEHLO_DIVIDE,
+    BuiltinOperator_STABLEHLO_MULTIPLY,
+    BuiltinOperator_STABLEHLO_MAXIMUM,
+    BuiltinOperator_STABLEHLO_RESHAPE,
+    BuiltinOperator_STABLEHLO_CLAMP,
+    BuiltinOperator_STABLEHLO_CONCATENATE,
+    BuiltinOperator_STABLEHLO_BROADCAST_IN_DIM,
+    BuiltinOperator_STABLEHLO_CONVOLUTION,
+    BuiltinOperator_STABLEHLO_SLICE,
+    BuiltinOperator_STABLEHLO_CUSTOM_CALL,
+    BuiltinOperator_STABLEHLO_REDUCE,
+    BuiltinOperator_STABLEHLO_ABS,
+    BuiltinOperator_STABLEHLO_AND,
+    BuiltinOperator_STABLEHLO_COSINE,
+    BuiltinOperator_STABLEHLO_EXPONENTIAL,
+    BuiltinOperator_STABLEHLO_FLOOR,
+    BuiltinOperator_STABLEHLO_LOG,
+    BuiltinOperator_STABLEHLO_MINIMUM,
+    BuiltinOperator_STABLEHLO_NEGATE,
+    BuiltinOperator_STABLEHLO_OR,
+    BuiltinOperator_STABLEHLO_POWER,
+    BuiltinOperator_STABLEHLO_REMAINDER,
+    BuiltinOperator_STABLEHLO_RSQRT,
+    BuiltinOperator_STABLEHLO_SELECT,
+    BuiltinOperator_STABLEHLO_SUBTRACT,
+    BuiltinOperator_STABLEHLO_TANH,
+    BuiltinOperator_STABLEHLO_SCATTER,
+    BuiltinOperator_STABLEHLO_COMPARE,
+    BuiltinOperator_STABLEHLO_CONVERT,
+    BuiltinOperator_STABLEHLO_DYNAMIC_SLICE,
+    BuiltinOperator_STABLEHLO_DYNAMIC_UPDATE_SLICE,
+    BuiltinOperator_STABLEHLO_PAD,
+    BuiltinOperator_STABLEHLO_IOTA,
+    BuiltinOperator_STABLEHLO_DOT_GENERAL,
+    BuiltinOperator_STABLEHLO_REDUCE_WINDOW,
+    BuiltinOperator_STABLEHLO_SORT,
+    BuiltinOperator_STABLEHLO_WHILE,
+    BuiltinOperator_STABLEHLO_GATHER,
+    BuiltinOperator_STABLEHLO_TRANSPOSE,
+    BuiltinOperator_DILATE,
+    BuiltinOperator_STABLEHLO_RNG_BIT_GENERATOR,
+    BuiltinOperator_REDUCE_WINDOW,
+    BuiltinOperator_STABLEHLO_COMPOSITE
   };
   return values;
 }
 
 inline const char * const *EnumNamesBuiltinOperator() {
-  static const char * const names[163] = {
+  static const char * const names[208] = {
     "ADD",
     "AVERAGE_POOL_2D",
     "CONCATENATION",
@@ -1410,13 +1587,58 @@
     "BITCAST",
     "BITWISE_XOR",
     "RIGHT_SHIFT",
+    "STABLEHLO_LOGISTIC",
+    "STABLEHLO_ADD",
+    "STABLEHLO_DIVIDE",
+    "STABLEHLO_MULTIPLY",
+    "STABLEHLO_MAXIMUM",
+    "STABLEHLO_RESHAPE",
+    "STABLEHLO_CLAMP",
+    "STABLEHLO_CONCATENATE",
+    "STABLEHLO_BROADCAST_IN_DIM",
+    "STABLEHLO_CONVOLUTION",
+    "STABLEHLO_SLICE",
+    "STABLEHLO_CUSTOM_CALL",
+    "STABLEHLO_REDUCE",
+    "STABLEHLO_ABS",
+    "STABLEHLO_AND",
+    "STABLEHLO_COSINE",
+    "STABLEHLO_EXPONENTIAL",
+    "STABLEHLO_FLOOR",
+    "STABLEHLO_LOG",
+    "STABLEHLO_MINIMUM",
+    "STABLEHLO_NEGATE",
+    "STABLEHLO_OR",
+    "STABLEHLO_POWER",
+    "STABLEHLO_REMAINDER",
+    "STABLEHLO_RSQRT",
+    "STABLEHLO_SELECT",
+    "STABLEHLO_SUBTRACT",
+    "STABLEHLO_TANH",
+    "STABLEHLO_SCATTER",
+    "STABLEHLO_COMPARE",
+    "STABLEHLO_CONVERT",
+    "STABLEHLO_DYNAMIC_SLICE",
+    "STABLEHLO_DYNAMIC_UPDATE_SLICE",
+    "STABLEHLO_PAD",
+    "STABLEHLO_IOTA",
+    "STABLEHLO_DOT_GENERAL",
+    "STABLEHLO_REDUCE_WINDOW",
+    "STABLEHLO_SORT",
+    "STABLEHLO_WHILE",
+    "STABLEHLO_GATHER",
+    "STABLEHLO_TRANSPOSE",
+    "DILATE",
+    "STABLEHLO_RNG_BIT_GENERATOR",
+    "REDUCE_WINDOW",
+    "STABLEHLO_COMPOSITE",
     nullptr
   };
   return names;
 }
 
 inline const char *EnumNameBuiltinOperator(BuiltinOperator e) {
-  if (flatbuffers::IsOutRange(e, BuiltinOperator_ADD, BuiltinOperator_RIGHT_SHIFT)) return "";
+  if (::flatbuffers::IsOutRange(e, BuiltinOperator_ADD, BuiltinOperator_STABLEHLO_COMPOSITE)) return "";
   const size_t index = static_cast<size_t>(e);
   return EnumNamesBuiltinOperator()[index];
 }
@@ -1821,7 +2043,7 @@
 }
 
 inline const char *EnumNameBuiltinOptions(BuiltinOptions e) {
-  if (flatbuffers::IsOutRange(e, BuiltinOptions_NONE, BuiltinOptions_RightShiftOptions)) return "";
+  if (::flatbuffers::IsOutRange(e, BuiltinOptions_NONE, BuiltinOptions_RightShiftOptions)) return "";
   const size_t index = static_cast<size_t>(e);
   return EnumNamesBuiltinOptions()[index];
 }
@@ -2869,8 +3091,8 @@
     }
   }
 
-  static void *UnPack(const void *obj, BuiltinOptions type, const flatbuffers::resolver_function_t *resolver);
-  flatbuffers::Offset<void> Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher = nullptr) const;
+  static void *UnPack(const void *obj, BuiltinOptions type, const ::flatbuffers::resolver_function_t *resolver);
+  ::flatbuffers::Offset<void> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr) const;
 
   tflite::Conv2DOptionsT *AsConv2DOptions() {
     return type == BuiltinOptions_Conv2DOptions ?
@@ -3882,8 +4104,624 @@
   }
 };
 
-bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type);
-bool VerifyBuiltinOptionsVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types);
+bool VerifyBuiltinOptions(::flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type);
+bool VerifyBuiltinOptionsVector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset<void>> *values, const ::flatbuffers::Vector<uint8_t> *types);
+
+enum BuiltinOptions2 : uint8_t {
+  BuiltinOptions2_NONE = 0,
+  BuiltinOptions2_StablehloConcatenateOptions = 1,
+  BuiltinOptions2_StablehloBroadcastInDimOptions = 2,
+  BuiltinOptions2_StablehloSliceOptions = 3,
+  BuiltinOptions2_StablehloConvolutionOptions = 4,
+  BuiltinOptions2_StablehloCustomCallOptions = 5,
+  BuiltinOptions2_StablehloReduceOptions = 6,
+  BuiltinOptions2_StablehloScatterOptions = 7,
+  BuiltinOptions2_StablehloCompareOptions = 8,
+  BuiltinOptions2_StablehloDynamicSliceOptions = 9,
+  BuiltinOptions2_StablehloPadOptions = 10,
+  BuiltinOptions2_StablehloIotaOptions = 11,
+  BuiltinOptions2_StablehloDotGeneralOptions = 12,
+  BuiltinOptions2_StablehloReduceWindowOptions = 13,
+  BuiltinOptions2_StablehloSortOptions = 14,
+  BuiltinOptions2_StablehloWhileOptions = 15,
+  BuiltinOptions2_StablehloGatherOptions = 16,
+  BuiltinOptions2_StablehloTransposeOptions = 17,
+  BuiltinOptions2_DilateOptions = 18,
+  BuiltinOptions2_StablehloRngBitGeneratorOptions = 19,
+  BuiltinOptions2_ReduceWindowOptions = 20,
+  BuiltinOptions2_StableHLOCompositeOptions = 21,
+  BuiltinOptions2_MIN = BuiltinOptions2_NONE,
+  BuiltinOptions2_MAX = BuiltinOptions2_StableHLOCompositeOptions
+};
+
+inline const BuiltinOptions2 (&EnumValuesBuiltinOptions2())[22] {
+  static const BuiltinOptions2 values[] = {
+    BuiltinOptions2_NONE,
+    BuiltinOptions2_StablehloConcatenateOptions,
+    BuiltinOptions2_StablehloBroadcastInDimOptions,
+    BuiltinOptions2_StablehloSliceOptions,
+    BuiltinOptions2_StablehloConvolutionOptions,
+    BuiltinOptions2_StablehloCustomCallOptions,
+    BuiltinOptions2_StablehloReduceOptions,
+    BuiltinOptions2_StablehloScatterOptions,
+    BuiltinOptions2_StablehloCompareOptions,
+    BuiltinOptions2_StablehloDynamicSliceOptions,
+    BuiltinOptions2_StablehloPadOptions,
+    BuiltinOptions2_StablehloIotaOptions,
+    BuiltinOptions2_StablehloDotGeneralOptions,
+    BuiltinOptions2_StablehloReduceWindowOptions,
+    BuiltinOptions2_StablehloSortOptions,
+    BuiltinOptions2_StablehloWhileOptions,
+    BuiltinOptions2_StablehloGatherOptions,
+    BuiltinOptions2_StablehloTransposeOptions,
+    BuiltinOptions2_DilateOptions,
+    BuiltinOptions2_StablehloRngBitGeneratorOptions,
+    BuiltinOptions2_ReduceWindowOptions,
+    BuiltinOptions2_StableHLOCompositeOptions
+  };
+  return values;
+}
+
+inline const char * const *EnumNamesBuiltinOptions2() {
+  static const char * const names[23] = {
+    "NONE",
+    "StablehloConcatenateOptions",
+    "StablehloBroadcastInDimOptions",
+    "StablehloSliceOptions",
+    "StablehloConvolutionOptions",
+    "StablehloCustomCallOptions",
+    "StablehloReduceOptions",
+    "StablehloScatterOptions",
+    "StablehloCompareOptions",
+    "StablehloDynamicSliceOptions",
+    "StablehloPadOptions",
+    "StablehloIotaOptions",
+    "StablehloDotGeneralOptions",
+    "StablehloReduceWindowOptions",
+    "StablehloSortOptions",
+    "StablehloWhileOptions",
+    "StablehloGatherOptions",
+    "StablehloTransposeOptions",
+    "DilateOptions",
+    "StablehloRngBitGeneratorOptions",
+    "ReduceWindowOptions",
+    "StableHLOCompositeOptions",
+    nullptr
+  };
+  return names;
+}
+
+inline const char *EnumNameBuiltinOptions2(BuiltinOptions2 e) {
+  if (::flatbuffers::IsOutRange(e, BuiltinOptions2_NONE, BuiltinOptions2_StableHLOCompositeOptions)) return "";
+  const size_t index = static_cast<size_t>(e);
+  return EnumNamesBuiltinOptions2()[index];
+}
+
+template<typename T> struct BuiltinOptions2Traits {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_NONE;
+};
+
+template<> struct BuiltinOptions2Traits<tflite::StablehloConcatenateOptions> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloConcatenateOptions;
+};
+
+template<> struct BuiltinOptions2Traits<tflite::StablehloBroadcastInDimOptions> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloBroadcastInDimOptions;
+};
+
+template<> struct BuiltinOptions2Traits<tflite::StablehloSliceOptions> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloSliceOptions;
+};
+
+template<> struct BuiltinOptions2Traits<tflite::StablehloConvolutionOptions> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloConvolutionOptions;
+};
+
+template<> struct BuiltinOptions2Traits<tflite::StablehloCustomCallOptions> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloCustomCallOptions;
+};
+
+template<> struct BuiltinOptions2Traits<tflite::StablehloReduceOptions> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloReduceOptions;
+};
+
+template<> struct BuiltinOptions2Traits<tflite::StablehloScatterOptions> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloScatterOptions;
+};
+
+template<> struct BuiltinOptions2Traits<tflite::StablehloCompareOptions> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloCompareOptions;
+};
+
+template<> struct BuiltinOptions2Traits<tflite::StablehloDynamicSliceOptions> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloDynamicSliceOptions;
+};
+
+template<> struct BuiltinOptions2Traits<tflite::StablehloPadOptions> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloPadOptions;
+};
+
+template<> struct BuiltinOptions2Traits<tflite::StablehloIotaOptions> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloIotaOptions;
+};
+
+template<> struct BuiltinOptions2Traits<tflite::StablehloDotGeneralOptions> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloDotGeneralOptions;
+};
+
+template<> struct BuiltinOptions2Traits<tflite::StablehloReduceWindowOptions> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloReduceWindowOptions;
+};
+
+template<> struct BuiltinOptions2Traits<tflite::StablehloSortOptions> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloSortOptions;
+};
+
+template<> struct BuiltinOptions2Traits<tflite::StablehloWhileOptions> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloWhileOptions;
+};
+
+template<> struct BuiltinOptions2Traits<tflite::StablehloGatherOptions> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloGatherOptions;
+};
+
+template<> struct BuiltinOptions2Traits<tflite::StablehloTransposeOptions> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloTransposeOptions;
+};
+
+template<> struct BuiltinOptions2Traits<tflite::DilateOptions> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_DilateOptions;
+};
+
+template<> struct BuiltinOptions2Traits<tflite::StablehloRngBitGeneratorOptions> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloRngBitGeneratorOptions;
+};
+
+template<> struct BuiltinOptions2Traits<tflite::ReduceWindowOptions> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_ReduceWindowOptions;
+};
+
+template<> struct BuiltinOptions2Traits<tflite::StableHLOCompositeOptions> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StableHLOCompositeOptions;
+};
+
+template<typename T> struct BuiltinOptions2UnionTraits {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_NONE;
+};
+
+template<> struct BuiltinOptions2UnionTraits<tflite::StablehloConcatenateOptionsT> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloConcatenateOptions;
+};
+
+template<> struct BuiltinOptions2UnionTraits<tflite::StablehloBroadcastInDimOptionsT> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloBroadcastInDimOptions;
+};
+
+template<> struct BuiltinOptions2UnionTraits<tflite::StablehloSliceOptionsT> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloSliceOptions;
+};
+
+template<> struct BuiltinOptions2UnionTraits<tflite::StablehloConvolutionOptionsT> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloConvolutionOptions;
+};
+
+template<> struct BuiltinOptions2UnionTraits<tflite::StablehloCustomCallOptionsT> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloCustomCallOptions;
+};
+
+template<> struct BuiltinOptions2UnionTraits<tflite::StablehloReduceOptionsT> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloReduceOptions;
+};
+
+template<> struct BuiltinOptions2UnionTraits<tflite::StablehloScatterOptionsT> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloScatterOptions;
+};
+
+template<> struct BuiltinOptions2UnionTraits<tflite::StablehloCompareOptionsT> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloCompareOptions;
+};
+
+template<> struct BuiltinOptions2UnionTraits<tflite::StablehloDynamicSliceOptionsT> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloDynamicSliceOptions;
+};
+
+template<> struct BuiltinOptions2UnionTraits<tflite::StablehloPadOptionsT> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloPadOptions;
+};
+
+template<> struct BuiltinOptions2UnionTraits<tflite::StablehloIotaOptionsT> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloIotaOptions;
+};
+
+template<> struct BuiltinOptions2UnionTraits<tflite::StablehloDotGeneralOptionsT> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloDotGeneralOptions;
+};
+
+template<> struct BuiltinOptions2UnionTraits<tflite::StablehloReduceWindowOptionsT> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloReduceWindowOptions;
+};
+
+template<> struct BuiltinOptions2UnionTraits<tflite::StablehloSortOptionsT> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloSortOptions;
+};
+
+template<> struct BuiltinOptions2UnionTraits<tflite::StablehloWhileOptionsT> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloWhileOptions;
+};
+
+template<> struct BuiltinOptions2UnionTraits<tflite::StablehloGatherOptionsT> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloGatherOptions;
+};
+
+template<> struct BuiltinOptions2UnionTraits<tflite::StablehloTransposeOptionsT> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloTransposeOptions;
+};
+
+template<> struct BuiltinOptions2UnionTraits<tflite::DilateOptionsT> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_DilateOptions;
+};
+
+template<> struct BuiltinOptions2UnionTraits<tflite::StablehloRngBitGeneratorOptionsT> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloRngBitGeneratorOptions;
+};
+
+template<> struct BuiltinOptions2UnionTraits<tflite::ReduceWindowOptionsT> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_ReduceWindowOptions;
+};
+
+template<> struct BuiltinOptions2UnionTraits<tflite::StableHLOCompositeOptionsT> {
+  static const BuiltinOptions2 enum_value = BuiltinOptions2_StableHLOCompositeOptions;
+};
+
+struct BuiltinOptions2Union {
+  BuiltinOptions2 type;
+  void *value;
+
+  BuiltinOptions2Union() : type(BuiltinOptions2_NONE), value(nullptr) {}
+  BuiltinOptions2Union(BuiltinOptions2Union&& u) FLATBUFFERS_NOEXCEPT :
+    type(BuiltinOptions2_NONE), value(nullptr)
+    { std::swap(type, u.type); std::swap(value, u.value); }
+  BuiltinOptions2Union(const BuiltinOptions2Union &);
+  BuiltinOptions2Union &operator=(const BuiltinOptions2Union &u)
+    { BuiltinOptions2Union t(u); std::swap(type, t.type); std::swap(value, t.value); return *this; }
+  BuiltinOptions2Union &operator=(BuiltinOptions2Union &&u) FLATBUFFERS_NOEXCEPT
+    { std::swap(type, u.type); std::swap(value, u.value); return *this; }
+  ~BuiltinOptions2Union() { Reset(); }
+
+  void Reset();
+
+  template <typename T>
+  void Set(T&& val) {
+    typedef typename std::remove_reference<T>::type RT;
+    Reset();
+    type = BuiltinOptions2UnionTraits<RT>::enum_value;
+    if (type != BuiltinOptions2_NONE) {
+      value = new RT(std::forward<T>(val));
+    }
+  }
+
+  static void *UnPack(const void *obj, BuiltinOptions2 type, const ::flatbuffers::resolver_function_t *resolver);
+  ::flatbuffers::Offset<void> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr) const;
+
+  tflite::StablehloConcatenateOptionsT *AsStablehloConcatenateOptions() {
+    return type == BuiltinOptions2_StablehloConcatenateOptions ?
+      reinterpret_cast<tflite::StablehloConcatenateOptionsT *>(value) : nullptr;
+  }
+  const tflite::StablehloConcatenateOptionsT *AsStablehloConcatenateOptions() const {
+    return type == BuiltinOptions2_StablehloConcatenateOptions ?
+      reinterpret_cast<const tflite::StablehloConcatenateOptionsT *>(value) : nullptr;
+  }
+  tflite::StablehloBroadcastInDimOptionsT *AsStablehloBroadcastInDimOptions() {
+    return type == BuiltinOptions2_StablehloBroadcastInDimOptions ?
+      reinterpret_cast<tflite::StablehloBroadcastInDimOptionsT *>(value) : nullptr;
+  }
+  const tflite::StablehloBroadcastInDimOptionsT *AsStablehloBroadcastInDimOptions() const {
+    return type == BuiltinOptions2_StablehloBroadcastInDimOptions ?
+      reinterpret_cast<const tflite::StablehloBroadcastInDimOptionsT *>(value) : nullptr;
+  }
+  tflite::StablehloSliceOptionsT *AsStablehloSliceOptions() {
+    return type == BuiltinOptions2_StablehloSliceOptions ?
+      reinterpret_cast<tflite::StablehloSliceOptionsT *>(value) : nullptr;
+  }
+  const tflite::StablehloSliceOptionsT *AsStablehloSliceOptions() const {
+    return type == BuiltinOptions2_StablehloSliceOptions ?
+      reinterpret_cast<const tflite::StablehloSliceOptionsT *>(value) : nullptr;
+  }
+  tflite::StablehloConvolutionOptionsT *AsStablehloConvolutionOptions() {
+    return type == BuiltinOptions2_StablehloConvolutionOptions ?
+      reinterpret_cast<tflite::StablehloConvolutionOptionsT *>(value) : nullptr;
+  }
+  const tflite::StablehloConvolutionOptionsT *AsStablehloConvolutionOptions() const {
+    return type == BuiltinOptions2_StablehloConvolutionOptions ?
+      reinterpret_cast<const tflite::StablehloConvolutionOptionsT *>(value) : nullptr;
+  }
+  tflite::StablehloCustomCallOptionsT *AsStablehloCustomCallOptions() {
+    return type == BuiltinOptions2_StablehloCustomCallOptions ?
+      reinterpret_cast<tflite::StablehloCustomCallOptionsT *>(value) : nullptr;
+  }
+  const tflite::StablehloCustomCallOptionsT *AsStablehloCustomCallOptions() const {
+    return type == BuiltinOptions2_StablehloCustomCallOptions ?
+      reinterpret_cast<const tflite::StablehloCustomCallOptionsT *>(value) : nullptr;
+  }
+  tflite::StablehloReduceOptionsT *AsStablehloReduceOptions() {
+    return type == BuiltinOptions2_StablehloReduceOptions ?
+      reinterpret_cast<tflite::StablehloReduceOptionsT *>(value) : nullptr;
+  }
+  const tflite::StablehloReduceOptionsT *AsStablehloReduceOptions() const {
+    return type == BuiltinOptions2_StablehloReduceOptions ?
+      reinterpret_cast<const tflite::StablehloReduceOptionsT *>(value) : nullptr;
+  }
+  tflite::StablehloScatterOptionsT *AsStablehloScatterOptions() {
+    return type == BuiltinOptions2_StablehloScatterOptions ?
+      reinterpret_cast<tflite::StablehloScatterOptionsT *>(value) : nullptr;
+  }
+  const tflite::StablehloScatterOptionsT *AsStablehloScatterOptions() const {
+    return type == BuiltinOptions2_StablehloScatterOptions ?
+      reinterpret_cast<const tflite::StablehloScatterOptionsT *>(value) : nullptr;
+  }
+  tflite::StablehloCompareOptionsT *AsStablehloCompareOptions() {
+    return type == BuiltinOptions2_StablehloCompareOptions ?
+      reinterpret_cast<tflite::StablehloCompareOptionsT *>(value) : nullptr;
+  }
+  const tflite::StablehloCompareOptionsT *AsStablehloCompareOptions() const {
+    return type == BuiltinOptions2_StablehloCompareOptions ?
+      reinterpret_cast<const tflite::StablehloCompareOptionsT *>(value) : nullptr;
+  }
+  tflite::StablehloDynamicSliceOptionsT *AsStablehloDynamicSliceOptions() {
+    return type == BuiltinOptions2_StablehloDynamicSliceOptions ?
+      reinterpret_cast<tflite::StablehloDynamicSliceOptionsT *>(value) : nullptr;
+  }
+  const tflite::StablehloDynamicSliceOptionsT *AsStablehloDynamicSliceOptions() const {
+    return type == BuiltinOptions2_StablehloDynamicSliceOptions ?
+      reinterpret_cast<const tflite::StablehloDynamicSliceOptionsT *>(value) : nullptr;
+  }
+  tflite::StablehloPadOptionsT *AsStablehloPadOptions() {
+    return type == BuiltinOptions2_StablehloPadOptions ?
+      reinterpret_cast<tflite::StablehloPadOptionsT *>(value) : nullptr;
+  }
+  const tflite::StablehloPadOptionsT *AsStablehloPadOptions() const {
+    return type == BuiltinOptions2_StablehloPadOptions ?
+      reinterpret_cast<const tflite::StablehloPadOptionsT *>(value) : nullptr;
+  }
+  tflite::StablehloIotaOptionsT *AsStablehloIotaOptions() {
+    return type == BuiltinOptions2_StablehloIotaOptions ?
+      reinterpret_cast<tflite::StablehloIotaOptionsT *>(value) : nullptr;
+  }
+  const tflite::StablehloIotaOptionsT *AsStablehloIotaOptions() const {
+    return type == BuiltinOptions2_StablehloIotaOptions ?
+      reinterpret_cast<const tflite::StablehloIotaOptionsT *>(value) : nullptr;
+  }
+  tflite::StablehloDotGeneralOptionsT *AsStablehloDotGeneralOptions() {
+    return type == BuiltinOptions2_StablehloDotGeneralOptions ?
+      reinterpret_cast<tflite::StablehloDotGeneralOptionsT *>(value) : nullptr;
+  }
+  const tflite::StablehloDotGeneralOptionsT *AsStablehloDotGeneralOptions() const {
+    return type == BuiltinOptions2_StablehloDotGeneralOptions ?
+      reinterpret_cast<const tflite::StablehloDotGeneralOptionsT *>(value) : nullptr;
+  }
+  tflite::StablehloReduceWindowOptionsT *AsStablehloReduceWindowOptions() {
+    return type == BuiltinOptions2_StablehloReduceWindowOptions ?
+      reinterpret_cast<tflite::StablehloReduceWindowOptionsT *>(value) : nullptr;
+  }
+  const tflite::StablehloReduceWindowOptionsT *AsStablehloReduceWindowOptions() const {
+    return type == BuiltinOptions2_StablehloReduceWindowOptions ?
+      reinterpret_cast<const tflite::StablehloReduceWindowOptionsT *>(value) : nullptr;
+  }
+  tflite::StablehloSortOptionsT *AsStablehloSortOptions() {
+    return type == BuiltinOptions2_StablehloSortOptions ?
+      reinterpret_cast<tflite::StablehloSortOptionsT *>(value) : nullptr;
+  }
+  const tflite::StablehloSortOptionsT *AsStablehloSortOptions() const {
+    return type == BuiltinOptions2_StablehloSortOptions ?
+      reinterpret_cast<const tflite::StablehloSortOptionsT *>(value) : nullptr;
+  }
+  tflite::StablehloWhileOptionsT *AsStablehloWhileOptions() {
+    return type == BuiltinOptions2_StablehloWhileOptions ?
+      reinterpret_cast<tflite::StablehloWhileOptionsT *>(value) : nullptr;
+  }
+  const tflite::StablehloWhileOptionsT *AsStablehloWhileOptions() const {
+    return type == BuiltinOptions2_StablehloWhileOptions ?
+      reinterpret_cast<const tflite::StablehloWhileOptionsT *>(value) : nullptr;
+  }
+  tflite::StablehloGatherOptionsT *AsStablehloGatherOptions() {
+    return type == BuiltinOptions2_StablehloGatherOptions ?
+      reinterpret_cast<tflite::StablehloGatherOptionsT *>(value) : nullptr;
+  }
+  const tflite::StablehloGatherOptionsT *AsStablehloGatherOptions() const {
+    return type == BuiltinOptions2_StablehloGatherOptions ?
+      reinterpret_cast<const tflite::StablehloGatherOptionsT *>(value) : nullptr;
+  }
+  tflite::StablehloTransposeOptionsT *AsStablehloTransposeOptions() {
+    return type == BuiltinOptions2_StablehloTransposeOptions ?
+      reinterpret_cast<tflite::StablehloTransposeOptionsT *>(value) : nullptr;
+  }
+  const tflite::StablehloTransposeOptionsT *AsStablehloTransposeOptions() const {
+    return type == BuiltinOptions2_StablehloTransposeOptions ?
+      reinterpret_cast<const tflite::StablehloTransposeOptionsT *>(value) : nullptr;
+  }
+  tflite::DilateOptionsT *AsDilateOptions() {
+    return type == BuiltinOptions2_DilateOptions ?
+      reinterpret_cast<tflite::DilateOptionsT *>(value) : nullptr;
+  }
+  const tflite::DilateOptionsT *AsDilateOptions() const {
+    return type == BuiltinOptions2_DilateOptions ?
+      reinterpret_cast<const tflite::DilateOptionsT *>(value) : nullptr;
+  }
+  tflite::StablehloRngBitGeneratorOptionsT *AsStablehloRngBitGeneratorOptions() {
+    return type == BuiltinOptions2_StablehloRngBitGeneratorOptions ?
+      reinterpret_cast<tflite::StablehloRngBitGeneratorOptionsT *>(value) : nullptr;
+  }
+  const tflite::StablehloRngBitGeneratorOptionsT *AsStablehloRngBitGeneratorOptions() const {
+    return type == BuiltinOptions2_StablehloRngBitGeneratorOptions ?
+      reinterpret_cast<const tflite::StablehloRngBitGeneratorOptionsT *>(value) : nullptr;
+  }
+  tflite::ReduceWindowOptionsT *AsReduceWindowOptions() {
+    return type == BuiltinOptions2_ReduceWindowOptions ?
+      reinterpret_cast<tflite::ReduceWindowOptionsT *>(value) : nullptr;
+  }
+  const tflite::ReduceWindowOptionsT *AsReduceWindowOptions() const {
+    return type == BuiltinOptions2_ReduceWindowOptions ?
+      reinterpret_cast<const tflite::ReduceWindowOptionsT *>(value) : nullptr;
+  }
+  tflite::StableHLOCompositeOptionsT *AsStableHLOCompositeOptions() {
+    return type == BuiltinOptions2_StableHLOCompositeOptions ?
+      reinterpret_cast<tflite::StableHLOCompositeOptionsT *>(value) : nullptr;
+  }
+  const tflite::StableHLOCompositeOptionsT *AsStableHLOCompositeOptions() const {
+    return type == BuiltinOptions2_StableHLOCompositeOptions ?
+      reinterpret_cast<const tflite::StableHLOCompositeOptionsT *>(value) : nullptr;
+  }
+};
+
+bool VerifyBuiltinOptions2(::flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions2 type);
+bool VerifyBuiltinOptions2Vector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset<void>> *values, const ::flatbuffers::Vector<uint8_t> *types);
+
+enum StablehloPrecisionConfig : uint32_t {
+  StablehloPrecisionConfig_DEFAULT = 0,
+  StablehloPrecisionConfig_HIGH = 1,
+  StablehloPrecisionConfig_HIGHEST = 2,
+  StablehloPrecisionConfig_MIN = StablehloPrecisionConfig_DEFAULT,
+  StablehloPrecisionConfig_MAX = StablehloPrecisionConfig_HIGHEST
+};
+
+inline const StablehloPrecisionConfig (&EnumValuesStablehloPrecisionConfig())[3] {
+  static const StablehloPrecisionConfig values[] = {
+    StablehloPrecisionConfig_DEFAULT,
+    StablehloPrecisionConfig_HIGH,
+    StablehloPrecisionConfig_HIGHEST
+  };
+  return values;
+}
+
+inline const char * const *EnumNamesStablehloPrecisionConfig() {
+  static const char * const names[4] = {
+    "DEFAULT",
+    "HIGH",
+    "HIGHEST",
+    nullptr
+  };
+  return names;
+}
+
+inline const char *EnumNameStablehloPrecisionConfig(StablehloPrecisionConfig e) {
+  if (::flatbuffers::IsOutRange(e, StablehloPrecisionConfig_DEFAULT, StablehloPrecisionConfig_HIGHEST)) return "";
+  const size_t index = static_cast<size_t>(e);
+  return EnumNamesStablehloPrecisionConfig()[index];
+}
+
+enum StablehloComparisonDirection : uint32_t {
+  StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_EQ = 0,
+  StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_NE = 1,
+  StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_GE = 2,
+  StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_GT = 3,
+  StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_LE = 4,
+  StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_LT = 5,
+  StablehloComparisonDirection_MIN = StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_EQ,
+  StablehloComparisonDirection_MAX = StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_LT
+};
+
+inline const StablehloComparisonDirection (&EnumValuesStablehloComparisonDirection())[6] {
+  static const StablehloComparisonDirection values[] = {
+    StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_EQ,
+    StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_NE,
+    StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_GE,
+    StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_GT,
+    StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_LE,
+    StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_LT
+  };
+  return values;
+}
+
+inline const char * const *EnumNamesStablehloComparisonDirection() {
+  static const char * const names[7] = {
+    "STABLEHLO_COMPARISON_DIRECTION_EQ",
+    "STABLEHLO_COMPARISON_DIRECTION_NE",
+    "STABLEHLO_COMPARISON_DIRECTION_GE",
+    "STABLEHLO_COMPARISON_DIRECTION_GT",
+    "STABLEHLO_COMPARISON_DIRECTION_LE",
+    "STABLEHLO_COMPARISON_DIRECTION_LT",
+    nullptr
+  };
+  return names;
+}
+
+inline const char *EnumNameStablehloComparisonDirection(StablehloComparisonDirection e) {
+  if (::flatbuffers::IsOutRange(e, StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_EQ, StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_LT)) return "";
+  const size_t index = static_cast<size_t>(e);
+  return EnumNamesStablehloComparisonDirection()[index];
+}
+
+enum StablehloComparisonType : uint32_t {
+  StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_NOTYPE = 0,
+  StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_FLOAT = 1,
+  StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_FLOAT_TOTAL_ORDER = 2,
+  StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_SIGNED = 3,
+  StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_UNSIGNED = 4,
+  StablehloComparisonType_MIN = StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_NOTYPE,
+  StablehloComparisonType_MAX = StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_UNSIGNED
+};
+
+inline const StablehloComparisonType (&EnumValuesStablehloComparisonType())[5] {
+  static const StablehloComparisonType values[] = {
+    StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_NOTYPE,
+    StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_FLOAT,
+    StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_FLOAT_TOTAL_ORDER,
+    StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_SIGNED,
+    StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_UNSIGNED
+  };
+  return values;
+}
+
+inline const char * const *EnumNamesStablehloComparisonType() {
+  static const char * const names[6] = {
+    "STABLEHLO_COMPARISON_TYPE_NOTYPE",
+    "STABLEHLO_COMPARISON_TYPE_FLOAT",
+    "STABLEHLO_COMPARISON_TYPE_FLOAT_TOTAL_ORDER",
+    "STABLEHLO_COMPARISON_TYPE_SIGNED",
+    "STABLEHLO_COMPARISON_TYPE_UNSIGNED",
+    nullptr
+  };
+  return names;
+}
+
+inline const char *EnumNameStablehloComparisonType(StablehloComparisonType e) {
+  if (::flatbuffers::IsOutRange(e, StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_NOTYPE, StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_UNSIGNED)) return "";
+  const size_t index = static_cast<size_t>(e);
+  return EnumNamesStablehloComparisonType()[index];
+}
+
+enum RngAlgorithm : int8_t {
+  RngAlgorithm_DEFAULT = 0,
+  RngAlgorithm_PHILOX = 1,
+  RngAlgorithm_THREEFRY = 2,
+  RngAlgorithm_MIN = RngAlgorithm_DEFAULT,
+  RngAlgorithm_MAX = RngAlgorithm_THREEFRY
+};
+
+inline const RngAlgorithm (&EnumValuesRngAlgorithm())[3] {
+  static const RngAlgorithm values[] = {
+    RngAlgorithm_DEFAULT,
+    RngAlgorithm_PHILOX,
+    RngAlgorithm_THREEFRY
+  };
+  return values;
+}
+
+inline const char * const *EnumNamesRngAlgorithm() {
+  static const char * const names[4] = {
+    "DEFAULT",
+    "PHILOX",
+    "THREEFRY",
+    nullptr
+  };
+  return names;
+}
+
+inline const char *EnumNameRngAlgorithm(RngAlgorithm e) {
+  if (::flatbuffers::IsOutRange(e, RngAlgorithm_DEFAULT, RngAlgorithm_THREEFRY)) return "";
+  const size_t index = static_cast<size_t>(e);
+  return EnumNamesRngAlgorithm()[index];
+}
 
 enum Padding : int8_t {
   Padding_SAME = 0,
@@ -3910,7 +4748,7 @@
 }
 
 inline const char *EnumNamePadding(Padding e) {
-  if (flatbuffers::IsOutRange(e, Padding_SAME, Padding_VALID)) return "";
+  if (::flatbuffers::IsOutRange(e, Padding_SAME, Padding_VALID)) return "";
   const size_t index = static_cast<size_t>(e);
   return EnumNamesPadding()[index];
 }
@@ -3952,7 +4790,7 @@
 }
 
 inline const char *EnumNameActivationFunctionType(ActivationFunctionType e) {
-  if (flatbuffers::IsOutRange(e, ActivationFunctionType_NONE, ActivationFunctionType_SIGN_BIT)) return "";
+  if (::flatbuffers::IsOutRange(e, ActivationFunctionType_NONE, ActivationFunctionType_SIGN_BIT)) return "";
   const size_t index = static_cast<size_t>(e);
   return EnumNamesActivationFunctionType()[index];
 }
@@ -3985,7 +4823,7 @@
 }
 
 inline const char *EnumNameLSHProjectionType(LSHProjectionType e) {
-  if (flatbuffers::IsOutRange(e, LSHProjectionType_UNKNOWN, LSHProjectionType_DENSE)) return "";
+  if (::flatbuffers::IsOutRange(e, LSHProjectionType_UNKNOWN, LSHProjectionType_DENSE)) return "";
   const size_t index = static_cast<size_t>(e);
   return EnumNamesLSHProjectionType()[index];
 }
@@ -4015,7 +4853,7 @@
 }
 
 inline const char *EnumNameFullyConnectedOptionsWeightsFormat(FullyConnectedOptionsWeightsFormat e) {
-  if (flatbuffers::IsOutRange(e, FullyConnectedOptionsWeightsFormat_DEFAULT, FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8)) return "";
+  if (::flatbuffers::IsOutRange(e, FullyConnectedOptionsWeightsFormat_DEFAULT, FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8)) return "";
   const size_t index = static_cast<size_t>(e);
   return EnumNamesFullyConnectedOptionsWeightsFormat()[index];
 }
@@ -4045,7 +4883,7 @@
 }
 
 inline const char *EnumNameLSTMKernelType(LSTMKernelType e) {
-  if (flatbuffers::IsOutRange(e, LSTMKernelType_FULL, LSTMKernelType_BASIC)) return "";
+  if (::flatbuffers::IsOutRange(e, LSTMKernelType_FULL, LSTMKernelType_BASIC)) return "";
   const size_t index = static_cast<size_t>(e);
   return EnumNamesLSTMKernelType()[index];
 }
@@ -4078,7 +4916,7 @@
 }
 
 inline const char *EnumNameCombinerType(CombinerType e) {
-  if (flatbuffers::IsOutRange(e, CombinerType_SUM, CombinerType_SQRTN)) return "";
+  if (::flatbuffers::IsOutRange(e, CombinerType_SUM, CombinerType_SQRTN)) return "";
   const size_t index = static_cast<size_t>(e);
   return EnumNamesCombinerType()[index];
 }
@@ -4108,11 +4946,56 @@
 }
 
 inline const char *EnumNameMirrorPadMode(MirrorPadMode e) {
-  if (flatbuffers::IsOutRange(e, MirrorPadMode_REFLECT, MirrorPadMode_SYMMETRIC)) return "";
+  if (::flatbuffers::IsOutRange(e, MirrorPadMode_REFLECT, MirrorPadMode_SYMMETRIC)) return "";
   const size_t index = static_cast<size_t>(e);
   return EnumNamesMirrorPadMode()[index];
 }
 
+enum ReduceWindowFunction : int32_t {
+  ReduceWindowFunction_UNSUPPORTED = 0,
+  ReduceWindowFunction_ADD = 1,
+  ReduceWindowFunction_MUL = 2,
+  ReduceWindowFunction_MINIMUM = 3,
+  ReduceWindowFunction_MAXIMUM = 4,
+  ReduceWindowFunction_ALL = 5,
+  ReduceWindowFunction_ANY = 6,
+  ReduceWindowFunction_MIN = ReduceWindowFunction_UNSUPPORTED,
+  ReduceWindowFunction_MAX = ReduceWindowFunction_ANY
+};
+
+inline const ReduceWindowFunction (&EnumValuesReduceWindowFunction())[7] {
+  static const ReduceWindowFunction values[] = {
+    ReduceWindowFunction_UNSUPPORTED,
+    ReduceWindowFunction_ADD,
+    ReduceWindowFunction_MUL,
+    ReduceWindowFunction_MINIMUM,
+    ReduceWindowFunction_MAXIMUM,
+    ReduceWindowFunction_ALL,
+    ReduceWindowFunction_ANY
+  };
+  return values;
+}
+
+inline const char * const *EnumNamesReduceWindowFunction() {
+  static const char * const names[8] = {
+    "UNSUPPORTED",
+    "ADD",
+    "MUL",
+    "MINIMUM",
+    "MAXIMUM",
+    "ALL",
+    "ANY",
+    nullptr
+  };
+  return names;
+}
+
+inline const char *EnumNameReduceWindowFunction(ReduceWindowFunction e) {
+  if (::flatbuffers::IsOutRange(e, ReduceWindowFunction_UNSUPPORTED, ReduceWindowFunction_ANY)) return "";
+  const size_t index = static_cast<size_t>(e);
+  return EnumNamesReduceWindowFunction()[index];
+}
+
 enum CustomOptionsFormat : int8_t {
   CustomOptionsFormat_FLEXBUFFERS = 0,
   CustomOptionsFormat_MIN = CustomOptionsFormat_FLEXBUFFERS,
@@ -4135,64 +5018,64 @@
 }
 
 inline const char *EnumNameCustomOptionsFormat(CustomOptionsFormat e) {
-  if (flatbuffers::IsOutRange(e, CustomOptionsFormat_FLEXBUFFERS, CustomOptionsFormat_FLEXBUFFERS)) return "";
+  if (::flatbuffers::IsOutRange(e, CustomOptionsFormat_FLEXBUFFERS, CustomOptionsFormat_FLEXBUFFERS)) return "";
   const size_t index = static_cast<size_t>(e);
   return EnumNamesCustomOptionsFormat()[index];
 }
 
-struct CustomQuantizationT : public flatbuffers::NativeTable {
+struct CustomQuantizationT : public ::flatbuffers::NativeTable {
   typedef CustomQuantization TableType;
   std::vector<uint8_t> custom{};
 };
 
-struct CustomQuantization FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct CustomQuantization FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef CustomQuantizationT NativeTableType;
   typedef CustomQuantizationBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
     VT_CUSTOM = 4
   };
-  const flatbuffers::Vector<uint8_t> *custom() const {
-    return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_CUSTOM);
+  const ::flatbuffers::Vector<uint8_t> *custom() const {
+    return GetPointer<const ::flatbuffers::Vector<uint8_t> *>(VT_CUSTOM);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyOffset(verifier, VT_CUSTOM) &&
            verifier.VerifyVector(custom()) &&
            verifier.EndTable();
   }
-  CustomQuantizationT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(CustomQuantizationT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<CustomQuantization> Pack(flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  CustomQuantizationT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(CustomQuantizationT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<CustomQuantization> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct CustomQuantizationBuilder {
   typedef CustomQuantization Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_custom(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom) {
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_custom(::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> custom) {
     fbb_.AddOffset(CustomQuantization::VT_CUSTOM, custom);
   }
-  explicit CustomQuantizationBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit CustomQuantizationBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<CustomQuantization> Finish() {
+  ::flatbuffers::Offset<CustomQuantization> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<CustomQuantization>(end);
+    auto o = ::flatbuffers::Offset<CustomQuantization>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<CustomQuantization> CreateCustomQuantization(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom = 0) {
+inline ::flatbuffers::Offset<CustomQuantization> CreateCustomQuantization(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    ::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> custom = 0) {
   CustomQuantizationBuilder builder_(_fbb);
   builder_.add_custom(custom);
   return builder_.Finish();
 }
 
-inline flatbuffers::Offset<CustomQuantization> CreateCustomQuantizationDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<CustomQuantization> CreateCustomQuantizationDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     const std::vector<uint8_t> *custom = nullptr) {
   if (custom) { _fbb.ForceVectorAlignment(custom->size(), sizeof(uint8_t), 16); }
   auto custom__ = custom ? _fbb.CreateVector<uint8_t>(*custom) : 0;
@@ -4201,9 +5084,9 @@
       custom__);
 }
 
-flatbuffers::Offset<CustomQuantization> CreateCustomQuantization(flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<CustomQuantization> CreateCustomQuantization(::flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct QuantizationParametersT : public flatbuffers::NativeTable {
+struct QuantizationParametersT : public ::flatbuffers::NativeTable {
   typedef QuantizationParameters TableType;
   std::vector<float> min{};
   std::vector<float> max{};
@@ -4213,7 +5096,7 @@
   int32_t quantized_dimension = 0;
 };
 
-struct QuantizationParameters FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct QuantizationParameters FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef QuantizationParametersT NativeTableType;
   typedef QuantizationParametersBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -4225,17 +5108,17 @@
     VT_DETAILS = 14,
     VT_QUANTIZED_DIMENSION = 16
   };
-  const flatbuffers::Vector<float> *min() const {
-    return GetPointer<const flatbuffers::Vector<float> *>(VT_MIN);
+  const ::flatbuffers::Vector<float> *min() const {
+    return GetPointer<const ::flatbuffers::Vector<float> *>(VT_MIN);
   }
-  const flatbuffers::Vector<float> *max() const {
-    return GetPointer<const flatbuffers::Vector<float> *>(VT_MAX);
+  const ::flatbuffers::Vector<float> *max() const {
+    return GetPointer<const ::flatbuffers::Vector<float> *>(VT_MAX);
   }
-  const flatbuffers::Vector<float> *scale() const {
-    return GetPointer<const flatbuffers::Vector<float> *>(VT_SCALE);
+  const ::flatbuffers::Vector<float> *scale() const {
+    return GetPointer<const ::flatbuffers::Vector<float> *>(VT_SCALE);
   }
-  const flatbuffers::Vector<int64_t> *zero_point() const {
-    return GetPointer<const flatbuffers::Vector<int64_t> *>(VT_ZERO_POINT);
+  const ::flatbuffers::Vector<int64_t> *zero_point() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_ZERO_POINT);
   }
   tflite::QuantizationDetails details_type() const {
     return static_cast<tflite::QuantizationDetails>(GetField<uint8_t>(VT_DETAILS_TYPE, 0));
@@ -4250,7 +5133,7 @@
   int32_t quantized_dimension() const {
     return GetField<int32_t>(VT_QUANTIZED_DIMENSION, 0);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyOffset(verifier, VT_MIN) &&
            verifier.VerifyVector(min()) &&
@@ -4266,9 +5149,9 @@
            VerifyField<int32_t>(verifier, VT_QUANTIZED_DIMENSION, 4) &&
            verifier.EndTable();
   }
-  QuantizationParametersT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(QuantizationParametersT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<QuantizationParameters> Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  QuantizationParametersT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(QuantizationParametersT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<QuantizationParameters> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 template<> inline const tflite::CustomQuantization *QuantizationParameters::details_as<tflite::CustomQuantization>() const {
@@ -4277,48 +5160,48 @@
 
 struct QuantizationParametersBuilder {
   typedef QuantizationParameters Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_min(flatbuffers::Offset<flatbuffers::Vector<float>> min) {
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_min(::flatbuffers::Offset<::flatbuffers::Vector<float>> min) {
     fbb_.AddOffset(QuantizationParameters::VT_MIN, min);
   }
-  void add_max(flatbuffers::Offset<flatbuffers::Vector<float>> max) {
+  void add_max(::flatbuffers::Offset<::flatbuffers::Vector<float>> max) {
     fbb_.AddOffset(QuantizationParameters::VT_MAX, max);
   }
-  void add_scale(flatbuffers::Offset<flatbuffers::Vector<float>> scale) {
+  void add_scale(::flatbuffers::Offset<::flatbuffers::Vector<float>> scale) {
     fbb_.AddOffset(QuantizationParameters::VT_SCALE, scale);
   }
-  void add_zero_point(flatbuffers::Offset<flatbuffers::Vector<int64_t>> zero_point) {
+  void add_zero_point(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> zero_point) {
     fbb_.AddOffset(QuantizationParameters::VT_ZERO_POINT, zero_point);
   }
   void add_details_type(tflite::QuantizationDetails details_type) {
     fbb_.AddElement<uint8_t>(QuantizationParameters::VT_DETAILS_TYPE, static_cast<uint8_t>(details_type), 0);
   }
-  void add_details(flatbuffers::Offset<void> details) {
+  void add_details(::flatbuffers::Offset<void> details) {
     fbb_.AddOffset(QuantizationParameters::VT_DETAILS, details);
   }
   void add_quantized_dimension(int32_t quantized_dimension) {
     fbb_.AddElement<int32_t>(QuantizationParameters::VT_QUANTIZED_DIMENSION, quantized_dimension, 0);
   }
-  explicit QuantizationParametersBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit QuantizationParametersBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<QuantizationParameters> Finish() {
+  ::flatbuffers::Offset<QuantizationParameters> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<QuantizationParameters>(end);
+    auto o = ::flatbuffers::Offset<QuantizationParameters>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<QuantizationParameters> CreateQuantizationParameters(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::Vector<float>> min = 0,
-    flatbuffers::Offset<flatbuffers::Vector<float>> max = 0,
-    flatbuffers::Offset<flatbuffers::Vector<float>> scale = 0,
-    flatbuffers::Offset<flatbuffers::Vector<int64_t>> zero_point = 0,
+inline ::flatbuffers::Offset<QuantizationParameters> CreateQuantizationParameters(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    ::flatbuffers::Offset<::flatbuffers::Vector<float>> min = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<float>> max = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<float>> scale = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> zero_point = 0,
     tflite::QuantizationDetails details_type = tflite::QuantizationDetails_NONE,
-    flatbuffers::Offset<void> details = 0,
+    ::flatbuffers::Offset<void> details = 0,
     int32_t quantized_dimension = 0) {
   QuantizationParametersBuilder builder_(_fbb);
   builder_.add_quantized_dimension(quantized_dimension);
@@ -4331,14 +5214,14 @@
   return builder_.Finish();
 }
 
-inline flatbuffers::Offset<QuantizationParameters> CreateQuantizationParametersDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<QuantizationParameters> CreateQuantizationParametersDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     const std::vector<float> *min = nullptr,
     const std::vector<float> *max = nullptr,
     const std::vector<float> *scale = nullptr,
     const std::vector<int64_t> *zero_point = nullptr,
     tflite::QuantizationDetails details_type = tflite::QuantizationDetails_NONE,
-    flatbuffers::Offset<void> details = 0,
+    ::flatbuffers::Offset<void> details = 0,
     int32_t quantized_dimension = 0) {
   auto min__ = min ? _fbb.CreateVector<float>(*min) : 0;
   auto max__ = max ? _fbb.CreateVector<float>(*max) : 0;
@@ -4355,61 +5238,61 @@
       quantized_dimension);
 }
 
-flatbuffers::Offset<QuantizationParameters> CreateQuantizationParameters(flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<QuantizationParameters> CreateQuantizationParameters(::flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct Int32VectorT : public flatbuffers::NativeTable {
+struct Int32VectorT : public ::flatbuffers::NativeTable {
   typedef Int32Vector TableType;
   std::vector<int32_t> values{};
 };
 
-struct Int32Vector FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct Int32Vector FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef Int32VectorT NativeTableType;
   typedef Int32VectorBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
     VT_VALUES = 4
   };
-  const flatbuffers::Vector<int32_t> *values() const {
-    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_VALUES);
+  const ::flatbuffers::Vector<int32_t> *values() const {
+    return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_VALUES);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyOffset(verifier, VT_VALUES) &&
            verifier.VerifyVector(values()) &&
            verifier.EndTable();
   }
-  Int32VectorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(Int32VectorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<Int32Vector> Pack(flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  Int32VectorT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(Int32VectorT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<Int32Vector> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct Int32VectorBuilder {
   typedef Int32Vector Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_values(flatbuffers::Offset<flatbuffers::Vector<int32_t>> values) {
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_values(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> values) {
     fbb_.AddOffset(Int32Vector::VT_VALUES, values);
   }
-  explicit Int32VectorBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit Int32VectorBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<Int32Vector> Finish() {
+  ::flatbuffers::Offset<Int32Vector> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<Int32Vector>(end);
+    auto o = ::flatbuffers::Offset<Int32Vector>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<Int32Vector> CreateInt32Vector(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::Vector<int32_t>> values = 0) {
+inline ::flatbuffers::Offset<Int32Vector> CreateInt32Vector(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> values = 0) {
   Int32VectorBuilder builder_(_fbb);
   builder_.add_values(values);
   return builder_.Finish();
 }
 
-inline flatbuffers::Offset<Int32Vector> CreateInt32VectorDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<Int32Vector> CreateInt32VectorDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     const std::vector<int32_t> *values = nullptr) {
   auto values__ = values ? _fbb.CreateVector<int32_t>(*values) : 0;
   return tflite::CreateInt32Vector(
@@ -4417,61 +5300,61 @@
       values__);
 }
 
-flatbuffers::Offset<Int32Vector> CreateInt32Vector(flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<Int32Vector> CreateInt32Vector(::flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct Uint16VectorT : public flatbuffers::NativeTable {
+struct Uint16VectorT : public ::flatbuffers::NativeTable {
   typedef Uint16Vector TableType;
   std::vector<uint16_t> values{};
 };
 
-struct Uint16Vector FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct Uint16Vector FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef Uint16VectorT NativeTableType;
   typedef Uint16VectorBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
     VT_VALUES = 4
   };
-  const flatbuffers::Vector<uint16_t> *values() const {
-    return GetPointer<const flatbuffers::Vector<uint16_t> *>(VT_VALUES);
+  const ::flatbuffers::Vector<uint16_t> *values() const {
+    return GetPointer<const ::flatbuffers::Vector<uint16_t> *>(VT_VALUES);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyOffset(verifier, VT_VALUES) &&
            verifier.VerifyVector(values()) &&
            verifier.EndTable();
   }
-  Uint16VectorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(Uint16VectorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<Uint16Vector> Pack(flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  Uint16VectorT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(Uint16VectorT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<Uint16Vector> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct Uint16VectorBuilder {
   typedef Uint16Vector Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_values(flatbuffers::Offset<flatbuffers::Vector<uint16_t>> values) {
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_values(::flatbuffers::Offset<::flatbuffers::Vector<uint16_t>> values) {
     fbb_.AddOffset(Uint16Vector::VT_VALUES, values);
   }
-  explicit Uint16VectorBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit Uint16VectorBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<Uint16Vector> Finish() {
+  ::flatbuffers::Offset<Uint16Vector> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<Uint16Vector>(end);
+    auto o = ::flatbuffers::Offset<Uint16Vector>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<Uint16Vector> CreateUint16Vector(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::Vector<uint16_t>> values = 0) {
+inline ::flatbuffers::Offset<Uint16Vector> CreateUint16Vector(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    ::flatbuffers::Offset<::flatbuffers::Vector<uint16_t>> values = 0) {
   Uint16VectorBuilder builder_(_fbb);
   builder_.add_values(values);
   return builder_.Finish();
 }
 
-inline flatbuffers::Offset<Uint16Vector> CreateUint16VectorDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<Uint16Vector> CreateUint16VectorDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     const std::vector<uint16_t> *values = nullptr) {
   if (values) { _fbb.ForceVectorAlignment(values->size(), sizeof(uint16_t), 4); }
   auto values__ = values ? _fbb.CreateVector<uint16_t>(*values) : 0;
@@ -4480,61 +5363,61 @@
       values__);
 }
 
-flatbuffers::Offset<Uint16Vector> CreateUint16Vector(flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<Uint16Vector> CreateUint16Vector(::flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct Uint8VectorT : public flatbuffers::NativeTable {
+struct Uint8VectorT : public ::flatbuffers::NativeTable {
   typedef Uint8Vector TableType;
   std::vector<uint8_t> values{};
 };
 
-struct Uint8Vector FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct Uint8Vector FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef Uint8VectorT NativeTableType;
   typedef Uint8VectorBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
     VT_VALUES = 4
   };
-  const flatbuffers::Vector<uint8_t> *values() const {
-    return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_VALUES);
+  const ::flatbuffers::Vector<uint8_t> *values() const {
+    return GetPointer<const ::flatbuffers::Vector<uint8_t> *>(VT_VALUES);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyOffset(verifier, VT_VALUES) &&
            verifier.VerifyVector(values()) &&
            verifier.EndTable();
   }
-  Uint8VectorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(Uint8VectorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<Uint8Vector> Pack(flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  Uint8VectorT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(Uint8VectorT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<Uint8Vector> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct Uint8VectorBuilder {
   typedef Uint8Vector Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_values(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> values) {
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_values(::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> values) {
     fbb_.AddOffset(Uint8Vector::VT_VALUES, values);
   }
-  explicit Uint8VectorBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit Uint8VectorBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<Uint8Vector> Finish() {
+  ::flatbuffers::Offset<Uint8Vector> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<Uint8Vector>(end);
+    auto o = ::flatbuffers::Offset<Uint8Vector>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<Uint8Vector> CreateUint8Vector(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::Vector<uint8_t>> values = 0) {
+inline ::flatbuffers::Offset<Uint8Vector> CreateUint8Vector(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    ::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> values = 0) {
   Uint8VectorBuilder builder_(_fbb);
   builder_.add_values(values);
   return builder_.Finish();
 }
 
-inline flatbuffers::Offset<Uint8Vector> CreateUint8VectorDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<Uint8Vector> CreateUint8VectorDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     const std::vector<uint8_t> *values = nullptr) {
   if (values) { _fbb.ForceVectorAlignment(values->size(), sizeof(uint8_t), 4); }
   auto values__ = values ? _fbb.CreateVector<uint8_t>(*values) : 0;
@@ -4543,9 +5426,9 @@
       values__);
 }
 
-flatbuffers::Offset<Uint8Vector> CreateUint8Vector(flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<Uint8Vector> CreateUint8Vector(::flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct DimensionMetadataT : public flatbuffers::NativeTable {
+struct DimensionMetadataT : public ::flatbuffers::NativeTable {
   typedef DimensionMetadata TableType;
   tflite::DimensionType format = tflite::DimensionType_DENSE;
   int32_t dense_size = 0;
@@ -4553,7 +5436,7 @@
   tflite::SparseIndexVectorUnion array_indices{};
 };
 
-struct DimensionMetadata FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct DimensionMetadata FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef DimensionMetadataT NativeTableType;
   typedef DimensionMetadataBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -4602,7 +5485,7 @@
   const tflite::Uint8Vector *array_indices_as_Uint8Vector() const {
     return array_indices_type() == tflite::SparseIndexVector_Uint8Vector ? static_cast<const tflite::Uint8Vector *>(array_indices()) : nullptr;
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int8_t>(verifier, VT_FORMAT, 1) &&
            VerifyField<int32_t>(verifier, VT_DENSE_SIZE, 4) &&
@@ -4614,9 +5497,9 @@
            VerifySparseIndexVector(verifier, array_indices(), array_indices_type()) &&
            verifier.EndTable();
   }
-  DimensionMetadataT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(DimensionMetadataT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<DimensionMetadata> Pack(flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  DimensionMetadataT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(DimensionMetadataT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<DimensionMetadata> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 template<> inline const tflite::Int32Vector *DimensionMetadata::array_segments_as<tflite::Int32Vector>() const {
@@ -4645,8 +5528,8 @@
 
 struct DimensionMetadataBuilder {
   typedef DimensionMetadata Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_format(tflite::DimensionType format) {
     fbb_.AddElement<int8_t>(DimensionMetadata::VT_FORMAT, static_cast<int8_t>(format), 0);
   }
@@ -4656,34 +5539,34 @@
   void add_array_segments_type(tflite::SparseIndexVector array_segments_type) {
     fbb_.AddElement<uint8_t>(DimensionMetadata::VT_ARRAY_SEGMENTS_TYPE, static_cast<uint8_t>(array_segments_type), 0);
   }
-  void add_array_segments(flatbuffers::Offset<void> array_segments) {
+  void add_array_segments(::flatbuffers::Offset<void> array_segments) {
     fbb_.AddOffset(DimensionMetadata::VT_ARRAY_SEGMENTS, array_segments);
   }
   void add_array_indices_type(tflite::SparseIndexVector array_indices_type) {
     fbb_.AddElement<uint8_t>(DimensionMetadata::VT_ARRAY_INDICES_TYPE, static_cast<uint8_t>(array_indices_type), 0);
   }
-  void add_array_indices(flatbuffers::Offset<void> array_indices) {
+  void add_array_indices(::flatbuffers::Offset<void> array_indices) {
     fbb_.AddOffset(DimensionMetadata::VT_ARRAY_INDICES, array_indices);
   }
-  explicit DimensionMetadataBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit DimensionMetadataBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<DimensionMetadata> Finish() {
+  ::flatbuffers::Offset<DimensionMetadata> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<DimensionMetadata>(end);
+    auto o = ::flatbuffers::Offset<DimensionMetadata>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<DimensionMetadata> CreateDimensionMetadata(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<DimensionMetadata> CreateDimensionMetadata(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     tflite::DimensionType format = tflite::DimensionType_DENSE,
     int32_t dense_size = 0,
     tflite::SparseIndexVector array_segments_type = tflite::SparseIndexVector_NONE,
-    flatbuffers::Offset<void> array_segments = 0,
+    ::flatbuffers::Offset<void> array_segments = 0,
     tflite::SparseIndexVector array_indices_type = tflite::SparseIndexVector_NONE,
-    flatbuffers::Offset<void> array_indices = 0) {
+    ::flatbuffers::Offset<void> array_indices = 0) {
   DimensionMetadataBuilder builder_(_fbb);
   builder_.add_array_indices(array_indices);
   builder_.add_array_segments(array_segments);
@@ -4694,9 +5577,9 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<DimensionMetadata> CreateDimensionMetadata(flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<DimensionMetadata> CreateDimensionMetadata(::flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct SparsityParametersT : public flatbuffers::NativeTable {
+struct SparsityParametersT : public ::flatbuffers::NativeTable {
   typedef SparsityParameters TableType;
   std::vector<int32_t> traversal_order{};
   std::vector<int32_t> block_map{};
@@ -4707,7 +5590,7 @@
   SparsityParametersT &operator=(SparsityParametersT o) FLATBUFFERS_NOEXCEPT;
 };
 
-struct SparsityParameters FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct SparsityParameters FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef SparsityParametersT NativeTableType;
   typedef SparsityParametersBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -4715,16 +5598,16 @@
     VT_BLOCK_MAP = 6,
     VT_DIM_METADATA = 8
   };
-  const flatbuffers::Vector<int32_t> *traversal_order() const {
-    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_TRAVERSAL_ORDER);
+  const ::flatbuffers::Vector<int32_t> *traversal_order() const {
+    return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_TRAVERSAL_ORDER);
   }
-  const flatbuffers::Vector<int32_t> *block_map() const {
-    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_BLOCK_MAP);
+  const ::flatbuffers::Vector<int32_t> *block_map() const {
+    return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_BLOCK_MAP);
   }
-  const flatbuffers::Vector<flatbuffers::Offset<tflite::DimensionMetadata>> *dim_metadata() const {
-    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tflite::DimensionMetadata>> *>(VT_DIM_METADATA);
+  const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::DimensionMetadata>> *dim_metadata() const {
+    return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::DimensionMetadata>> *>(VT_DIM_METADATA);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyOffset(verifier, VT_TRAVERSAL_ORDER) &&
            verifier.VerifyVector(traversal_order()) &&
@@ -4735,40 +5618,40 @@
            verifier.VerifyVectorOfTables(dim_metadata()) &&
            verifier.EndTable();
   }
-  SparsityParametersT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(SparsityParametersT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<SparsityParameters> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SparsityParametersT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  SparsityParametersT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(SparsityParametersT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<SparsityParameters> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SparsityParametersT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct SparsityParametersBuilder {
   typedef SparsityParameters Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_traversal_order(flatbuffers::Offset<flatbuffers::Vector<int32_t>> traversal_order) {
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_traversal_order(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> traversal_order) {
     fbb_.AddOffset(SparsityParameters::VT_TRAVERSAL_ORDER, traversal_order);
   }
-  void add_block_map(flatbuffers::Offset<flatbuffers::Vector<int32_t>> block_map) {
+  void add_block_map(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> block_map) {
     fbb_.AddOffset(SparsityParameters::VT_BLOCK_MAP, block_map);
   }
-  void add_dim_metadata(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::DimensionMetadata>>> dim_metadata) {
+  void add_dim_metadata(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::DimensionMetadata>>> dim_metadata) {
     fbb_.AddOffset(SparsityParameters::VT_DIM_METADATA, dim_metadata);
   }
-  explicit SparsityParametersBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit SparsityParametersBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<SparsityParameters> Finish() {
+  ::flatbuffers::Offset<SparsityParameters> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<SparsityParameters>(end);
+    auto o = ::flatbuffers::Offset<SparsityParameters>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<SparsityParameters> CreateSparsityParameters(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::Vector<int32_t>> traversal_order = 0,
-    flatbuffers::Offset<flatbuffers::Vector<int32_t>> block_map = 0,
-    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::DimensionMetadata>>> dim_metadata = 0) {
+inline ::flatbuffers::Offset<SparsityParameters> CreateSparsityParameters(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> traversal_order = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> block_map = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::DimensionMetadata>>> dim_metadata = 0) {
   SparsityParametersBuilder builder_(_fbb);
   builder_.add_dim_metadata(dim_metadata);
   builder_.add_block_map(block_map);
@@ -4776,14 +5659,14 @@
   return builder_.Finish();
 }
 
-inline flatbuffers::Offset<SparsityParameters> CreateSparsityParametersDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<SparsityParameters> CreateSparsityParametersDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     const std::vector<int32_t> *traversal_order = nullptr,
     const std::vector<int32_t> *block_map = nullptr,
-    const std::vector<flatbuffers::Offset<tflite::DimensionMetadata>> *dim_metadata = nullptr) {
+    const std::vector<::flatbuffers::Offset<tflite::DimensionMetadata>> *dim_metadata = nullptr) {
   auto traversal_order__ = traversal_order ? _fbb.CreateVector<int32_t>(*traversal_order) : 0;
   auto block_map__ = block_map ? _fbb.CreateVector<int32_t>(*block_map) : 0;
-  auto dim_metadata__ = dim_metadata ? _fbb.CreateVector<flatbuffers::Offset<tflite::DimensionMetadata>>(*dim_metadata) : 0;
+  auto dim_metadata__ = dim_metadata ? _fbb.CreateVector<::flatbuffers::Offset<tflite::DimensionMetadata>>(*dim_metadata) : 0;
   return tflite::CreateSparsityParameters(
       _fbb,
       traversal_order__,
@@ -4791,16 +5674,16 @@
       dim_metadata__);
 }
 
-flatbuffers::Offset<SparsityParameters> CreateSparsityParameters(flatbuffers::FlatBufferBuilder &_fbb, const SparsityParametersT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<SparsityParameters> CreateSparsityParameters(::flatbuffers::FlatBufferBuilder &_fbb, const SparsityParametersT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct VariantSubTypeT : public flatbuffers::NativeTable {
+struct VariantSubTypeT : public ::flatbuffers::NativeTable {
   typedef VariantSubType TableType;
   std::vector<int32_t> shape{};
   tflite::TensorType type = tflite::TensorType_FLOAT32;
   bool has_rank = false;
 };
 
-struct VariantSubType FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct VariantSubType FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef VariantSubTypeT NativeTableType;
   typedef VariantSubTypeBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -4808,8 +5691,8 @@
     VT_TYPE = 6,
     VT_HAS_RANK = 8
   };
-  const flatbuffers::Vector<int32_t> *shape() const {
-    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SHAPE);
+  const ::flatbuffers::Vector<int32_t> *shape() const {
+    return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_SHAPE);
   }
   tflite::TensorType type() const {
     return static_cast<tflite::TensorType>(GetField<int8_t>(VT_TYPE, 0));
@@ -4817,7 +5700,7 @@
   bool has_rank() const {
     return GetField<uint8_t>(VT_HAS_RANK, 0) != 0;
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyOffset(verifier, VT_SHAPE) &&
            verifier.VerifyVector(shape()) &&
@@ -4825,16 +5708,16 @@
            VerifyField<uint8_t>(verifier, VT_HAS_RANK, 1) &&
            verifier.EndTable();
   }
-  VariantSubTypeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(VariantSubTypeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<VariantSubType> Pack(flatbuffers::FlatBufferBuilder &_fbb, const VariantSubTypeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  VariantSubTypeT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(VariantSubTypeT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<VariantSubType> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const VariantSubTypeT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct VariantSubTypeBuilder {
   typedef VariantSubType Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_shape(flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape) {
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_shape(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> shape) {
     fbb_.AddOffset(VariantSubType::VT_SHAPE, shape);
   }
   void add_type(tflite::TensorType type) {
@@ -4843,20 +5726,20 @@
   void add_has_rank(bool has_rank) {
     fbb_.AddElement<uint8_t>(VariantSubType::VT_HAS_RANK, static_cast<uint8_t>(has_rank), 0);
   }
-  explicit VariantSubTypeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit VariantSubTypeBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<VariantSubType> Finish() {
+  ::flatbuffers::Offset<VariantSubType> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<VariantSubType>(end);
+    auto o = ::flatbuffers::Offset<VariantSubType>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<VariantSubType> CreateVariantSubType(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape = 0,
+inline ::flatbuffers::Offset<VariantSubType> CreateVariantSubType(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> shape = 0,
     tflite::TensorType type = tflite::TensorType_FLOAT32,
     bool has_rank = false) {
   VariantSubTypeBuilder builder_(_fbb);
@@ -4866,8 +5749,8 @@
   return builder_.Finish();
 }
 
-inline flatbuffers::Offset<VariantSubType> CreateVariantSubTypeDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<VariantSubType> CreateVariantSubTypeDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     const std::vector<int32_t> *shape = nullptr,
     tflite::TensorType type = tflite::TensorType_FLOAT32,
     bool has_rank = false) {
@@ -4879,9 +5762,9 @@
       has_rank);
 }
 
-flatbuffers::Offset<VariantSubType> CreateVariantSubType(flatbuffers::FlatBufferBuilder &_fbb, const VariantSubTypeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<VariantSubType> CreateVariantSubType(::flatbuffers::FlatBufferBuilder &_fbb, const VariantSubTypeT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct TensorT : public flatbuffers::NativeTable {
+struct TensorT : public ::flatbuffers::NativeTable {
   typedef Tensor TableType;
   std::vector<int32_t> shape{};
   tflite::TensorType type = tflite::TensorType_FLOAT32;
@@ -4899,7 +5782,7 @@
   TensorT &operator=(TensorT o) FLATBUFFERS_NOEXCEPT;
 };
 
-struct Tensor FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct Tensor FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef TensorT NativeTableType;
   typedef TensorBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -4914,8 +5797,8 @@
     VT_HAS_RANK = 20,
     VT_VARIANT_TENSORS = 22
   };
-  const flatbuffers::Vector<int32_t> *shape() const {
-    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SHAPE);
+  const ::flatbuffers::Vector<int32_t> *shape() const {
+    return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_SHAPE);
   }
   tflite::TensorType type() const {
     return static_cast<tflite::TensorType>(GetField<int8_t>(VT_TYPE, 0));
@@ -4923,8 +5806,8 @@
   uint32_t buffer() const {
     return GetField<uint32_t>(VT_BUFFER, 0);
   }
-  const flatbuffers::String *name() const {
-    return GetPointer<const flatbuffers::String *>(VT_NAME);
+  const ::flatbuffers::String *name() const {
+    return GetPointer<const ::flatbuffers::String *>(VT_NAME);
   }
   const tflite::QuantizationParameters *quantization() const {
     return GetPointer<const tflite::QuantizationParameters *>(VT_QUANTIZATION);
@@ -4935,16 +5818,16 @@
   const tflite::SparsityParameters *sparsity() const {
     return GetPointer<const tflite::SparsityParameters *>(VT_SPARSITY);
   }
-  const flatbuffers::Vector<int32_t> *shape_signature() const {
-    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SHAPE_SIGNATURE);
+  const ::flatbuffers::Vector<int32_t> *shape_signature() const {
+    return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_SHAPE_SIGNATURE);
   }
   bool has_rank() const {
     return GetField<uint8_t>(VT_HAS_RANK, 0) != 0;
   }
-  const flatbuffers::Vector<flatbuffers::Offset<tflite::VariantSubType>> *variant_tensors() const {
-    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tflite::VariantSubType>> *>(VT_VARIANT_TENSORS);
+  const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::VariantSubType>> *variant_tensors() const {
+    return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::VariantSubType>> *>(VT_VARIANT_TENSORS);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyOffset(verifier, VT_SHAPE) &&
            verifier.VerifyVector(shape()) &&
@@ -4965,16 +5848,16 @@
            verifier.VerifyVectorOfTables(variant_tensors()) &&
            verifier.EndTable();
   }
-  TensorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(TensorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<Tensor> Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  TensorT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(TensorT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<Tensor> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TensorT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct TensorBuilder {
   typedef Tensor Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_shape(flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape) {
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_shape(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> shape) {
     fbb_.AddOffset(Tensor::VT_SHAPE, shape);
   }
   void add_type(tflite::TensorType type) {
@@ -4983,50 +5866,50 @@
   void add_buffer(uint32_t buffer) {
     fbb_.AddElement<uint32_t>(Tensor::VT_BUFFER, buffer, 0);
   }
-  void add_name(flatbuffers::Offset<flatbuffers::String> name) {
+  void add_name(::flatbuffers::Offset<::flatbuffers::String> name) {
     fbb_.AddOffset(Tensor::VT_NAME, name);
   }
-  void add_quantization(flatbuffers::Offset<tflite::QuantizationParameters> quantization) {
+  void add_quantization(::flatbuffers::Offset<tflite::QuantizationParameters> quantization) {
     fbb_.AddOffset(Tensor::VT_QUANTIZATION, quantization);
   }
   void add_is_variable(bool is_variable) {
     fbb_.AddElement<uint8_t>(Tensor::VT_IS_VARIABLE, static_cast<uint8_t>(is_variable), 0);
   }
-  void add_sparsity(flatbuffers::Offset<tflite::SparsityParameters> sparsity) {
+  void add_sparsity(::flatbuffers::Offset<tflite::SparsityParameters> sparsity) {
     fbb_.AddOffset(Tensor::VT_SPARSITY, sparsity);
   }
-  void add_shape_signature(flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape_signature) {
+  void add_shape_signature(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> shape_signature) {
     fbb_.AddOffset(Tensor::VT_SHAPE_SIGNATURE, shape_signature);
   }
   void add_has_rank(bool has_rank) {
     fbb_.AddElement<uint8_t>(Tensor::VT_HAS_RANK, static_cast<uint8_t>(has_rank), 0);
   }
-  void add_variant_tensors(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::VariantSubType>>> variant_tensors) {
+  void add_variant_tensors(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::VariantSubType>>> variant_tensors) {
     fbb_.AddOffset(Tensor::VT_VARIANT_TENSORS, variant_tensors);
   }
-  explicit TensorBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit TensorBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<Tensor> Finish() {
+  ::flatbuffers::Offset<Tensor> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<Tensor>(end);
+    auto o = ::flatbuffers::Offset<Tensor>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<Tensor> CreateTensor(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape = 0,
+inline ::flatbuffers::Offset<Tensor> CreateTensor(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> shape = 0,
     tflite::TensorType type = tflite::TensorType_FLOAT32,
     uint32_t buffer = 0,
-    flatbuffers::Offset<flatbuffers::String> name = 0,
-    flatbuffers::Offset<tflite::QuantizationParameters> quantization = 0,
+    ::flatbuffers::Offset<::flatbuffers::String> name = 0,
+    ::flatbuffers::Offset<tflite::QuantizationParameters> quantization = 0,
     bool is_variable = false,
-    flatbuffers::Offset<tflite::SparsityParameters> sparsity = 0,
-    flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape_signature = 0,
+    ::flatbuffers::Offset<tflite::SparsityParameters> sparsity = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> shape_signature = 0,
     bool has_rank = false,
-    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::VariantSubType>>> variant_tensors = 0) {
+    ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::VariantSubType>>> variant_tensors = 0) {
   TensorBuilder builder_(_fbb);
   builder_.add_variant_tensors(variant_tensors);
   builder_.add_shape_signature(shape_signature);
@@ -5041,22 +5924,22 @@
   return builder_.Finish();
 }
 
-inline flatbuffers::Offset<Tensor> CreateTensorDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<Tensor> CreateTensorDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     const std::vector<int32_t> *shape = nullptr,
     tflite::TensorType type = tflite::TensorType_FLOAT32,
     uint32_t buffer = 0,
     const char *name = nullptr,
-    flatbuffers::Offset<tflite::QuantizationParameters> quantization = 0,
+    ::flatbuffers::Offset<tflite::QuantizationParameters> quantization = 0,
     bool is_variable = false,
-    flatbuffers::Offset<tflite::SparsityParameters> sparsity = 0,
+    ::flatbuffers::Offset<tflite::SparsityParameters> sparsity = 0,
     const std::vector<int32_t> *shape_signature = nullptr,
     bool has_rank = false,
-    const std::vector<flatbuffers::Offset<tflite::VariantSubType>> *variant_tensors = nullptr) {
+    const std::vector<::flatbuffers::Offset<tflite::VariantSubType>> *variant_tensors = nullptr) {
   auto shape__ = shape ? _fbb.CreateVector<int32_t>(*shape) : 0;
   auto name__ = name ? _fbb.CreateString(name) : 0;
   auto shape_signature__ = shape_signature ? _fbb.CreateVector<int32_t>(*shape_signature) : 0;
-  auto variant_tensors__ = variant_tensors ? _fbb.CreateVector<flatbuffers::Offset<tflite::VariantSubType>>(*variant_tensors) : 0;
+  auto variant_tensors__ = variant_tensors ? _fbb.CreateVector<::flatbuffers::Offset<tflite::VariantSubType>>(*variant_tensors) : 0;
   return tflite::CreateTensor(
       _fbb,
       shape__,
@@ -5071,9 +5954,1763 @@
       variant_tensors__);
 }
 
-flatbuffers::Offset<Tensor> CreateTensor(flatbuffers::FlatBufferBuilder &_fbb, const TensorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<Tensor> CreateTensor(::flatbuffers::FlatBufferBuilder &_fbb, const TensorT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct Conv2DOptionsT : public flatbuffers::NativeTable {
+struct StablehloGatherOptionsT : public ::flatbuffers::NativeTable {
+  typedef StablehloGatherOptions TableType;
+  std::vector<int64_t> offset_dims{};
+  std::vector<int64_t> collapsed_slice_dims{};
+  std::vector<int64_t> start_index_map{};
+  int64_t index_vector_dim = 0;
+  std::vector<int64_t> slice_sizes{};
+  bool indices_are_sorted = false;
+};
+
+struct StablehloGatherOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
+  typedef StablehloGatherOptionsT NativeTableType;
+  typedef StablehloGatherOptionsBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_OFFSET_DIMS = 4,
+    VT_COLLAPSED_SLICE_DIMS = 6,
+    VT_START_INDEX_MAP = 8,
+    VT_INDEX_VECTOR_DIM = 10,
+    VT_SLICE_SIZES = 12,
+    VT_INDICES_ARE_SORTED = 14
+  };
+  const ::flatbuffers::Vector<int64_t> *offset_dims() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_OFFSET_DIMS);
+  }
+  const ::flatbuffers::Vector<int64_t> *collapsed_slice_dims() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_COLLAPSED_SLICE_DIMS);
+  }
+  const ::flatbuffers::Vector<int64_t> *start_index_map() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_START_INDEX_MAP);
+  }
+  int64_t index_vector_dim() const {
+    return GetField<int64_t>(VT_INDEX_VECTOR_DIM, 0);
+  }
+  const ::flatbuffers::Vector<int64_t> *slice_sizes() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_SLICE_SIZES);
+  }
+  bool indices_are_sorted() const {
+    return GetField<uint8_t>(VT_INDICES_ARE_SORTED, 0) != 0;
+  }
+  bool Verify(::flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyOffset(verifier, VT_OFFSET_DIMS) &&
+           verifier.VerifyVector(offset_dims()) &&
+           VerifyOffset(verifier, VT_COLLAPSED_SLICE_DIMS) &&
+           verifier.VerifyVector(collapsed_slice_dims()) &&
+           VerifyOffset(verifier, VT_START_INDEX_MAP) &&
+           verifier.VerifyVector(start_index_map()) &&
+           VerifyField<int64_t>(verifier, VT_INDEX_VECTOR_DIM, 8) &&
+           VerifyOffset(verifier, VT_SLICE_SIZES) &&
+           verifier.VerifyVector(slice_sizes()) &&
+           VerifyField<uint8_t>(verifier, VT_INDICES_ARE_SORTED, 1) &&
+           verifier.EndTable();
+  }
+  StablehloGatherOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(StablehloGatherOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<StablehloGatherOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloGatherOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct StablehloGatherOptionsBuilder {
+  typedef StablehloGatherOptions Table;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_offset_dims(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> offset_dims) {
+    fbb_.AddOffset(StablehloGatherOptions::VT_OFFSET_DIMS, offset_dims);
+  }
+  void add_collapsed_slice_dims(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> collapsed_slice_dims) {
+    fbb_.AddOffset(StablehloGatherOptions::VT_COLLAPSED_SLICE_DIMS, collapsed_slice_dims);
+  }
+  void add_start_index_map(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> start_index_map) {
+    fbb_.AddOffset(StablehloGatherOptions::VT_START_INDEX_MAP, start_index_map);
+  }
+  void add_index_vector_dim(int64_t index_vector_dim) {
+    fbb_.AddElement<int64_t>(StablehloGatherOptions::VT_INDEX_VECTOR_DIM, index_vector_dim, 0);
+  }
+  void add_slice_sizes(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> slice_sizes) {
+    fbb_.AddOffset(StablehloGatherOptions::VT_SLICE_SIZES, slice_sizes);
+  }
+  void add_indices_are_sorted(bool indices_are_sorted) {
+    fbb_.AddElement<uint8_t>(StablehloGatherOptions::VT_INDICES_ARE_SORTED, static_cast<uint8_t>(indices_are_sorted), 0);
+  }
+  explicit StablehloGatherOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  ::flatbuffers::Offset<StablehloGatherOptions> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = ::flatbuffers::Offset<StablehloGatherOptions>(end);
+    return o;
+  }
+};
+
+inline ::flatbuffers::Offset<StablehloGatherOptions> CreateStablehloGatherOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> offset_dims = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> collapsed_slice_dims = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> start_index_map = 0,
+    int64_t index_vector_dim = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> slice_sizes = 0,
+    bool indices_are_sorted = false) {
+  StablehloGatherOptionsBuilder builder_(_fbb);
+  builder_.add_index_vector_dim(index_vector_dim);
+  builder_.add_slice_sizes(slice_sizes);
+  builder_.add_start_index_map(start_index_map);
+  builder_.add_collapsed_slice_dims(collapsed_slice_dims);
+  builder_.add_offset_dims(offset_dims);
+  builder_.add_indices_are_sorted(indices_are_sorted);
+  return builder_.Finish();
+}
+
+inline ::flatbuffers::Offset<StablehloGatherOptions> CreateStablehloGatherOptionsDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    const std::vector<int64_t> *offset_dims = nullptr,
+    const std::vector<int64_t> *collapsed_slice_dims = nullptr,
+    const std::vector<int64_t> *start_index_map = nullptr,
+    int64_t index_vector_dim = 0,
+    const std::vector<int64_t> *slice_sizes = nullptr,
+    bool indices_are_sorted = false) {
+  auto offset_dims__ = offset_dims ? _fbb.CreateVector<int64_t>(*offset_dims) : 0;
+  auto collapsed_slice_dims__ = collapsed_slice_dims ? _fbb.CreateVector<int64_t>(*collapsed_slice_dims) : 0;
+  auto start_index_map__ = start_index_map ? _fbb.CreateVector<int64_t>(*start_index_map) : 0;
+  auto slice_sizes__ = slice_sizes ? _fbb.CreateVector<int64_t>(*slice_sizes) : 0;
+  return tflite::CreateStablehloGatherOptions(
+      _fbb,
+      offset_dims__,
+      collapsed_slice_dims__,
+      start_index_map__,
+      index_vector_dim,
+      slice_sizes__,
+      indices_are_sorted);
+}
+
+::flatbuffers::Offset<StablehloGatherOptions> CreateStablehloGatherOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloGatherOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct StablehloTransposeOptionsT : public ::flatbuffers::NativeTable {
+  typedef StablehloTransposeOptions TableType;
+  std::vector<int64_t> permutation{};
+};
+
+struct StablehloTransposeOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
+  typedef StablehloTransposeOptionsT NativeTableType;
+  typedef StablehloTransposeOptionsBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_PERMUTATION = 4
+  };
+  const ::flatbuffers::Vector<int64_t> *permutation() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_PERMUTATION);
+  }
+  bool Verify(::flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyOffset(verifier, VT_PERMUTATION) &&
+           verifier.VerifyVector(permutation()) &&
+           verifier.EndTable();
+  }
+  StablehloTransposeOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(StablehloTransposeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<StablehloTransposeOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloTransposeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct StablehloTransposeOptionsBuilder {
+  typedef StablehloTransposeOptions Table;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_permutation(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> permutation) {
+    fbb_.AddOffset(StablehloTransposeOptions::VT_PERMUTATION, permutation);
+  }
+  explicit StablehloTransposeOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  ::flatbuffers::Offset<StablehloTransposeOptions> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = ::flatbuffers::Offset<StablehloTransposeOptions>(end);
+    return o;
+  }
+};
+
+inline ::flatbuffers::Offset<StablehloTransposeOptions> CreateStablehloTransposeOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> permutation = 0) {
+  StablehloTransposeOptionsBuilder builder_(_fbb);
+  builder_.add_permutation(permutation);
+  return builder_.Finish();
+}
+
+inline ::flatbuffers::Offset<StablehloTransposeOptions> CreateStablehloTransposeOptionsDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    const std::vector<int64_t> *permutation = nullptr) {
+  auto permutation__ = permutation ? _fbb.CreateVector<int64_t>(*permutation) : 0;
+  return tflite::CreateStablehloTransposeOptions(
+      _fbb,
+      permutation__);
+}
+
+::flatbuffers::Offset<StablehloTransposeOptions> CreateStablehloTransposeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloTransposeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct StablehloDotGeneralOptionsT : public ::flatbuffers::NativeTable {
+  typedef StablehloDotGeneralOptions TableType;
+  std::vector<int64_t> lhs_batching_dimensions{};
+  std::vector<int64_t> rhs_batching_dimensions{};
+  std::vector<int64_t> lhs_contracting_dimensions{};
+  std::vector<int64_t> rhs_contracting_dimensions{};
+  std::vector<tflite::StablehloPrecisionConfig> precision_config{};
+};
+
+struct StablehloDotGeneralOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
+  typedef StablehloDotGeneralOptionsT NativeTableType;
+  typedef StablehloDotGeneralOptionsBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_LHS_BATCHING_DIMENSIONS = 4,
+    VT_RHS_BATCHING_DIMENSIONS = 6,
+    VT_LHS_CONTRACTING_DIMENSIONS = 8,
+    VT_RHS_CONTRACTING_DIMENSIONS = 10,
+    VT_PRECISION_CONFIG = 12
+  };
+  const ::flatbuffers::Vector<int64_t> *lhs_batching_dimensions() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_LHS_BATCHING_DIMENSIONS);
+  }
+  const ::flatbuffers::Vector<int64_t> *rhs_batching_dimensions() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_RHS_BATCHING_DIMENSIONS);
+  }
+  const ::flatbuffers::Vector<int64_t> *lhs_contracting_dimensions() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_LHS_CONTRACTING_DIMENSIONS);
+  }
+  const ::flatbuffers::Vector<int64_t> *rhs_contracting_dimensions() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_RHS_CONTRACTING_DIMENSIONS);
+  }
+  const ::flatbuffers::Vector<uint32_t> *precision_config() const {
+    return GetPointer<const ::flatbuffers::Vector<uint32_t> *>(VT_PRECISION_CONFIG);
+  }
+  bool Verify(::flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyOffset(verifier, VT_LHS_BATCHING_DIMENSIONS) &&
+           verifier.VerifyVector(lhs_batching_dimensions()) &&
+           VerifyOffset(verifier, VT_RHS_BATCHING_DIMENSIONS) &&
+           verifier.VerifyVector(rhs_batching_dimensions()) &&
+           VerifyOffset(verifier, VT_LHS_CONTRACTING_DIMENSIONS) &&
+           verifier.VerifyVector(lhs_contracting_dimensions()) &&
+           VerifyOffset(verifier, VT_RHS_CONTRACTING_DIMENSIONS) &&
+           verifier.VerifyVector(rhs_contracting_dimensions()) &&
+           VerifyOffset(verifier, VT_PRECISION_CONFIG) &&
+           verifier.VerifyVector(precision_config()) &&
+           verifier.EndTable();
+  }
+  StablehloDotGeneralOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(StablehloDotGeneralOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<StablehloDotGeneralOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloDotGeneralOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct StablehloDotGeneralOptionsBuilder {
+  typedef StablehloDotGeneralOptions Table;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_lhs_batching_dimensions(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> lhs_batching_dimensions) {
+    fbb_.AddOffset(StablehloDotGeneralOptions::VT_LHS_BATCHING_DIMENSIONS, lhs_batching_dimensions);
+  }
+  void add_rhs_batching_dimensions(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> rhs_batching_dimensions) {
+    fbb_.AddOffset(StablehloDotGeneralOptions::VT_RHS_BATCHING_DIMENSIONS, rhs_batching_dimensions);
+  }
+  void add_lhs_contracting_dimensions(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> lhs_contracting_dimensions) {
+    fbb_.AddOffset(StablehloDotGeneralOptions::VT_LHS_CONTRACTING_DIMENSIONS, lhs_contracting_dimensions);
+  }
+  void add_rhs_contracting_dimensions(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> rhs_contracting_dimensions) {
+    fbb_.AddOffset(StablehloDotGeneralOptions::VT_RHS_CONTRACTING_DIMENSIONS, rhs_contracting_dimensions);
+  }
+  void add_precision_config(::flatbuffers::Offset<::flatbuffers::Vector<uint32_t>> precision_config) {
+    fbb_.AddOffset(StablehloDotGeneralOptions::VT_PRECISION_CONFIG, precision_config);
+  }
+  explicit StablehloDotGeneralOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  ::flatbuffers::Offset<StablehloDotGeneralOptions> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = ::flatbuffers::Offset<StablehloDotGeneralOptions>(end);
+    return o;
+  }
+};
+
+inline ::flatbuffers::Offset<StablehloDotGeneralOptions> CreateStablehloDotGeneralOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> lhs_batching_dimensions = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> rhs_batching_dimensions = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> lhs_contracting_dimensions = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> rhs_contracting_dimensions = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<uint32_t>> precision_config = 0) {
+  StablehloDotGeneralOptionsBuilder builder_(_fbb);
+  builder_.add_precision_config(precision_config);
+  builder_.add_rhs_contracting_dimensions(rhs_contracting_dimensions);
+  builder_.add_lhs_contracting_dimensions(lhs_contracting_dimensions);
+  builder_.add_rhs_batching_dimensions(rhs_batching_dimensions);
+  builder_.add_lhs_batching_dimensions(lhs_batching_dimensions);
+  return builder_.Finish();
+}
+
+inline ::flatbuffers::Offset<StablehloDotGeneralOptions> CreateStablehloDotGeneralOptionsDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    const std::vector<int64_t> *lhs_batching_dimensions = nullptr,
+    const std::vector<int64_t> *rhs_batching_dimensions = nullptr,
+    const std::vector<int64_t> *lhs_contracting_dimensions = nullptr,
+    const std::vector<int64_t> *rhs_contracting_dimensions = nullptr,
+    const std::vector<uint32_t> *precision_config = nullptr) {
+  auto lhs_batching_dimensions__ = lhs_batching_dimensions ? _fbb.CreateVector<int64_t>(*lhs_batching_dimensions) : 0;
+  auto rhs_batching_dimensions__ = rhs_batching_dimensions ? _fbb.CreateVector<int64_t>(*rhs_batching_dimensions) : 0;
+  auto lhs_contracting_dimensions__ = lhs_contracting_dimensions ? _fbb.CreateVector<int64_t>(*lhs_contracting_dimensions) : 0;
+  auto rhs_contracting_dimensions__ = rhs_contracting_dimensions ? _fbb.CreateVector<int64_t>(*rhs_contracting_dimensions) : 0;
+  auto precision_config__ = precision_config ? _fbb.CreateVector<uint32_t>(*precision_config) : 0;
+  return tflite::CreateStablehloDotGeneralOptions(
+      _fbb,
+      lhs_batching_dimensions__,
+      rhs_batching_dimensions__,
+      lhs_contracting_dimensions__,
+      rhs_contracting_dimensions__,
+      precision_config__);
+}
+
+::flatbuffers::Offset<StablehloDotGeneralOptions> CreateStablehloDotGeneralOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloDotGeneralOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct StablehloReduceWindowOptionsT : public ::flatbuffers::NativeTable {
+  typedef StablehloReduceWindowOptions TableType;
+  std::vector<int64_t> window_dimensions{};
+  std::vector<int64_t> window_strides{};
+  std::vector<int64_t> base_dilations{};
+  std::vector<int64_t> window_dilations{};
+  std::vector<int64_t> padding{};
+  int32_t body_subgraph_index = 0;
+};
+
+struct StablehloReduceWindowOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
+  typedef StablehloReduceWindowOptionsT NativeTableType;
+  typedef StablehloReduceWindowOptionsBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_WINDOW_DIMENSIONS = 4,
+    VT_WINDOW_STRIDES = 6,
+    VT_BASE_DILATIONS = 8,
+    VT_WINDOW_DILATIONS = 10,
+    VT_PADDING = 12,
+    VT_BODY_SUBGRAPH_INDEX = 14
+  };
+  const ::flatbuffers::Vector<int64_t> *window_dimensions() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_WINDOW_DIMENSIONS);
+  }
+  const ::flatbuffers::Vector<int64_t> *window_strides() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_WINDOW_STRIDES);
+  }
+  const ::flatbuffers::Vector<int64_t> *base_dilations() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_BASE_DILATIONS);
+  }
+  const ::flatbuffers::Vector<int64_t> *window_dilations() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_WINDOW_DILATIONS);
+  }
+  const ::flatbuffers::Vector<int64_t> *padding() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_PADDING);
+  }
+  int32_t body_subgraph_index() const {
+    return GetField<int32_t>(VT_BODY_SUBGRAPH_INDEX, 0);
+  }
+  bool Verify(::flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyOffset(verifier, VT_WINDOW_DIMENSIONS) &&
+           verifier.VerifyVector(window_dimensions()) &&
+           VerifyOffset(verifier, VT_WINDOW_STRIDES) &&
+           verifier.VerifyVector(window_strides()) &&
+           VerifyOffset(verifier, VT_BASE_DILATIONS) &&
+           verifier.VerifyVector(base_dilations()) &&
+           VerifyOffset(verifier, VT_WINDOW_DILATIONS) &&
+           verifier.VerifyVector(window_dilations()) &&
+           VerifyOffset(verifier, VT_PADDING) &&
+           verifier.VerifyVector(padding()) &&
+           VerifyField<int32_t>(verifier, VT_BODY_SUBGRAPH_INDEX, 4) &&
+           verifier.EndTable();
+  }
+  StablehloReduceWindowOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(StablehloReduceWindowOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<StablehloReduceWindowOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloReduceWindowOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct StablehloReduceWindowOptionsBuilder {
+  typedef StablehloReduceWindowOptions Table;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_window_dimensions(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> window_dimensions) {
+    fbb_.AddOffset(StablehloReduceWindowOptions::VT_WINDOW_DIMENSIONS, window_dimensions);
+  }
+  void add_window_strides(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> window_strides) {
+    fbb_.AddOffset(StablehloReduceWindowOptions::VT_WINDOW_STRIDES, window_strides);
+  }
+  void add_base_dilations(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> base_dilations) {
+    fbb_.AddOffset(StablehloReduceWindowOptions::VT_BASE_DILATIONS, base_dilations);
+  }
+  void add_window_dilations(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> window_dilations) {
+    fbb_.AddOffset(StablehloReduceWindowOptions::VT_WINDOW_DILATIONS, window_dilations);
+  }
+  void add_padding(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> padding) {
+    fbb_.AddOffset(StablehloReduceWindowOptions::VT_PADDING, padding);
+  }
+  void add_body_subgraph_index(int32_t body_subgraph_index) {
+    fbb_.AddElement<int32_t>(StablehloReduceWindowOptions::VT_BODY_SUBGRAPH_INDEX, body_subgraph_index, 0);
+  }
+  explicit StablehloReduceWindowOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  ::flatbuffers::Offset<StablehloReduceWindowOptions> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = ::flatbuffers::Offset<StablehloReduceWindowOptions>(end);
+    return o;
+  }
+};
+
+inline ::flatbuffers::Offset<StablehloReduceWindowOptions> CreateStablehloReduceWindowOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> window_dimensions = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> window_strides = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> base_dilations = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> window_dilations = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> padding = 0,
+    int32_t body_subgraph_index = 0) {
+  StablehloReduceWindowOptionsBuilder builder_(_fbb);
+  builder_.add_body_subgraph_index(body_subgraph_index);
+  builder_.add_padding(padding);
+  builder_.add_window_dilations(window_dilations);
+  builder_.add_base_dilations(base_dilations);
+  builder_.add_window_strides(window_strides);
+  builder_.add_window_dimensions(window_dimensions);
+  return builder_.Finish();
+}
+
+inline ::flatbuffers::Offset<StablehloReduceWindowOptions> CreateStablehloReduceWindowOptionsDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    const std::vector<int64_t> *window_dimensions = nullptr,
+    const std::vector<int64_t> *window_strides = nullptr,
+    const std::vector<int64_t> *base_dilations = nullptr,
+    const std::vector<int64_t> *window_dilations = nullptr,
+    const std::vector<int64_t> *padding = nullptr,
+    int32_t body_subgraph_index = 0) {
+  auto window_dimensions__ = window_dimensions ? _fbb.CreateVector<int64_t>(*window_dimensions) : 0;
+  auto window_strides__ = window_strides ? _fbb.CreateVector<int64_t>(*window_strides) : 0;
+  auto base_dilations__ = base_dilations ? _fbb.CreateVector<int64_t>(*base_dilations) : 0;
+  auto window_dilations__ = window_dilations ? _fbb.CreateVector<int64_t>(*window_dilations) : 0;
+  auto padding__ = padding ? _fbb.CreateVector<int64_t>(*padding) : 0;
+  return tflite::CreateStablehloReduceWindowOptions(
+      _fbb,
+      window_dimensions__,
+      window_strides__,
+      base_dilations__,
+      window_dilations__,
+      padding__,
+      body_subgraph_index);
+}
+
+::flatbuffers::Offset<StablehloReduceWindowOptions> CreateStablehloReduceWindowOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloReduceWindowOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct StablehloWhileOptionsT : public ::flatbuffers::NativeTable {
+  typedef StablehloWhileOptions TableType;
+  int32_t cond_subgraph_index = 0;
+  int32_t body_subgraph_index = 0;
+};
+
+struct StablehloWhileOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
+  typedef StablehloWhileOptionsT NativeTableType;
+  typedef StablehloWhileOptionsBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_COND_SUBGRAPH_INDEX = 4,
+    VT_BODY_SUBGRAPH_INDEX = 6
+  };
+  int32_t cond_subgraph_index() const {
+    return GetField<int32_t>(VT_COND_SUBGRAPH_INDEX, 0);
+  }
+  int32_t body_subgraph_index() const {
+    return GetField<int32_t>(VT_BODY_SUBGRAPH_INDEX, 0);
+  }
+  bool Verify(::flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyField<int32_t>(verifier, VT_COND_SUBGRAPH_INDEX, 4) &&
+           VerifyField<int32_t>(verifier, VT_BODY_SUBGRAPH_INDEX, 4) &&
+           verifier.EndTable();
+  }
+  StablehloWhileOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(StablehloWhileOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<StablehloWhileOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloWhileOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct StablehloWhileOptionsBuilder {
+  typedef StablehloWhileOptions Table;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_cond_subgraph_index(int32_t cond_subgraph_index) {
+    fbb_.AddElement<int32_t>(StablehloWhileOptions::VT_COND_SUBGRAPH_INDEX, cond_subgraph_index, 0);
+  }
+  void add_body_subgraph_index(int32_t body_subgraph_index) {
+    fbb_.AddElement<int32_t>(StablehloWhileOptions::VT_BODY_SUBGRAPH_INDEX, body_subgraph_index, 0);
+  }
+  explicit StablehloWhileOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  ::flatbuffers::Offset<StablehloWhileOptions> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = ::flatbuffers::Offset<StablehloWhileOptions>(end);
+    return o;
+  }
+};
+
+inline ::flatbuffers::Offset<StablehloWhileOptions> CreateStablehloWhileOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    int32_t cond_subgraph_index = 0,
+    int32_t body_subgraph_index = 0) {
+  StablehloWhileOptionsBuilder builder_(_fbb);
+  builder_.add_body_subgraph_index(body_subgraph_index);
+  builder_.add_cond_subgraph_index(cond_subgraph_index);
+  return builder_.Finish();
+}
+
+::flatbuffers::Offset<StablehloWhileOptions> CreateStablehloWhileOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloWhileOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct StablehloSortOptionsT : public ::flatbuffers::NativeTable {
+  typedef StablehloSortOptions TableType;
+  int64_t dimension = 0;
+  bool is_stable = false;
+  int32_t comparator_subgraph_index = 0;
+};
+
+struct StablehloSortOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
+  typedef StablehloSortOptionsT NativeTableType;
+  typedef StablehloSortOptionsBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_DIMENSION = 4,
+    VT_IS_STABLE = 6,
+    VT_COMPARATOR_SUBGRAPH_INDEX = 8
+  };
+  int64_t dimension() const {
+    return GetField<int64_t>(VT_DIMENSION, 0);
+  }
+  bool is_stable() const {
+    return GetField<uint8_t>(VT_IS_STABLE, 0) != 0;
+  }
+  int32_t comparator_subgraph_index() const {
+    return GetField<int32_t>(VT_COMPARATOR_SUBGRAPH_INDEX, 0);
+  }
+  bool Verify(::flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyField<int64_t>(verifier, VT_DIMENSION, 8) &&
+           VerifyField<uint8_t>(verifier, VT_IS_STABLE, 1) &&
+           VerifyField<int32_t>(verifier, VT_COMPARATOR_SUBGRAPH_INDEX, 4) &&
+           verifier.EndTable();
+  }
+  StablehloSortOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(StablehloSortOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<StablehloSortOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloSortOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct StablehloSortOptionsBuilder {
+  typedef StablehloSortOptions Table;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_dimension(int64_t dimension) {
+    fbb_.AddElement<int64_t>(StablehloSortOptions::VT_DIMENSION, dimension, 0);
+  }
+  void add_is_stable(bool is_stable) {
+    fbb_.AddElement<uint8_t>(StablehloSortOptions::VT_IS_STABLE, static_cast<uint8_t>(is_stable), 0);
+  }
+  void add_comparator_subgraph_index(int32_t comparator_subgraph_index) {
+    fbb_.AddElement<int32_t>(StablehloSortOptions::VT_COMPARATOR_SUBGRAPH_INDEX, comparator_subgraph_index, 0);
+  }
+  explicit StablehloSortOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  ::flatbuffers::Offset<StablehloSortOptions> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = ::flatbuffers::Offset<StablehloSortOptions>(end);
+    return o;
+  }
+};
+
+inline ::flatbuffers::Offset<StablehloSortOptions> CreateStablehloSortOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    int64_t dimension = 0,
+    bool is_stable = false,
+    int32_t comparator_subgraph_index = 0) {
+  StablehloSortOptionsBuilder builder_(_fbb);
+  builder_.add_dimension(dimension);
+  builder_.add_comparator_subgraph_index(comparator_subgraph_index);
+  builder_.add_is_stable(is_stable);
+  return builder_.Finish();
+}
+
+::flatbuffers::Offset<StablehloSortOptions> CreateStablehloSortOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloSortOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct StablehloConcatenateOptionsT : public ::flatbuffers::NativeTable {
+  typedef StablehloConcatenateOptions TableType;
+  int64_t dimension = 0;
+};
+
+struct StablehloConcatenateOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
+  typedef StablehloConcatenateOptionsT NativeTableType;
+  typedef StablehloConcatenateOptionsBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_DIMENSION = 4
+  };
+  int64_t dimension() const {
+    return GetField<int64_t>(VT_DIMENSION, 0);
+  }
+  bool Verify(::flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyField<int64_t>(verifier, VT_DIMENSION, 8) &&
+           verifier.EndTable();
+  }
+  StablehloConcatenateOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(StablehloConcatenateOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<StablehloConcatenateOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloConcatenateOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct StablehloConcatenateOptionsBuilder {
+  typedef StablehloConcatenateOptions Table;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_dimension(int64_t dimension) {
+    fbb_.AddElement<int64_t>(StablehloConcatenateOptions::VT_DIMENSION, dimension, 0);
+  }
+  explicit StablehloConcatenateOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  ::flatbuffers::Offset<StablehloConcatenateOptions> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = ::flatbuffers::Offset<StablehloConcatenateOptions>(end);
+    return o;
+  }
+};
+
+inline ::flatbuffers::Offset<StablehloConcatenateOptions> CreateStablehloConcatenateOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    int64_t dimension = 0) {
+  StablehloConcatenateOptionsBuilder builder_(_fbb);
+  builder_.add_dimension(dimension);
+  return builder_.Finish();
+}
+
+::flatbuffers::Offset<StablehloConcatenateOptions> CreateStablehloConcatenateOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloConcatenateOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct StablehloBroadcastInDimOptionsT : public ::flatbuffers::NativeTable {
+  typedef StablehloBroadcastInDimOptions TableType;
+  std::vector<int64_t> broadcast_dimensions{};
+};
+
+struct StablehloBroadcastInDimOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
+  typedef StablehloBroadcastInDimOptionsT NativeTableType;
+  typedef StablehloBroadcastInDimOptionsBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_BROADCAST_DIMENSIONS = 4
+  };
+  const ::flatbuffers::Vector<int64_t> *broadcast_dimensions() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_BROADCAST_DIMENSIONS);
+  }
+  bool Verify(::flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyOffset(verifier, VT_BROADCAST_DIMENSIONS) &&
+           verifier.VerifyVector(broadcast_dimensions()) &&
+           verifier.EndTable();
+  }
+  StablehloBroadcastInDimOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(StablehloBroadcastInDimOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<StablehloBroadcastInDimOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloBroadcastInDimOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct StablehloBroadcastInDimOptionsBuilder {
+  typedef StablehloBroadcastInDimOptions Table;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_broadcast_dimensions(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> broadcast_dimensions) {
+    fbb_.AddOffset(StablehloBroadcastInDimOptions::VT_BROADCAST_DIMENSIONS, broadcast_dimensions);
+  }
+  explicit StablehloBroadcastInDimOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  ::flatbuffers::Offset<StablehloBroadcastInDimOptions> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = ::flatbuffers::Offset<StablehloBroadcastInDimOptions>(end);
+    return o;
+  }
+};
+
+inline ::flatbuffers::Offset<StablehloBroadcastInDimOptions> CreateStablehloBroadcastInDimOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> broadcast_dimensions = 0) {
+  StablehloBroadcastInDimOptionsBuilder builder_(_fbb);
+  builder_.add_broadcast_dimensions(broadcast_dimensions);
+  return builder_.Finish();
+}
+
+inline ::flatbuffers::Offset<StablehloBroadcastInDimOptions> CreateStablehloBroadcastInDimOptionsDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    const std::vector<int64_t> *broadcast_dimensions = nullptr) {
+  auto broadcast_dimensions__ = broadcast_dimensions ? _fbb.CreateVector<int64_t>(*broadcast_dimensions) : 0;
+  return tflite::CreateStablehloBroadcastInDimOptions(
+      _fbb,
+      broadcast_dimensions__);
+}
+
+::flatbuffers::Offset<StablehloBroadcastInDimOptions> CreateStablehloBroadcastInDimOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloBroadcastInDimOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct StablehloCompareOptionsT : public ::flatbuffers::NativeTable {
+  typedef StablehloCompareOptions TableType;
+  tflite::StablehloComparisonDirection comparison_direction = tflite::StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_EQ;
+  tflite::StablehloComparisonType compare_type = tflite::StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_NOTYPE;
+};
+
+struct StablehloCompareOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
+  typedef StablehloCompareOptionsT NativeTableType;
+  typedef StablehloCompareOptionsBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_COMPARISON_DIRECTION = 4,
+    VT_COMPARE_TYPE = 6
+  };
+  tflite::StablehloComparisonDirection comparison_direction() const {
+    return static_cast<tflite::StablehloComparisonDirection>(GetField<uint32_t>(VT_COMPARISON_DIRECTION, 0));
+  }
+  tflite::StablehloComparisonType compare_type() const {
+    return static_cast<tflite::StablehloComparisonType>(GetField<uint32_t>(VT_COMPARE_TYPE, 0));
+  }
+  bool Verify(::flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyField<uint32_t>(verifier, VT_COMPARISON_DIRECTION, 4) &&
+           VerifyField<uint32_t>(verifier, VT_COMPARE_TYPE, 4) &&
+           verifier.EndTable();
+  }
+  StablehloCompareOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(StablehloCompareOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<StablehloCompareOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloCompareOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct StablehloCompareOptionsBuilder {
+  typedef StablehloCompareOptions Table;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_comparison_direction(tflite::StablehloComparisonDirection comparison_direction) {
+    fbb_.AddElement<uint32_t>(StablehloCompareOptions::VT_COMPARISON_DIRECTION, static_cast<uint32_t>(comparison_direction), 0);
+  }
+  void add_compare_type(tflite::StablehloComparisonType compare_type) {
+    fbb_.AddElement<uint32_t>(StablehloCompareOptions::VT_COMPARE_TYPE, static_cast<uint32_t>(compare_type), 0);
+  }
+  explicit StablehloCompareOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  ::flatbuffers::Offset<StablehloCompareOptions> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = ::flatbuffers::Offset<StablehloCompareOptions>(end);
+    return o;
+  }
+};
+
+inline ::flatbuffers::Offset<StablehloCompareOptions> CreateStablehloCompareOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    tflite::StablehloComparisonDirection comparison_direction = tflite::StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_EQ,
+    tflite::StablehloComparisonType compare_type = tflite::StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_NOTYPE) {
+  StablehloCompareOptionsBuilder builder_(_fbb);
+  builder_.add_compare_type(compare_type);
+  builder_.add_comparison_direction(comparison_direction);
+  return builder_.Finish();
+}
+
+::flatbuffers::Offset<StablehloCompareOptions> CreateStablehloCompareOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloCompareOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct StablehloDynamicSliceOptionsT : public ::flatbuffers::NativeTable {
+  typedef StablehloDynamicSliceOptions TableType;
+  std::vector<int64_t> slice_sizes{};
+};
+
+struct StablehloDynamicSliceOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
+  typedef StablehloDynamicSliceOptionsT NativeTableType;
+  typedef StablehloDynamicSliceOptionsBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_SLICE_SIZES = 4
+  };
+  const ::flatbuffers::Vector<int64_t> *slice_sizes() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_SLICE_SIZES);
+  }
+  bool Verify(::flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyOffset(verifier, VT_SLICE_SIZES) &&
+           verifier.VerifyVector(slice_sizes()) &&
+           verifier.EndTable();
+  }
+  StablehloDynamicSliceOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(StablehloDynamicSliceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<StablehloDynamicSliceOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloDynamicSliceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct StablehloDynamicSliceOptionsBuilder {
+  typedef StablehloDynamicSliceOptions Table;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_slice_sizes(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> slice_sizes) {
+    fbb_.AddOffset(StablehloDynamicSliceOptions::VT_SLICE_SIZES, slice_sizes);
+  }
+  explicit StablehloDynamicSliceOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  ::flatbuffers::Offset<StablehloDynamicSliceOptions> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = ::flatbuffers::Offset<StablehloDynamicSliceOptions>(end);
+    return o;
+  }
+};
+
+inline ::flatbuffers::Offset<StablehloDynamicSliceOptions> CreateStablehloDynamicSliceOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> slice_sizes = 0) {
+  StablehloDynamicSliceOptionsBuilder builder_(_fbb);
+  builder_.add_slice_sizes(slice_sizes);
+  return builder_.Finish();
+}
+
+inline ::flatbuffers::Offset<StablehloDynamicSliceOptions> CreateStablehloDynamicSliceOptionsDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    const std::vector<int64_t> *slice_sizes = nullptr) {
+  auto slice_sizes__ = slice_sizes ? _fbb.CreateVector<int64_t>(*slice_sizes) : 0;
+  return tflite::CreateStablehloDynamicSliceOptions(
+      _fbb,
+      slice_sizes__);
+}
+
+::flatbuffers::Offset<StablehloDynamicSliceOptions> CreateStablehloDynamicSliceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloDynamicSliceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct StablehloPadOptionsT : public ::flatbuffers::NativeTable {
+  typedef StablehloPadOptions TableType;
+  std::vector<int64_t> edge_padding_low{};
+  std::vector<int64_t> edge_padding_high{};
+  std::vector<int64_t> interior_padding{};
+};
+
+struct StablehloPadOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
+  typedef StablehloPadOptionsT NativeTableType;
+  typedef StablehloPadOptionsBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_EDGE_PADDING_LOW = 4,
+    VT_EDGE_PADDING_HIGH = 6,
+    VT_INTERIOR_PADDING = 8
+  };
+  const ::flatbuffers::Vector<int64_t> *edge_padding_low() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_EDGE_PADDING_LOW);
+  }
+  const ::flatbuffers::Vector<int64_t> *edge_padding_high() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_EDGE_PADDING_HIGH);
+  }
+  const ::flatbuffers::Vector<int64_t> *interior_padding() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_INTERIOR_PADDING);
+  }
+  bool Verify(::flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyOffset(verifier, VT_EDGE_PADDING_LOW) &&
+           verifier.VerifyVector(edge_padding_low()) &&
+           VerifyOffset(verifier, VT_EDGE_PADDING_HIGH) &&
+           verifier.VerifyVector(edge_padding_high()) &&
+           VerifyOffset(verifier, VT_INTERIOR_PADDING) &&
+           verifier.VerifyVector(interior_padding()) &&
+           verifier.EndTable();
+  }
+  StablehloPadOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(StablehloPadOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<StablehloPadOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloPadOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct StablehloPadOptionsBuilder {
+  typedef StablehloPadOptions Table;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_edge_padding_low(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> edge_padding_low) {
+    fbb_.AddOffset(StablehloPadOptions::VT_EDGE_PADDING_LOW, edge_padding_low);
+  }
+  void add_edge_padding_high(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> edge_padding_high) {
+    fbb_.AddOffset(StablehloPadOptions::VT_EDGE_PADDING_HIGH, edge_padding_high);
+  }
+  void add_interior_padding(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> interior_padding) {
+    fbb_.AddOffset(StablehloPadOptions::VT_INTERIOR_PADDING, interior_padding);
+  }
+  explicit StablehloPadOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  ::flatbuffers::Offset<StablehloPadOptions> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = ::flatbuffers::Offset<StablehloPadOptions>(end);
+    return o;
+  }
+};
+
+inline ::flatbuffers::Offset<StablehloPadOptions> CreateStablehloPadOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> edge_padding_low = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> edge_padding_high = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> interior_padding = 0) {
+  StablehloPadOptionsBuilder builder_(_fbb);
+  builder_.add_interior_padding(interior_padding);
+  builder_.add_edge_padding_high(edge_padding_high);
+  builder_.add_edge_padding_low(edge_padding_low);
+  return builder_.Finish();
+}
+
+inline ::flatbuffers::Offset<StablehloPadOptions> CreateStablehloPadOptionsDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    const std::vector<int64_t> *edge_padding_low = nullptr,
+    const std::vector<int64_t> *edge_padding_high = nullptr,
+    const std::vector<int64_t> *interior_padding = nullptr) {
+  auto edge_padding_low__ = edge_padding_low ? _fbb.CreateVector<int64_t>(*edge_padding_low) : 0;
+  auto edge_padding_high__ = edge_padding_high ? _fbb.CreateVector<int64_t>(*edge_padding_high) : 0;
+  auto interior_padding__ = interior_padding ? _fbb.CreateVector<int64_t>(*interior_padding) : 0;
+  return tflite::CreateStablehloPadOptions(
+      _fbb,
+      edge_padding_low__,
+      edge_padding_high__,
+      interior_padding__);
+}
+
+::flatbuffers::Offset<StablehloPadOptions> CreateStablehloPadOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloPadOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct StablehloIotaOptionsT : public ::flatbuffers::NativeTable {
+  typedef StablehloIotaOptions TableType;
+  int64_t iota_dimension = 0;
+};
+
+struct StablehloIotaOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
+  typedef StablehloIotaOptionsT NativeTableType;
+  typedef StablehloIotaOptionsBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_IOTA_DIMENSION = 4
+  };
+  int64_t iota_dimension() const {
+    return GetField<int64_t>(VT_IOTA_DIMENSION, 0);
+  }
+  bool Verify(::flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyField<int64_t>(verifier, VT_IOTA_DIMENSION, 8) &&
+           verifier.EndTable();
+  }
+  StablehloIotaOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(StablehloIotaOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<StablehloIotaOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloIotaOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct StablehloIotaOptionsBuilder {
+  typedef StablehloIotaOptions Table;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_iota_dimension(int64_t iota_dimension) {
+    fbb_.AddElement<int64_t>(StablehloIotaOptions::VT_IOTA_DIMENSION, iota_dimension, 0);
+  }
+  explicit StablehloIotaOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  ::flatbuffers::Offset<StablehloIotaOptions> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = ::flatbuffers::Offset<StablehloIotaOptions>(end);
+    return o;
+  }
+};
+
+inline ::flatbuffers::Offset<StablehloIotaOptions> CreateStablehloIotaOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    int64_t iota_dimension = 0) {
+  StablehloIotaOptionsBuilder builder_(_fbb);
+  builder_.add_iota_dimension(iota_dimension);
+  return builder_.Finish();
+}
+
+::flatbuffers::Offset<StablehloIotaOptions> CreateStablehloIotaOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloIotaOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct StablehloCustomCallOptionsT : public ::flatbuffers::NativeTable {
+  typedef StablehloCustomCallOptions TableType;
+  std::string call_target_name{};
+  bool has_side_effect = false;
+  std::string backend_config{};
+  int32_t api_version = 0;
+  std::vector<int32_t> called_computations{};
+  std::vector<uint8_t> custom_attributes{};
+};
+
+struct StablehloCustomCallOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
+  typedef StablehloCustomCallOptionsT NativeTableType;
+  typedef StablehloCustomCallOptionsBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_CALL_TARGET_NAME = 4,
+    VT_HAS_SIDE_EFFECT = 6,
+    VT_BACKEND_CONFIG = 8,
+    VT_API_VERSION = 10,
+    VT_CALLED_COMPUTATIONS = 12,
+    VT_CUSTOM_ATTRIBUTES = 14
+  };
+  const ::flatbuffers::String *call_target_name() const {
+    return GetPointer<const ::flatbuffers::String *>(VT_CALL_TARGET_NAME);
+  }
+  bool has_side_effect() const {
+    return GetField<uint8_t>(VT_HAS_SIDE_EFFECT, 0) != 0;
+  }
+  const ::flatbuffers::String *backend_config() const {
+    return GetPointer<const ::flatbuffers::String *>(VT_BACKEND_CONFIG);
+  }
+  int32_t api_version() const {
+    return GetField<int32_t>(VT_API_VERSION, 0);
+  }
+  const ::flatbuffers::Vector<int32_t> *called_computations() const {
+    return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_CALLED_COMPUTATIONS);
+  }
+  const ::flatbuffers::Vector<uint8_t> *custom_attributes() const {
+    return GetPointer<const ::flatbuffers::Vector<uint8_t> *>(VT_CUSTOM_ATTRIBUTES);
+  }
+  bool Verify(::flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyOffset(verifier, VT_CALL_TARGET_NAME) &&
+           verifier.VerifyString(call_target_name()) &&
+           VerifyField<uint8_t>(verifier, VT_HAS_SIDE_EFFECT, 1) &&
+           VerifyOffset(verifier, VT_BACKEND_CONFIG) &&
+           verifier.VerifyString(backend_config()) &&
+           VerifyField<int32_t>(verifier, VT_API_VERSION, 4) &&
+           VerifyOffset(verifier, VT_CALLED_COMPUTATIONS) &&
+           verifier.VerifyVector(called_computations()) &&
+           VerifyOffset(verifier, VT_CUSTOM_ATTRIBUTES) &&
+           verifier.VerifyVector(custom_attributes()) &&
+           verifier.EndTable();
+  }
+  StablehloCustomCallOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(StablehloCustomCallOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<StablehloCustomCallOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloCustomCallOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct StablehloCustomCallOptionsBuilder {
+  typedef StablehloCustomCallOptions Table;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_call_target_name(::flatbuffers::Offset<::flatbuffers::String> call_target_name) {
+    fbb_.AddOffset(StablehloCustomCallOptions::VT_CALL_TARGET_NAME, call_target_name);
+  }
+  void add_has_side_effect(bool has_side_effect) {
+    fbb_.AddElement<uint8_t>(StablehloCustomCallOptions::VT_HAS_SIDE_EFFECT, static_cast<uint8_t>(has_side_effect), 0);
+  }
+  void add_backend_config(::flatbuffers::Offset<::flatbuffers::String> backend_config) {
+    fbb_.AddOffset(StablehloCustomCallOptions::VT_BACKEND_CONFIG, backend_config);
+  }
+  void add_api_version(int32_t api_version) {
+    fbb_.AddElement<int32_t>(StablehloCustomCallOptions::VT_API_VERSION, api_version, 0);
+  }
+  void add_called_computations(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> called_computations) {
+    fbb_.AddOffset(StablehloCustomCallOptions::VT_CALLED_COMPUTATIONS, called_computations);
+  }
+  void add_custom_attributes(::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> custom_attributes) {
+    fbb_.AddOffset(StablehloCustomCallOptions::VT_CUSTOM_ATTRIBUTES, custom_attributes);
+  }
+  explicit StablehloCustomCallOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  ::flatbuffers::Offset<StablehloCustomCallOptions> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = ::flatbuffers::Offset<StablehloCustomCallOptions>(end);
+    return o;
+  }
+};
+
+inline ::flatbuffers::Offset<StablehloCustomCallOptions> CreateStablehloCustomCallOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    ::flatbuffers::Offset<::flatbuffers::String> call_target_name = 0,
+    bool has_side_effect = false,
+    ::flatbuffers::Offset<::flatbuffers::String> backend_config = 0,
+    int32_t api_version = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> called_computations = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> custom_attributes = 0) {
+  StablehloCustomCallOptionsBuilder builder_(_fbb);
+  builder_.add_custom_attributes(custom_attributes);
+  builder_.add_called_computations(called_computations);
+  builder_.add_api_version(api_version);
+  builder_.add_backend_config(backend_config);
+  builder_.add_call_target_name(call_target_name);
+  builder_.add_has_side_effect(has_side_effect);
+  return builder_.Finish();
+}
+
+inline ::flatbuffers::Offset<StablehloCustomCallOptions> CreateStablehloCustomCallOptionsDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    const char *call_target_name = nullptr,
+    bool has_side_effect = false,
+    const char *backend_config = nullptr,
+    int32_t api_version = 0,
+    const std::vector<int32_t> *called_computations = nullptr,
+    const std::vector<uint8_t> *custom_attributes = nullptr) {
+  auto call_target_name__ = call_target_name ? _fbb.CreateString(call_target_name) : 0;
+  auto backend_config__ = backend_config ? _fbb.CreateString(backend_config) : 0;
+  auto called_computations__ = called_computations ? _fbb.CreateVector<int32_t>(*called_computations) : 0;
+  auto custom_attributes__ = custom_attributes ? _fbb.CreateVector<uint8_t>(*custom_attributes) : 0;
+  return tflite::CreateStablehloCustomCallOptions(
+      _fbb,
+      call_target_name__,
+      has_side_effect,
+      backend_config__,
+      api_version,
+      called_computations__,
+      custom_attributes__);
+}
+
+::flatbuffers::Offset<StablehloCustomCallOptions> CreateStablehloCustomCallOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloCustomCallOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct StablehloReduceOptionsT : public ::flatbuffers::NativeTable {
+  typedef StablehloReduceOptions TableType;
+  std::vector<int64_t> dimensions{};
+  int32_t body_subgraph_index = 0;
+};
+
+struct StablehloReduceOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
+  typedef StablehloReduceOptionsT NativeTableType;
+  typedef StablehloReduceOptionsBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_DIMENSIONS = 4,
+    VT_BODY_SUBGRAPH_INDEX = 6
+  };
+  const ::flatbuffers::Vector<int64_t> *dimensions() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_DIMENSIONS);
+  }
+  int32_t body_subgraph_index() const {
+    return GetField<int32_t>(VT_BODY_SUBGRAPH_INDEX, 0);
+  }
+  bool Verify(::flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyOffset(verifier, VT_DIMENSIONS) &&
+           verifier.VerifyVector(dimensions()) &&
+           VerifyField<int32_t>(verifier, VT_BODY_SUBGRAPH_INDEX, 4) &&
+           verifier.EndTable();
+  }
+  StablehloReduceOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(StablehloReduceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<StablehloReduceOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloReduceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct StablehloReduceOptionsBuilder {
+  typedef StablehloReduceOptions Table;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_dimensions(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> dimensions) {
+    fbb_.AddOffset(StablehloReduceOptions::VT_DIMENSIONS, dimensions);
+  }
+  void add_body_subgraph_index(int32_t body_subgraph_index) {
+    fbb_.AddElement<int32_t>(StablehloReduceOptions::VT_BODY_SUBGRAPH_INDEX, body_subgraph_index, 0);
+  }
+  explicit StablehloReduceOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  ::flatbuffers::Offset<StablehloReduceOptions> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = ::flatbuffers::Offset<StablehloReduceOptions>(end);
+    return o;
+  }
+};
+
+inline ::flatbuffers::Offset<StablehloReduceOptions> CreateStablehloReduceOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> dimensions = 0,
+    int32_t body_subgraph_index = 0) {
+  StablehloReduceOptionsBuilder builder_(_fbb);
+  builder_.add_body_subgraph_index(body_subgraph_index);
+  builder_.add_dimensions(dimensions);
+  return builder_.Finish();
+}
+
+inline ::flatbuffers::Offset<StablehloReduceOptions> CreateStablehloReduceOptionsDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    const std::vector<int64_t> *dimensions = nullptr,
+    int32_t body_subgraph_index = 0) {
+  auto dimensions__ = dimensions ? _fbb.CreateVector<int64_t>(*dimensions) : 0;
+  return tflite::CreateStablehloReduceOptions(
+      _fbb,
+      dimensions__,
+      body_subgraph_index);
+}
+
+::flatbuffers::Offset<StablehloReduceOptions> CreateStablehloReduceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloReduceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct StablehloSliceOptionsT : public ::flatbuffers::NativeTable {
+  typedef StablehloSliceOptions TableType;
+  std::vector<int64_t> start_indices{};
+  std::vector<int64_t> limit_indices{};
+  std::vector<int64_t> strides{};
+};
+
+struct StablehloSliceOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
+  typedef StablehloSliceOptionsT NativeTableType;
+  typedef StablehloSliceOptionsBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_START_INDICES = 4,
+    VT_LIMIT_INDICES = 6,
+    VT_STRIDES = 8
+  };
+  const ::flatbuffers::Vector<int64_t> *start_indices() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_START_INDICES);
+  }
+  const ::flatbuffers::Vector<int64_t> *limit_indices() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_LIMIT_INDICES);
+  }
+  const ::flatbuffers::Vector<int64_t> *strides() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_STRIDES);
+  }
+  bool Verify(::flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyOffset(verifier, VT_START_INDICES) &&
+           verifier.VerifyVector(start_indices()) &&
+           VerifyOffset(verifier, VT_LIMIT_INDICES) &&
+           verifier.VerifyVector(limit_indices()) &&
+           VerifyOffset(verifier, VT_STRIDES) &&
+           verifier.VerifyVector(strides()) &&
+           verifier.EndTable();
+  }
+  StablehloSliceOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(StablehloSliceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<StablehloSliceOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloSliceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct StablehloSliceOptionsBuilder {
+  typedef StablehloSliceOptions Table;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_start_indices(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> start_indices) {
+    fbb_.AddOffset(StablehloSliceOptions::VT_START_INDICES, start_indices);
+  }
+  void add_limit_indices(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> limit_indices) {
+    fbb_.AddOffset(StablehloSliceOptions::VT_LIMIT_INDICES, limit_indices);
+  }
+  void add_strides(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> strides) {
+    fbb_.AddOffset(StablehloSliceOptions::VT_STRIDES, strides);
+  }
+  explicit StablehloSliceOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  ::flatbuffers::Offset<StablehloSliceOptions> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = ::flatbuffers::Offset<StablehloSliceOptions>(end);
+    return o;
+  }
+};
+
+inline ::flatbuffers::Offset<StablehloSliceOptions> CreateStablehloSliceOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> start_indices = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> limit_indices = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> strides = 0) {
+  StablehloSliceOptionsBuilder builder_(_fbb);
+  builder_.add_strides(strides);
+  builder_.add_limit_indices(limit_indices);
+  builder_.add_start_indices(start_indices);
+  return builder_.Finish();
+}
+
+inline ::flatbuffers::Offset<StablehloSliceOptions> CreateStablehloSliceOptionsDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    const std::vector<int64_t> *start_indices = nullptr,
+    const std::vector<int64_t> *limit_indices = nullptr,
+    const std::vector<int64_t> *strides = nullptr) {
+  auto start_indices__ = start_indices ? _fbb.CreateVector<int64_t>(*start_indices) : 0;
+  auto limit_indices__ = limit_indices ? _fbb.CreateVector<int64_t>(*limit_indices) : 0;
+  auto strides__ = strides ? _fbb.CreateVector<int64_t>(*strides) : 0;
+  return tflite::CreateStablehloSliceOptions(
+      _fbb,
+      start_indices__,
+      limit_indices__,
+      strides__);
+}
+
+::flatbuffers::Offset<StablehloSliceOptions> CreateStablehloSliceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloSliceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct StablehloConvolutionOptionsT : public ::flatbuffers::NativeTable {
+  typedef StablehloConvolutionOptions TableType;
+  std::vector<int64_t> window_strides{};
+  std::vector<int64_t> padding{};
+  std::vector<int64_t> lhs_dilation{};
+  std::vector<int64_t> rhs_dilation{};
+  std::vector<bool> window_reversal{};
+  int64_t input_batch_dimension = 0;
+  int64_t input_feature_dimension = 0;
+  std::vector<int64_t> input_spatial_dimensions{};
+  int64_t kernel_input_feature_dimension = 0;
+  int64_t kernel_output_feature_dimension = 0;
+  std::vector<int64_t> kernel_spatial_dimensions{};
+  int64_t output_batch_dimension = 0;
+  int64_t output_feature_dimension = 0;
+  std::vector<int64_t> output_spatial_dimensions{};
+  int64_t feature_group_count = 0;
+  int64_t batch_group_count = 0;
+  std::vector<tflite::StablehloPrecisionConfig> precision_config{};
+};
+
+struct StablehloConvolutionOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
+  typedef StablehloConvolutionOptionsT NativeTableType;
+  typedef StablehloConvolutionOptionsBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_WINDOW_STRIDES = 4,
+    VT_PADDING = 6,
+    VT_LHS_DILATION = 8,
+    VT_RHS_DILATION = 10,
+    VT_WINDOW_REVERSAL = 12,
+    VT_INPUT_BATCH_DIMENSION = 14,
+    VT_INPUT_FEATURE_DIMENSION = 16,
+    VT_INPUT_SPATIAL_DIMENSIONS = 18,
+    VT_KERNEL_INPUT_FEATURE_DIMENSION = 20,
+    VT_KERNEL_OUTPUT_FEATURE_DIMENSION = 22,
+    VT_KERNEL_SPATIAL_DIMENSIONS = 24,
+    VT_OUTPUT_BATCH_DIMENSION = 26,
+    VT_OUTPUT_FEATURE_DIMENSION = 28,
+    VT_OUTPUT_SPATIAL_DIMENSIONS = 30,
+    VT_FEATURE_GROUP_COUNT = 32,
+    VT_BATCH_GROUP_COUNT = 34,
+    VT_PRECISION_CONFIG = 36
+  };
+  const ::flatbuffers::Vector<int64_t> *window_strides() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_WINDOW_STRIDES);
+  }
+  const ::flatbuffers::Vector<int64_t> *padding() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_PADDING);
+  }
+  const ::flatbuffers::Vector<int64_t> *lhs_dilation() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_LHS_DILATION);
+  }
+  const ::flatbuffers::Vector<int64_t> *rhs_dilation() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_RHS_DILATION);
+  }
+  const ::flatbuffers::Vector<uint8_t> *window_reversal() const {
+    return GetPointer<const ::flatbuffers::Vector<uint8_t> *>(VT_WINDOW_REVERSAL);
+  }
+  int64_t input_batch_dimension() const {
+    return GetField<int64_t>(VT_INPUT_BATCH_DIMENSION, 0);
+  }
+  int64_t input_feature_dimension() const {
+    return GetField<int64_t>(VT_INPUT_FEATURE_DIMENSION, 0);
+  }
+  const ::flatbuffers::Vector<int64_t> *input_spatial_dimensions() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_INPUT_SPATIAL_DIMENSIONS);
+  }
+  int64_t kernel_input_feature_dimension() const {
+    return GetField<int64_t>(VT_KERNEL_INPUT_FEATURE_DIMENSION, 0);
+  }
+  int64_t kernel_output_feature_dimension() const {
+    return GetField<int64_t>(VT_KERNEL_OUTPUT_FEATURE_DIMENSION, 0);
+  }
+  const ::flatbuffers::Vector<int64_t> *kernel_spatial_dimensions() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_KERNEL_SPATIAL_DIMENSIONS);
+  }
+  int64_t output_batch_dimension() const {
+    return GetField<int64_t>(VT_OUTPUT_BATCH_DIMENSION, 0);
+  }
+  int64_t output_feature_dimension() const {
+    return GetField<int64_t>(VT_OUTPUT_FEATURE_DIMENSION, 0);
+  }
+  const ::flatbuffers::Vector<int64_t> *output_spatial_dimensions() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_OUTPUT_SPATIAL_DIMENSIONS);
+  }
+  int64_t feature_group_count() const {
+    return GetField<int64_t>(VT_FEATURE_GROUP_COUNT, 0);
+  }
+  int64_t batch_group_count() const {
+    return GetField<int64_t>(VT_BATCH_GROUP_COUNT, 0);
+  }
+  const ::flatbuffers::Vector<uint32_t> *precision_config() const {
+    return GetPointer<const ::flatbuffers::Vector<uint32_t> *>(VT_PRECISION_CONFIG);
+  }
+  bool Verify(::flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyOffset(verifier, VT_WINDOW_STRIDES) &&
+           verifier.VerifyVector(window_strides()) &&
+           VerifyOffset(verifier, VT_PADDING) &&
+           verifier.VerifyVector(padding()) &&
+           VerifyOffset(verifier, VT_LHS_DILATION) &&
+           verifier.VerifyVector(lhs_dilation()) &&
+           VerifyOffset(verifier, VT_RHS_DILATION) &&
+           verifier.VerifyVector(rhs_dilation()) &&
+           VerifyOffset(verifier, VT_WINDOW_REVERSAL) &&
+           verifier.VerifyVector(window_reversal()) &&
+           VerifyField<int64_t>(verifier, VT_INPUT_BATCH_DIMENSION, 8) &&
+           VerifyField<int64_t>(verifier, VT_INPUT_FEATURE_DIMENSION, 8) &&
+           VerifyOffset(verifier, VT_INPUT_SPATIAL_DIMENSIONS) &&
+           verifier.VerifyVector(input_spatial_dimensions()) &&
+           VerifyField<int64_t>(verifier, VT_KERNEL_INPUT_FEATURE_DIMENSION, 8) &&
+           VerifyField<int64_t>(verifier, VT_KERNEL_OUTPUT_FEATURE_DIMENSION, 8) &&
+           VerifyOffset(verifier, VT_KERNEL_SPATIAL_DIMENSIONS) &&
+           verifier.VerifyVector(kernel_spatial_dimensions()) &&
+           VerifyField<int64_t>(verifier, VT_OUTPUT_BATCH_DIMENSION, 8) &&
+           VerifyField<int64_t>(verifier, VT_OUTPUT_FEATURE_DIMENSION, 8) &&
+           VerifyOffset(verifier, VT_OUTPUT_SPATIAL_DIMENSIONS) &&
+           verifier.VerifyVector(output_spatial_dimensions()) &&
+           VerifyField<int64_t>(verifier, VT_FEATURE_GROUP_COUNT, 8) &&
+           VerifyField<int64_t>(verifier, VT_BATCH_GROUP_COUNT, 8) &&
+           VerifyOffset(verifier, VT_PRECISION_CONFIG) &&
+           verifier.VerifyVector(precision_config()) &&
+           verifier.EndTable();
+  }
+  StablehloConvolutionOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(StablehloConvolutionOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<StablehloConvolutionOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloConvolutionOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct StablehloConvolutionOptionsBuilder {
+  typedef StablehloConvolutionOptions Table;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_window_strides(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> window_strides) {
+    fbb_.AddOffset(StablehloConvolutionOptions::VT_WINDOW_STRIDES, window_strides);
+  }
+  void add_padding(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> padding) {
+    fbb_.AddOffset(StablehloConvolutionOptions::VT_PADDING, padding);
+  }
+  void add_lhs_dilation(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> lhs_dilation) {
+    fbb_.AddOffset(StablehloConvolutionOptions::VT_LHS_DILATION, lhs_dilation);
+  }
+  void add_rhs_dilation(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> rhs_dilation) {
+    fbb_.AddOffset(StablehloConvolutionOptions::VT_RHS_DILATION, rhs_dilation);
+  }
+  void add_window_reversal(::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> window_reversal) {
+    fbb_.AddOffset(StablehloConvolutionOptions::VT_WINDOW_REVERSAL, window_reversal);
+  }
+  void add_input_batch_dimension(int64_t input_batch_dimension) {
+    fbb_.AddElement<int64_t>(StablehloConvolutionOptions::VT_INPUT_BATCH_DIMENSION, input_batch_dimension, 0);
+  }
+  void add_input_feature_dimension(int64_t input_feature_dimension) {
+    fbb_.AddElement<int64_t>(StablehloConvolutionOptions::VT_INPUT_FEATURE_DIMENSION, input_feature_dimension, 0);
+  }
+  void add_input_spatial_dimensions(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> input_spatial_dimensions) {
+    fbb_.AddOffset(StablehloConvolutionOptions::VT_INPUT_SPATIAL_DIMENSIONS, input_spatial_dimensions);
+  }
+  void add_kernel_input_feature_dimension(int64_t kernel_input_feature_dimension) {
+    fbb_.AddElement<int64_t>(StablehloConvolutionOptions::VT_KERNEL_INPUT_FEATURE_DIMENSION, kernel_input_feature_dimension, 0);
+  }
+  void add_kernel_output_feature_dimension(int64_t kernel_output_feature_dimension) {
+    fbb_.AddElement<int64_t>(StablehloConvolutionOptions::VT_KERNEL_OUTPUT_FEATURE_DIMENSION, kernel_output_feature_dimension, 0);
+  }
+  void add_kernel_spatial_dimensions(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> kernel_spatial_dimensions) {
+    fbb_.AddOffset(StablehloConvolutionOptions::VT_KERNEL_SPATIAL_DIMENSIONS, kernel_spatial_dimensions);
+  }
+  void add_output_batch_dimension(int64_t output_batch_dimension) {
+    fbb_.AddElement<int64_t>(StablehloConvolutionOptions::VT_OUTPUT_BATCH_DIMENSION, output_batch_dimension, 0);
+  }
+  void add_output_feature_dimension(int64_t output_feature_dimension) {
+    fbb_.AddElement<int64_t>(StablehloConvolutionOptions::VT_OUTPUT_FEATURE_DIMENSION, output_feature_dimension, 0);
+  }
+  void add_output_spatial_dimensions(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> output_spatial_dimensions) {
+    fbb_.AddOffset(StablehloConvolutionOptions::VT_OUTPUT_SPATIAL_DIMENSIONS, output_spatial_dimensions);
+  }
+  void add_feature_group_count(int64_t feature_group_count) {
+    fbb_.AddElement<int64_t>(StablehloConvolutionOptions::VT_FEATURE_GROUP_COUNT, feature_group_count, 0);
+  }
+  void add_batch_group_count(int64_t batch_group_count) {
+    fbb_.AddElement<int64_t>(StablehloConvolutionOptions::VT_BATCH_GROUP_COUNT, batch_group_count, 0);
+  }
+  void add_precision_config(::flatbuffers::Offset<::flatbuffers::Vector<uint32_t>> precision_config) {
+    fbb_.AddOffset(StablehloConvolutionOptions::VT_PRECISION_CONFIG, precision_config);
+  }
+  explicit StablehloConvolutionOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  ::flatbuffers::Offset<StablehloConvolutionOptions> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = ::flatbuffers::Offset<StablehloConvolutionOptions>(end);
+    return o;
+  }
+};
+
+inline ::flatbuffers::Offset<StablehloConvolutionOptions> CreateStablehloConvolutionOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> window_strides = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> padding = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> lhs_dilation = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> rhs_dilation = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> window_reversal = 0,
+    int64_t input_batch_dimension = 0,
+    int64_t input_feature_dimension = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> input_spatial_dimensions = 0,
+    int64_t kernel_input_feature_dimension = 0,
+    int64_t kernel_output_feature_dimension = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> kernel_spatial_dimensions = 0,
+    int64_t output_batch_dimension = 0,
+    int64_t output_feature_dimension = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> output_spatial_dimensions = 0,
+    int64_t feature_group_count = 0,
+    int64_t batch_group_count = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<uint32_t>> precision_config = 0) {
+  StablehloConvolutionOptionsBuilder builder_(_fbb);
+  builder_.add_batch_group_count(batch_group_count);
+  builder_.add_feature_group_count(feature_group_count);
+  builder_.add_output_feature_dimension(output_feature_dimension);
+  builder_.add_output_batch_dimension(output_batch_dimension);
+  builder_.add_kernel_output_feature_dimension(kernel_output_feature_dimension);
+  builder_.add_kernel_input_feature_dimension(kernel_input_feature_dimension);
+  builder_.add_input_feature_dimension(input_feature_dimension);
+  builder_.add_input_batch_dimension(input_batch_dimension);
+  builder_.add_precision_config(precision_config);
+  builder_.add_output_spatial_dimensions(output_spatial_dimensions);
+  builder_.add_kernel_spatial_dimensions(kernel_spatial_dimensions);
+  builder_.add_input_spatial_dimensions(input_spatial_dimensions);
+  builder_.add_window_reversal(window_reversal);
+  builder_.add_rhs_dilation(rhs_dilation);
+  builder_.add_lhs_dilation(lhs_dilation);
+  builder_.add_padding(padding);
+  builder_.add_window_strides(window_strides);
+  return builder_.Finish();
+}
+
+inline ::flatbuffers::Offset<StablehloConvolutionOptions> CreateStablehloConvolutionOptionsDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    const std::vector<int64_t> *window_strides = nullptr,
+    const std::vector<int64_t> *padding = nullptr,
+    const std::vector<int64_t> *lhs_dilation = nullptr,
+    const std::vector<int64_t> *rhs_dilation = nullptr,
+    const std::vector<uint8_t> *window_reversal = nullptr,
+    int64_t input_batch_dimension = 0,
+    int64_t input_feature_dimension = 0,
+    const std::vector<int64_t> *input_spatial_dimensions = nullptr,
+    int64_t kernel_input_feature_dimension = 0,
+    int64_t kernel_output_feature_dimension = 0,
+    const std::vector<int64_t> *kernel_spatial_dimensions = nullptr,
+    int64_t output_batch_dimension = 0,
+    int64_t output_feature_dimension = 0,
+    const std::vector<int64_t> *output_spatial_dimensions = nullptr,
+    int64_t feature_group_count = 0,
+    int64_t batch_group_count = 0,
+    const std::vector<uint32_t> *precision_config = nullptr) {
+  auto window_strides__ = window_strides ? _fbb.CreateVector<int64_t>(*window_strides) : 0;
+  auto padding__ = padding ? _fbb.CreateVector<int64_t>(*padding) : 0;
+  auto lhs_dilation__ = lhs_dilation ? _fbb.CreateVector<int64_t>(*lhs_dilation) : 0;
+  auto rhs_dilation__ = rhs_dilation ? _fbb.CreateVector<int64_t>(*rhs_dilation) : 0;
+  auto window_reversal__ = window_reversal ? _fbb.CreateVector<uint8_t>(*window_reversal) : 0;
+  auto input_spatial_dimensions__ = input_spatial_dimensions ? _fbb.CreateVector<int64_t>(*input_spatial_dimensions) : 0;
+  auto kernel_spatial_dimensions__ = kernel_spatial_dimensions ? _fbb.CreateVector<int64_t>(*kernel_spatial_dimensions) : 0;
+  auto output_spatial_dimensions__ = output_spatial_dimensions ? _fbb.CreateVector<int64_t>(*output_spatial_dimensions) : 0;
+  auto precision_config__ = precision_config ? _fbb.CreateVector<uint32_t>(*precision_config) : 0;
+  return tflite::CreateStablehloConvolutionOptions(
+      _fbb,
+      window_strides__,
+      padding__,
+      lhs_dilation__,
+      rhs_dilation__,
+      window_reversal__,
+      input_batch_dimension,
+      input_feature_dimension,
+      input_spatial_dimensions__,
+      kernel_input_feature_dimension,
+      kernel_output_feature_dimension,
+      kernel_spatial_dimensions__,
+      output_batch_dimension,
+      output_feature_dimension,
+      output_spatial_dimensions__,
+      feature_group_count,
+      batch_group_count,
+      precision_config__);
+}
+
+::flatbuffers::Offset<StablehloConvolutionOptions> CreateStablehloConvolutionOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloConvolutionOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct StablehloScatterOptionsT : public ::flatbuffers::NativeTable {
+  typedef StablehloScatterOptions TableType;
+  bool indices_are_sorted = false;
+  std::vector<int64_t> update_window_dims{};
+  std::vector<int64_t> inserted_window_dims{};
+  std::vector<int64_t> scatter_dims_to_operand_dims{};
+  int64_t index_vector_dim = 0;
+  bool unique_indices = false;
+  int32_t update_computation_subgraph_index = 0;
+};
+
+struct StablehloScatterOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
+  typedef StablehloScatterOptionsT NativeTableType;
+  typedef StablehloScatterOptionsBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_INDICES_ARE_SORTED = 4,
+    VT_UPDATE_WINDOW_DIMS = 6,
+    VT_INSERTED_WINDOW_DIMS = 8,
+    VT_SCATTER_DIMS_TO_OPERAND_DIMS = 10,
+    VT_INDEX_VECTOR_DIM = 12,
+    VT_UNIQUE_INDICES = 14,
+    VT_UPDATE_COMPUTATION_SUBGRAPH_INDEX = 16
+  };
+  bool indices_are_sorted() const {
+    return GetField<uint8_t>(VT_INDICES_ARE_SORTED, 0) != 0;
+  }
+  const ::flatbuffers::Vector<int64_t> *update_window_dims() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_UPDATE_WINDOW_DIMS);
+  }
+  const ::flatbuffers::Vector<int64_t> *inserted_window_dims() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_INSERTED_WINDOW_DIMS);
+  }
+  const ::flatbuffers::Vector<int64_t> *scatter_dims_to_operand_dims() const {
+    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_SCATTER_DIMS_TO_OPERAND_DIMS);
+  }
+  int64_t index_vector_dim() const {
+    return GetField<int64_t>(VT_INDEX_VECTOR_DIM, 0);
+  }
+  bool unique_indices() const {
+    return GetField<uint8_t>(VT_UNIQUE_INDICES, 0) != 0;
+  }
+  int32_t update_computation_subgraph_index() const {
+    return GetField<int32_t>(VT_UPDATE_COMPUTATION_SUBGRAPH_INDEX, 0);
+  }
+  bool Verify(::flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyField<uint8_t>(verifier, VT_INDICES_ARE_SORTED, 1) &&
+           VerifyOffset(verifier, VT_UPDATE_WINDOW_DIMS) &&
+           verifier.VerifyVector(update_window_dims()) &&
+           VerifyOffset(verifier, VT_INSERTED_WINDOW_DIMS) &&
+           verifier.VerifyVector(inserted_window_dims()) &&
+           VerifyOffset(verifier, VT_SCATTER_DIMS_TO_OPERAND_DIMS) &&
+           verifier.VerifyVector(scatter_dims_to_operand_dims()) &&
+           VerifyField<int64_t>(verifier, VT_INDEX_VECTOR_DIM, 8) &&
+           VerifyField<uint8_t>(verifier, VT_UNIQUE_INDICES, 1) &&
+           VerifyField<int32_t>(verifier, VT_UPDATE_COMPUTATION_SUBGRAPH_INDEX, 4) &&
+           verifier.EndTable();
+  }
+  StablehloScatterOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(StablehloScatterOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<StablehloScatterOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloScatterOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct StablehloScatterOptionsBuilder {
+  typedef StablehloScatterOptions Table;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_indices_are_sorted(bool indices_are_sorted) {
+    fbb_.AddElement<uint8_t>(StablehloScatterOptions::VT_INDICES_ARE_SORTED, static_cast<uint8_t>(indices_are_sorted), 0);
+  }
+  void add_update_window_dims(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> update_window_dims) {
+    fbb_.AddOffset(StablehloScatterOptions::VT_UPDATE_WINDOW_DIMS, update_window_dims);
+  }
+  void add_inserted_window_dims(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> inserted_window_dims) {
+    fbb_.AddOffset(StablehloScatterOptions::VT_INSERTED_WINDOW_DIMS, inserted_window_dims);
+  }
+  void add_scatter_dims_to_operand_dims(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> scatter_dims_to_operand_dims) {
+    fbb_.AddOffset(StablehloScatterOptions::VT_SCATTER_DIMS_TO_OPERAND_DIMS, scatter_dims_to_operand_dims);
+  }
+  void add_index_vector_dim(int64_t index_vector_dim) {
+    fbb_.AddElement<int64_t>(StablehloScatterOptions::VT_INDEX_VECTOR_DIM, index_vector_dim, 0);
+  }
+  void add_unique_indices(bool unique_indices) {
+    fbb_.AddElement<uint8_t>(StablehloScatterOptions::VT_UNIQUE_INDICES, static_cast<uint8_t>(unique_indices), 0);
+  }
+  void add_update_computation_subgraph_index(int32_t update_computation_subgraph_index) {
+    fbb_.AddElement<int32_t>(StablehloScatterOptions::VT_UPDATE_COMPUTATION_SUBGRAPH_INDEX, update_computation_subgraph_index, 0);
+  }
+  explicit StablehloScatterOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  ::flatbuffers::Offset<StablehloScatterOptions> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = ::flatbuffers::Offset<StablehloScatterOptions>(end);
+    return o;
+  }
+};
+
+inline ::flatbuffers::Offset<StablehloScatterOptions> CreateStablehloScatterOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    bool indices_are_sorted = false,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> update_window_dims = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> inserted_window_dims = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> scatter_dims_to_operand_dims = 0,
+    int64_t index_vector_dim = 0,
+    bool unique_indices = false,
+    int32_t update_computation_subgraph_index = 0) {
+  StablehloScatterOptionsBuilder builder_(_fbb);
+  builder_.add_index_vector_dim(index_vector_dim);
+  builder_.add_update_computation_subgraph_index(update_computation_subgraph_index);
+  builder_.add_scatter_dims_to_operand_dims(scatter_dims_to_operand_dims);
+  builder_.add_inserted_window_dims(inserted_window_dims);
+  builder_.add_update_window_dims(update_window_dims);
+  builder_.add_unique_indices(unique_indices);
+  builder_.add_indices_are_sorted(indices_are_sorted);
+  return builder_.Finish();
+}
+
+inline ::flatbuffers::Offset<StablehloScatterOptions> CreateStablehloScatterOptionsDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    bool indices_are_sorted = false,
+    const std::vector<int64_t> *update_window_dims = nullptr,
+    const std::vector<int64_t> *inserted_window_dims = nullptr,
+    const std::vector<int64_t> *scatter_dims_to_operand_dims = nullptr,
+    int64_t index_vector_dim = 0,
+    bool unique_indices = false,
+    int32_t update_computation_subgraph_index = 0) {
+  auto update_window_dims__ = update_window_dims ? _fbb.CreateVector<int64_t>(*update_window_dims) : 0;
+  auto inserted_window_dims__ = inserted_window_dims ? _fbb.CreateVector<int64_t>(*inserted_window_dims) : 0;
+  auto scatter_dims_to_operand_dims__ = scatter_dims_to_operand_dims ? _fbb.CreateVector<int64_t>(*scatter_dims_to_operand_dims) : 0;
+  return tflite::CreateStablehloScatterOptions(
+      _fbb,
+      indices_are_sorted,
+      update_window_dims__,
+      inserted_window_dims__,
+      scatter_dims_to_operand_dims__,
+      index_vector_dim,
+      unique_indices,
+      update_computation_subgraph_index);
+}
+
+::flatbuffers::Offset<StablehloScatterOptions> CreateStablehloScatterOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloScatterOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct StablehloRngBitGeneratorOptionsT : public ::flatbuffers::NativeTable {
+  typedef StablehloRngBitGeneratorOptions TableType;
+  tflite::RngAlgorithm algorithm = tflite::RngAlgorithm_DEFAULT;
+};
+
+struct StablehloRngBitGeneratorOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
+  typedef StablehloRngBitGeneratorOptionsT NativeTableType;
+  typedef StablehloRngBitGeneratorOptionsBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_ALGORITHM = 4
+  };
+  tflite::RngAlgorithm algorithm() const {
+    return static_cast<tflite::RngAlgorithm>(GetField<int8_t>(VT_ALGORITHM, 0));
+  }
+  bool Verify(::flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyField<int8_t>(verifier, VT_ALGORITHM, 1) &&
+           verifier.EndTable();
+  }
+  StablehloRngBitGeneratorOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(StablehloRngBitGeneratorOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<StablehloRngBitGeneratorOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloRngBitGeneratorOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct StablehloRngBitGeneratorOptionsBuilder {
+  typedef StablehloRngBitGeneratorOptions Table;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_algorithm(tflite::RngAlgorithm algorithm) {
+    fbb_.AddElement<int8_t>(StablehloRngBitGeneratorOptions::VT_ALGORITHM, static_cast<int8_t>(algorithm), 0);
+  }
+  explicit StablehloRngBitGeneratorOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  ::flatbuffers::Offset<StablehloRngBitGeneratorOptions> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = ::flatbuffers::Offset<StablehloRngBitGeneratorOptions>(end);
+    return o;
+  }
+};
+
+inline ::flatbuffers::Offset<StablehloRngBitGeneratorOptions> CreateStablehloRngBitGeneratorOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    tflite::RngAlgorithm algorithm = tflite::RngAlgorithm_DEFAULT) {
+  StablehloRngBitGeneratorOptionsBuilder builder_(_fbb);
+  builder_.add_algorithm(algorithm);
+  return builder_.Finish();
+}
+
+::flatbuffers::Offset<StablehloRngBitGeneratorOptions> CreateStablehloRngBitGeneratorOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloRngBitGeneratorOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct Conv2DOptionsT : public ::flatbuffers::NativeTable {
   typedef Conv2DOptions TableType;
   tflite::Padding padding = tflite::Padding_SAME;
   int32_t stride_w = 0;
@@ -5081,9 +7718,10 @@
   tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE;
   int32_t dilation_w_factor = 1;
   int32_t dilation_h_factor = 1;
+  tflite::TensorType quantized_bias_type = tflite::TensorType_FLOAT32;
 };
 
-struct Conv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct Conv2DOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef Conv2DOptionsT NativeTableType;
   typedef Conv2DOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -5092,7 +7730,8 @@
     VT_STRIDE_H = 8,
     VT_FUSED_ACTIVATION_FUNCTION = 10,
     VT_DILATION_W_FACTOR = 12,
-    VT_DILATION_H_FACTOR = 14
+    VT_DILATION_H_FACTOR = 14,
+    VT_QUANTIZED_BIAS_TYPE = 16
   };
   tflite::Padding padding() const {
     return static_cast<tflite::Padding>(GetField<int8_t>(VT_PADDING, 0));
@@ -5112,7 +7751,10 @@
   int32_t dilation_h_factor() const {
     return GetField<int32_t>(VT_DILATION_H_FACTOR, 1);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  tflite::TensorType quantized_bias_type() const {
+    return static_cast<tflite::TensorType>(GetField<int8_t>(VT_QUANTIZED_BIAS_TYPE, 0));
+  }
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int8_t>(verifier, VT_PADDING, 1) &&
            VerifyField<int32_t>(verifier, VT_STRIDE_W, 4) &&
@@ -5120,17 +7762,18 @@
            VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) &&
            VerifyField<int32_t>(verifier, VT_DILATION_W_FACTOR, 4) &&
            VerifyField<int32_t>(verifier, VT_DILATION_H_FACTOR, 4) &&
+           VerifyField<int8_t>(verifier, VT_QUANTIZED_BIAS_TYPE, 1) &&
            verifier.EndTable();
   }
-  Conv2DOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(Conv2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<Conv2DOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  Conv2DOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(Conv2DOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<Conv2DOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct Conv2DOptionsBuilder {
   typedef Conv2DOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_padding(tflite::Padding padding) {
     fbb_.AddElement<int8_t>(Conv2DOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
   }
@@ -5149,38 +7792,43 @@
   void add_dilation_h_factor(int32_t dilation_h_factor) {
     fbb_.AddElement<int32_t>(Conv2DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1);
   }
-  explicit Conv2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  void add_quantized_bias_type(tflite::TensorType quantized_bias_type) {
+    fbb_.AddElement<int8_t>(Conv2DOptions::VT_QUANTIZED_BIAS_TYPE, static_cast<int8_t>(quantized_bias_type), 0);
+  }
+  explicit Conv2DOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<Conv2DOptions> Finish() {
+  ::flatbuffers::Offset<Conv2DOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<Conv2DOptions>(end);
+    auto o = ::flatbuffers::Offset<Conv2DOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<Conv2DOptions> CreateConv2DOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<Conv2DOptions> CreateConv2DOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     tflite::Padding padding = tflite::Padding_SAME,
     int32_t stride_w = 0,
     int32_t stride_h = 0,
     tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE,
     int32_t dilation_w_factor = 1,
-    int32_t dilation_h_factor = 1) {
+    int32_t dilation_h_factor = 1,
+    tflite::TensorType quantized_bias_type = tflite::TensorType_FLOAT32) {
   Conv2DOptionsBuilder builder_(_fbb);
   builder_.add_dilation_h_factor(dilation_h_factor);
   builder_.add_dilation_w_factor(dilation_w_factor);
   builder_.add_stride_h(stride_h);
   builder_.add_stride_w(stride_w);
+  builder_.add_quantized_bias_type(quantized_bias_type);
   builder_.add_fused_activation_function(fused_activation_function);
   builder_.add_padding(padding);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<Conv2DOptions> CreateConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<Conv2DOptions> CreateConv2DOptions(::flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct Conv3DOptionsT : public flatbuffers::NativeTable {
+struct Conv3DOptionsT : public ::flatbuffers::NativeTable {
   typedef Conv3DOptions TableType;
   tflite::Padding padding = tflite::Padding_SAME;
   int32_t stride_d = 0;
@@ -5192,7 +7840,7 @@
   int32_t dilation_h_factor = 1;
 };
 
-struct Conv3DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct Conv3DOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef Conv3DOptionsT NativeTableType;
   typedef Conv3DOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -5229,7 +7877,7 @@
   int32_t dilation_h_factor() const {
     return GetField<int32_t>(VT_DILATION_H_FACTOR, 1);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int8_t>(verifier, VT_PADDING, 1) &&
            VerifyField<int32_t>(verifier, VT_STRIDE_D, 4) &&
@@ -5241,15 +7889,15 @@
            VerifyField<int32_t>(verifier, VT_DILATION_H_FACTOR, 4) &&
            verifier.EndTable();
   }
-  Conv3DOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(Conv3DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<Conv3DOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  Conv3DOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(Conv3DOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<Conv3DOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct Conv3DOptionsBuilder {
   typedef Conv3DOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_padding(tflite::Padding padding) {
     fbb_.AddElement<int8_t>(Conv3DOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
   }
@@ -5274,19 +7922,19 @@
   void add_dilation_h_factor(int32_t dilation_h_factor) {
     fbb_.AddElement<int32_t>(Conv3DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1);
   }
-  explicit Conv3DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit Conv3DOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<Conv3DOptions> Finish() {
+  ::flatbuffers::Offset<Conv3DOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<Conv3DOptions>(end);
+    auto o = ::flatbuffers::Offset<Conv3DOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<Conv3DOptions> CreateConv3DOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<Conv3DOptions> CreateConv3DOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     tflite::Padding padding = tflite::Padding_SAME,
     int32_t stride_d = 0,
     int32_t stride_w = 0,
@@ -5307,9 +7955,9 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<Conv3DOptions> CreateConv3DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<Conv3DOptions> CreateConv3DOptions(::flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct Pool2DOptionsT : public flatbuffers::NativeTable {
+struct Pool2DOptionsT : public ::flatbuffers::NativeTable {
   typedef Pool2DOptions TableType;
   tflite::Padding padding = tflite::Padding_SAME;
   int32_t stride_w = 0;
@@ -5319,7 +7967,7 @@
   tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE;
 };
 
-struct Pool2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct Pool2DOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef Pool2DOptionsT NativeTableType;
   typedef Pool2DOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -5348,7 +7996,7 @@
   tflite::ActivationFunctionType fused_activation_function() const {
     return static_cast<tflite::ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int8_t>(verifier, VT_PADDING, 1) &&
            VerifyField<int32_t>(verifier, VT_STRIDE_W, 4) &&
@@ -5358,15 +8006,15 @@
            VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) &&
            verifier.EndTable();
   }
-  Pool2DOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(Pool2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<Pool2DOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  Pool2DOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(Pool2DOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<Pool2DOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct Pool2DOptionsBuilder {
   typedef Pool2DOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_padding(tflite::Padding padding) {
     fbb_.AddElement<int8_t>(Pool2DOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
   }
@@ -5385,19 +8033,19 @@
   void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) {
     fbb_.AddElement<int8_t>(Pool2DOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
   }
-  explicit Pool2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit Pool2DOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<Pool2DOptions> Finish() {
+  ::flatbuffers::Offset<Pool2DOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<Pool2DOptions>(end);
+    auto o = ::flatbuffers::Offset<Pool2DOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<Pool2DOptions> CreatePool2DOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<Pool2DOptions> CreatePool2DOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     tflite::Padding padding = tflite::Padding_SAME,
     int32_t stride_w = 0,
     int32_t stride_h = 0,
@@ -5414,9 +8062,9 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<Pool2DOptions> CreatePool2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<Pool2DOptions> CreatePool2DOptions(::flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct DepthwiseConv2DOptionsT : public flatbuffers::NativeTable {
+struct DepthwiseConv2DOptionsT : public ::flatbuffers::NativeTable {
   typedef DepthwiseConv2DOptions TableType;
   tflite::Padding padding = tflite::Padding_SAME;
   int32_t stride_w = 0;
@@ -5427,7 +8075,7 @@
   int32_t dilation_h_factor = 1;
 };
 
-struct DepthwiseConv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct DepthwiseConv2DOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef DepthwiseConv2DOptionsT NativeTableType;
   typedef DepthwiseConv2DOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -5460,7 +8108,7 @@
   int32_t dilation_h_factor() const {
     return GetField<int32_t>(VT_DILATION_H_FACTOR, 1);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int8_t>(verifier, VT_PADDING, 1) &&
            VerifyField<int32_t>(verifier, VT_STRIDE_W, 4) &&
@@ -5471,15 +8119,15 @@
            VerifyField<int32_t>(verifier, VT_DILATION_H_FACTOR, 4) &&
            verifier.EndTable();
   }
-  DepthwiseConv2DOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(DepthwiseConv2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<DepthwiseConv2DOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  DepthwiseConv2DOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(DepthwiseConv2DOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<DepthwiseConv2DOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct DepthwiseConv2DOptionsBuilder {
   typedef DepthwiseConv2DOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_padding(tflite::Padding padding) {
     fbb_.AddElement<int8_t>(DepthwiseConv2DOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
   }
@@ -5501,19 +8149,19 @@
   void add_dilation_h_factor(int32_t dilation_h_factor) {
     fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1);
   }
-  explicit DepthwiseConv2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit DepthwiseConv2DOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<DepthwiseConv2DOptions> Finish() {
+  ::flatbuffers::Offset<DepthwiseConv2DOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<DepthwiseConv2DOptions>(end);
+    auto o = ::flatbuffers::Offset<DepthwiseConv2DOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<DepthwiseConv2DOptions> CreateDepthwiseConv2DOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<DepthwiseConv2DOptions> CreateDepthwiseConv2DOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     tflite::Padding padding = tflite::Padding_SAME,
     int32_t stride_w = 0,
     int32_t stride_h = 0,
@@ -5532,16 +8180,16 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<DepthwiseConv2DOptions> CreateDepthwiseConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<DepthwiseConv2DOptions> CreateDepthwiseConv2DOptions(::flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct ConcatEmbeddingsOptionsT : public flatbuffers::NativeTable {
+struct ConcatEmbeddingsOptionsT : public ::flatbuffers::NativeTable {
   typedef ConcatEmbeddingsOptions TableType;
   int32_t num_channels = 0;
   std::vector<int32_t> num_columns_per_channel{};
   std::vector<int32_t> embedding_dim_per_channel{};
 };
 
-struct ConcatEmbeddingsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct ConcatEmbeddingsOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef ConcatEmbeddingsOptionsT NativeTableType;
   typedef ConcatEmbeddingsOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -5552,13 +8200,13 @@
   int32_t num_channels() const {
     return GetField<int32_t>(VT_NUM_CHANNELS, 0);
   }
-  const flatbuffers::Vector<int32_t> *num_columns_per_channel() const {
-    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_NUM_COLUMNS_PER_CHANNEL);
+  const ::flatbuffers::Vector<int32_t> *num_columns_per_channel() const {
+    return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_NUM_COLUMNS_PER_CHANNEL);
   }
-  const flatbuffers::Vector<int32_t> *embedding_dim_per_channel() const {
-    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_EMBEDDING_DIM_PER_CHANNEL);
+  const ::flatbuffers::Vector<int32_t> *embedding_dim_per_channel() const {
+    return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_EMBEDDING_DIM_PER_CHANNEL);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int32_t>(verifier, VT_NUM_CHANNELS, 4) &&
            VerifyOffset(verifier, VT_NUM_COLUMNS_PER_CHANNEL) &&
@@ -5567,40 +8215,40 @@
            verifier.VerifyVector(embedding_dim_per_channel()) &&
            verifier.EndTable();
   }
-  ConcatEmbeddingsOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(ConcatEmbeddingsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<ConcatEmbeddingsOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  ConcatEmbeddingsOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(ConcatEmbeddingsOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<ConcatEmbeddingsOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct ConcatEmbeddingsOptionsBuilder {
   typedef ConcatEmbeddingsOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_num_channels(int32_t num_channels) {
     fbb_.AddElement<int32_t>(ConcatEmbeddingsOptions::VT_NUM_CHANNELS, num_channels, 0);
   }
-  void add_num_columns_per_channel(flatbuffers::Offset<flatbuffers::Vector<int32_t>> num_columns_per_channel) {
+  void add_num_columns_per_channel(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> num_columns_per_channel) {
     fbb_.AddOffset(ConcatEmbeddingsOptions::VT_NUM_COLUMNS_PER_CHANNEL, num_columns_per_channel);
   }
-  void add_embedding_dim_per_channel(flatbuffers::Offset<flatbuffers::Vector<int32_t>> embedding_dim_per_channel) {
+  void add_embedding_dim_per_channel(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> embedding_dim_per_channel) {
     fbb_.AddOffset(ConcatEmbeddingsOptions::VT_EMBEDDING_DIM_PER_CHANNEL, embedding_dim_per_channel);
   }
-  explicit ConcatEmbeddingsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit ConcatEmbeddingsOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<ConcatEmbeddingsOptions> Finish() {
+  ::flatbuffers::Offset<ConcatEmbeddingsOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<ConcatEmbeddingsOptions>(end);
+    auto o = ::flatbuffers::Offset<ConcatEmbeddingsOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<ConcatEmbeddingsOptions> CreateConcatEmbeddingsOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<ConcatEmbeddingsOptions> CreateConcatEmbeddingsOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     int32_t num_channels = 0,
-    flatbuffers::Offset<flatbuffers::Vector<int32_t>> num_columns_per_channel = 0,
-    flatbuffers::Offset<flatbuffers::Vector<int32_t>> embedding_dim_per_channel = 0) {
+    ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> num_columns_per_channel = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> embedding_dim_per_channel = 0) {
   ConcatEmbeddingsOptionsBuilder builder_(_fbb);
   builder_.add_embedding_dim_per_channel(embedding_dim_per_channel);
   builder_.add_num_columns_per_channel(num_columns_per_channel);
@@ -5608,8 +8256,8 @@
   return builder_.Finish();
 }
 
-inline flatbuffers::Offset<ConcatEmbeddingsOptions> CreateConcatEmbeddingsOptionsDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<ConcatEmbeddingsOptions> CreateConcatEmbeddingsOptionsDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     int32_t num_channels = 0,
     const std::vector<int32_t> *num_columns_per_channel = nullptr,
     const std::vector<int32_t> *embedding_dim_per_channel = nullptr) {
@@ -5622,14 +8270,14 @@
       embedding_dim_per_channel__);
 }
 
-flatbuffers::Offset<ConcatEmbeddingsOptions> CreateConcatEmbeddingsOptions(flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<ConcatEmbeddingsOptions> CreateConcatEmbeddingsOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct LSHProjectionOptionsT : public flatbuffers::NativeTable {
+struct LSHProjectionOptionsT : public ::flatbuffers::NativeTable {
   typedef LSHProjectionOptions TableType;
   tflite::LSHProjectionType type = tflite::LSHProjectionType_UNKNOWN;
 };
 
-struct LSHProjectionOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct LSHProjectionOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef LSHProjectionOptionsT NativeTableType;
   typedef LSHProjectionOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -5638,52 +8286,52 @@
   tflite::LSHProjectionType type() const {
     return static_cast<tflite::LSHProjectionType>(GetField<int8_t>(VT_TYPE, 0));
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int8_t>(verifier, VT_TYPE, 1) &&
            verifier.EndTable();
   }
-  LSHProjectionOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(LSHProjectionOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<LSHProjectionOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  LSHProjectionOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(LSHProjectionOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<LSHProjectionOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct LSHProjectionOptionsBuilder {
   typedef LSHProjectionOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_type(tflite::LSHProjectionType type) {
     fbb_.AddElement<int8_t>(LSHProjectionOptions::VT_TYPE, static_cast<int8_t>(type), 0);
   }
-  explicit LSHProjectionOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit LSHProjectionOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<LSHProjectionOptions> Finish() {
+  ::flatbuffers::Offset<LSHProjectionOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<LSHProjectionOptions>(end);
+    auto o = ::flatbuffers::Offset<LSHProjectionOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<LSHProjectionOptions> CreateLSHProjectionOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<LSHProjectionOptions> CreateLSHProjectionOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     tflite::LSHProjectionType type = tflite::LSHProjectionType_UNKNOWN) {
   LSHProjectionOptionsBuilder builder_(_fbb);
   builder_.add_type(type);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<LSHProjectionOptions> CreateLSHProjectionOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<LSHProjectionOptions> CreateLSHProjectionOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct SVDFOptionsT : public flatbuffers::NativeTable {
+struct SVDFOptionsT : public ::flatbuffers::NativeTable {
   typedef SVDFOptions TableType;
   int32_t rank = 0;
   tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE;
   bool asymmetric_quantize_inputs = false;
 };
 
-struct SVDFOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct SVDFOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef SVDFOptionsT NativeTableType;
   typedef SVDFOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -5700,22 +8348,22 @@
   bool asymmetric_quantize_inputs() const {
     return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int32_t>(verifier, VT_RANK, 4) &&
            VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) &&
            VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) &&
            verifier.EndTable();
   }
-  SVDFOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(SVDFOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<SVDFOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  SVDFOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(SVDFOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<SVDFOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct SVDFOptionsBuilder {
   typedef SVDFOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_rank(int32_t rank) {
     fbb_.AddElement<int32_t>(SVDFOptions::VT_RANK, rank, 0);
   }
@@ -5725,19 +8373,19 @@
   void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) {
     fbb_.AddElement<uint8_t>(SVDFOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
   }
-  explicit SVDFOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit SVDFOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<SVDFOptions> Finish() {
+  ::flatbuffers::Offset<SVDFOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<SVDFOptions>(end);
+    auto o = ::flatbuffers::Offset<SVDFOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<SVDFOptions> CreateSVDFOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<SVDFOptions> CreateSVDFOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     int32_t rank = 0,
     tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE,
     bool asymmetric_quantize_inputs = false) {
@@ -5748,15 +8396,15 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<SVDFOptions> CreateSVDFOptions(flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<SVDFOptions> CreateSVDFOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct RNNOptionsT : public flatbuffers::NativeTable {
+struct RNNOptionsT : public ::flatbuffers::NativeTable {
   typedef RNNOptions TableType;
   tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE;
   bool asymmetric_quantize_inputs = false;
 };
 
-struct RNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct RNNOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef RNNOptionsT NativeTableType;
   typedef RNNOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -5769,40 +8417,40 @@
   bool asymmetric_quantize_inputs() const {
     return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) &&
            VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) &&
            verifier.EndTable();
   }
-  RNNOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(RNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<RNNOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  RNNOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(RNNOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<RNNOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct RNNOptionsBuilder {
   typedef RNNOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) {
     fbb_.AddElement<int8_t>(RNNOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
   }
   void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) {
     fbb_.AddElement<uint8_t>(RNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
   }
-  explicit RNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit RNNOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<RNNOptions> Finish() {
+  ::flatbuffers::Offset<RNNOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<RNNOptions>(end);
+    auto o = ::flatbuffers::Offset<RNNOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<RNNOptions> CreateRNNOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<RNNOptions> CreateRNNOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE,
     bool asymmetric_quantize_inputs = false) {
   RNNOptionsBuilder builder_(_fbb);
@@ -5811,16 +8459,16 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<RNNOptions> CreateRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<RNNOptions> CreateRNNOptions(::flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct SequenceRNNOptionsT : public flatbuffers::NativeTable {
+struct SequenceRNNOptionsT : public ::flatbuffers::NativeTable {
   typedef SequenceRNNOptions TableType;
   bool time_major = false;
   tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE;
   bool asymmetric_quantize_inputs = false;
 };
 
-struct SequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct SequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef SequenceRNNOptionsT NativeTableType;
   typedef SequenceRNNOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -5837,22 +8485,22 @@
   bool asymmetric_quantize_inputs() const {
     return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<uint8_t>(verifier, VT_TIME_MAJOR, 1) &&
            VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) &&
            VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) &&
            verifier.EndTable();
   }
-  SequenceRNNOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(SequenceRNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<SequenceRNNOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  SequenceRNNOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(SequenceRNNOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<SequenceRNNOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct SequenceRNNOptionsBuilder {
   typedef SequenceRNNOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_time_major(bool time_major) {
     fbb_.AddElement<uint8_t>(SequenceRNNOptions::VT_TIME_MAJOR, static_cast<uint8_t>(time_major), 0);
   }
@@ -5862,19 +8510,19 @@
   void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) {
     fbb_.AddElement<uint8_t>(SequenceRNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
   }
-  explicit SequenceRNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit SequenceRNNOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<SequenceRNNOptions> Finish() {
+  ::flatbuffers::Offset<SequenceRNNOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<SequenceRNNOptions>(end);
+    auto o = ::flatbuffers::Offset<SequenceRNNOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<SequenceRNNOptions> CreateSequenceRNNOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<SequenceRNNOptions> CreateSequenceRNNOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     bool time_major = false,
     tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE,
     bool asymmetric_quantize_inputs = false) {
@@ -5885,9 +8533,9 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<SequenceRNNOptions> CreateSequenceRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<SequenceRNNOptions> CreateSequenceRNNOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct BidirectionalSequenceRNNOptionsT : public flatbuffers::NativeTable {
+struct BidirectionalSequenceRNNOptionsT : public ::flatbuffers::NativeTable {
   typedef BidirectionalSequenceRNNOptions TableType;
   bool time_major = false;
   tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE;
@@ -5895,7 +8543,7 @@
   bool asymmetric_quantize_inputs = false;
 };
 
-struct BidirectionalSequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct BidirectionalSequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef BidirectionalSequenceRNNOptionsT NativeTableType;
   typedef BidirectionalSequenceRNNOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -5916,7 +8564,7 @@
   bool asymmetric_quantize_inputs() const {
     return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<uint8_t>(verifier, VT_TIME_MAJOR, 1) &&
            VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) &&
@@ -5924,15 +8572,15 @@
            VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) &&
            verifier.EndTable();
   }
-  BidirectionalSequenceRNNOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(BidirectionalSequenceRNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<BidirectionalSequenceRNNOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  BidirectionalSequenceRNNOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(BidirectionalSequenceRNNOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<BidirectionalSequenceRNNOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct BidirectionalSequenceRNNOptionsBuilder {
   typedef BidirectionalSequenceRNNOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_time_major(bool time_major) {
     fbb_.AddElement<uint8_t>(BidirectionalSequenceRNNOptions::VT_TIME_MAJOR, static_cast<uint8_t>(time_major), 0);
   }
@@ -5945,19 +8593,19 @@
   void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) {
     fbb_.AddElement<uint8_t>(BidirectionalSequenceRNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
   }
-  explicit BidirectionalSequenceRNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit BidirectionalSequenceRNNOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<BidirectionalSequenceRNNOptions> Finish() {
+  ::flatbuffers::Offset<BidirectionalSequenceRNNOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<BidirectionalSequenceRNNOptions>(end);
+    auto o = ::flatbuffers::Offset<BidirectionalSequenceRNNOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<BidirectionalSequenceRNNOptions> CreateBidirectionalSequenceRNNOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<BidirectionalSequenceRNNOptions> CreateBidirectionalSequenceRNNOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     bool time_major = false,
     tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE,
     bool merge_outputs = false,
@@ -5970,24 +8618,26 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<BidirectionalSequenceRNNOptions> CreateBidirectionalSequenceRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<BidirectionalSequenceRNNOptions> CreateBidirectionalSequenceRNNOptions(::flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct FullyConnectedOptionsT : public flatbuffers::NativeTable {
+struct FullyConnectedOptionsT : public ::flatbuffers::NativeTable {
   typedef FullyConnectedOptions TableType;
   tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE;
   tflite::FullyConnectedOptionsWeightsFormat weights_format = tflite::FullyConnectedOptionsWeightsFormat_DEFAULT;
   bool keep_num_dims = false;
   bool asymmetric_quantize_inputs = false;
+  tflite::TensorType quantized_bias_type = tflite::TensorType_FLOAT32;
 };
 
-struct FullyConnectedOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct FullyConnectedOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef FullyConnectedOptionsT NativeTableType;
   typedef FullyConnectedOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
     VT_FUSED_ACTIVATION_FUNCTION = 4,
     VT_WEIGHTS_FORMAT = 6,
     VT_KEEP_NUM_DIMS = 8,
-    VT_ASYMMETRIC_QUANTIZE_INPUTS = 10
+    VT_ASYMMETRIC_QUANTIZE_INPUTS = 10,
+    VT_QUANTIZED_BIAS_TYPE = 12
   };
   tflite::ActivationFunctionType fused_activation_function() const {
     return static_cast<tflite::ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
@@ -6001,23 +8651,27 @@
   bool asymmetric_quantize_inputs() const {
     return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  tflite::TensorType quantized_bias_type() const {
+    return static_cast<tflite::TensorType>(GetField<int8_t>(VT_QUANTIZED_BIAS_TYPE, 0));
+  }
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) &&
            VerifyField<int8_t>(verifier, VT_WEIGHTS_FORMAT, 1) &&
            VerifyField<uint8_t>(verifier, VT_KEEP_NUM_DIMS, 1) &&
            VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) &&
+           VerifyField<int8_t>(verifier, VT_QUANTIZED_BIAS_TYPE, 1) &&
            verifier.EndTable();
   }
-  FullyConnectedOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(FullyConnectedOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<FullyConnectedOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  FullyConnectedOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(FullyConnectedOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<FullyConnectedOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct FullyConnectedOptionsBuilder {
   typedef FullyConnectedOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) {
     fbb_.AddElement<int8_t>(FullyConnectedOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
   }
@@ -6030,24 +8684,29 @@
   void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) {
     fbb_.AddElement<uint8_t>(FullyConnectedOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
   }
-  explicit FullyConnectedOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  void add_quantized_bias_type(tflite::TensorType quantized_bias_type) {
+    fbb_.AddElement<int8_t>(FullyConnectedOptions::VT_QUANTIZED_BIAS_TYPE, static_cast<int8_t>(quantized_bias_type), 0);
+  }
+  explicit FullyConnectedOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<FullyConnectedOptions> Finish() {
+  ::flatbuffers::Offset<FullyConnectedOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<FullyConnectedOptions>(end);
+    auto o = ::flatbuffers::Offset<FullyConnectedOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<FullyConnectedOptions> CreateFullyConnectedOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<FullyConnectedOptions> CreateFullyConnectedOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE,
     tflite::FullyConnectedOptionsWeightsFormat weights_format = tflite::FullyConnectedOptionsWeightsFormat_DEFAULT,
     bool keep_num_dims = false,
-    bool asymmetric_quantize_inputs = false) {
+    bool asymmetric_quantize_inputs = false,
+    tflite::TensorType quantized_bias_type = tflite::TensorType_FLOAT32) {
   FullyConnectedOptionsBuilder builder_(_fbb);
+  builder_.add_quantized_bias_type(quantized_bias_type);
   builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
   builder_.add_keep_num_dims(keep_num_dims);
   builder_.add_weights_format(weights_format);
@@ -6055,14 +8714,14 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<FullyConnectedOptions> CreateFullyConnectedOptions(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<FullyConnectedOptions> CreateFullyConnectedOptions(::flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct SoftmaxOptionsT : public flatbuffers::NativeTable {
+struct SoftmaxOptionsT : public ::flatbuffers::NativeTable {
   typedef SoftmaxOptions TableType;
   float beta = 0.0f;
 };
 
-struct SoftmaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct SoftmaxOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef SoftmaxOptionsT NativeTableType;
   typedef SoftmaxOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -6071,51 +8730,51 @@
   float beta() const {
     return GetField<float>(VT_BETA, 0.0f);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<float>(verifier, VT_BETA, 4) &&
            verifier.EndTable();
   }
-  SoftmaxOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(SoftmaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<SoftmaxOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  SoftmaxOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(SoftmaxOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<SoftmaxOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct SoftmaxOptionsBuilder {
   typedef SoftmaxOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_beta(float beta) {
     fbb_.AddElement<float>(SoftmaxOptions::VT_BETA, beta, 0.0f);
   }
-  explicit SoftmaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit SoftmaxOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<SoftmaxOptions> Finish() {
+  ::flatbuffers::Offset<SoftmaxOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<SoftmaxOptions>(end);
+    auto o = ::flatbuffers::Offset<SoftmaxOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<SoftmaxOptions> CreateSoftmaxOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<SoftmaxOptions> CreateSoftmaxOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     float beta = 0.0f) {
   SoftmaxOptionsBuilder builder_(_fbb);
   builder_.add_beta(beta);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<SoftmaxOptions> CreateSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<SoftmaxOptions> CreateSoftmaxOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct ConcatenationOptionsT : public flatbuffers::NativeTable {
+struct ConcatenationOptionsT : public ::flatbuffers::NativeTable {
   typedef ConcatenationOptions TableType;
   int32_t axis = 0;
   tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE;
 };
 
-struct ConcatenationOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct ConcatenationOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef ConcatenationOptionsT NativeTableType;
   typedef ConcatenationOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -6128,40 +8787,40 @@
   tflite::ActivationFunctionType fused_activation_function() const {
     return static_cast<tflite::ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int32_t>(verifier, VT_AXIS, 4) &&
            VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) &&
            verifier.EndTable();
   }
-  ConcatenationOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(ConcatenationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<ConcatenationOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  ConcatenationOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(ConcatenationOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<ConcatenationOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct ConcatenationOptionsBuilder {
   typedef ConcatenationOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_axis(int32_t axis) {
     fbb_.AddElement<int32_t>(ConcatenationOptions::VT_AXIS, axis, 0);
   }
   void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) {
     fbb_.AddElement<int8_t>(ConcatenationOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
   }
-  explicit ConcatenationOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit ConcatenationOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<ConcatenationOptions> Finish() {
+  ::flatbuffers::Offset<ConcatenationOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<ConcatenationOptions>(end);
+    auto o = ::flatbuffers::Offset<ConcatenationOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<ConcatenationOptions> CreateConcatenationOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<ConcatenationOptions> CreateConcatenationOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     int32_t axis = 0,
     tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) {
   ConcatenationOptionsBuilder builder_(_fbb);
@@ -6170,15 +8829,15 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<ConcatenationOptions> CreateConcatenationOptions(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<ConcatenationOptions> CreateConcatenationOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct AddOptionsT : public flatbuffers::NativeTable {
+struct AddOptionsT : public ::flatbuffers::NativeTable {
   typedef AddOptions TableType;
   tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE;
   bool pot_scale_int16 = true;
 };
 
-struct AddOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct AddOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef AddOptionsT NativeTableType;
   typedef AddOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -6191,40 +8850,40 @@
   bool pot_scale_int16() const {
     return GetField<uint8_t>(VT_POT_SCALE_INT16, 1) != 0;
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) &&
            VerifyField<uint8_t>(verifier, VT_POT_SCALE_INT16, 1) &&
            verifier.EndTable();
   }
-  AddOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(AddOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<AddOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  AddOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(AddOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<AddOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct AddOptionsBuilder {
   typedef AddOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) {
     fbb_.AddElement<int8_t>(AddOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
   }
   void add_pot_scale_int16(bool pot_scale_int16) {
     fbb_.AddElement<uint8_t>(AddOptions::VT_POT_SCALE_INT16, static_cast<uint8_t>(pot_scale_int16), 1);
   }
-  explicit AddOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit AddOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<AddOptions> Finish() {
+  ::flatbuffers::Offset<AddOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<AddOptions>(end);
+    auto o = ::flatbuffers::Offset<AddOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<AddOptions> CreateAddOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<AddOptions> CreateAddOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE,
     bool pot_scale_int16 = true) {
   AddOptionsBuilder builder_(_fbb);
@@ -6233,14 +8892,14 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<AddOptions> CreateAddOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<AddOptions> CreateAddOptions(::flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct MulOptionsT : public flatbuffers::NativeTable {
+struct MulOptionsT : public ::flatbuffers::NativeTable {
   typedef MulOptions TableType;
   tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE;
 };
 
-struct MulOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct MulOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef MulOptionsT NativeTableType;
   typedef MulOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -6249,50 +8908,50 @@
   tflite::ActivationFunctionType fused_activation_function() const {
     return static_cast<tflite::ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) &&
            verifier.EndTable();
   }
-  MulOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(MulOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<MulOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  MulOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(MulOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<MulOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct MulOptionsBuilder {
   typedef MulOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) {
     fbb_.AddElement<int8_t>(MulOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
   }
-  explicit MulOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit MulOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<MulOptions> Finish() {
+  ::flatbuffers::Offset<MulOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<MulOptions>(end);
+    auto o = ::flatbuffers::Offset<MulOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<MulOptions> CreateMulOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<MulOptions> CreateMulOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) {
   MulOptionsBuilder builder_(_fbb);
   builder_.add_fused_activation_function(fused_activation_function);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<MulOptions> CreateMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<MulOptions> CreateMulOptions(::flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct L2NormOptionsT : public flatbuffers::NativeTable {
+struct L2NormOptionsT : public ::flatbuffers::NativeTable {
   typedef L2NormOptions TableType;
   tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE;
 };
 
-struct L2NormOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct L2NormOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef L2NormOptionsT NativeTableType;
   typedef L2NormOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -6301,45 +8960,45 @@
   tflite::ActivationFunctionType fused_activation_function() const {
     return static_cast<tflite::ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) &&
            verifier.EndTable();
   }
-  L2NormOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(L2NormOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<L2NormOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  L2NormOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(L2NormOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<L2NormOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct L2NormOptionsBuilder {
   typedef L2NormOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) {
     fbb_.AddElement<int8_t>(L2NormOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
   }
-  explicit L2NormOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit L2NormOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<L2NormOptions> Finish() {
+  ::flatbuffers::Offset<L2NormOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<L2NormOptions>(end);
+    auto o = ::flatbuffers::Offset<L2NormOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<L2NormOptions> CreateL2NormOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<L2NormOptions> CreateL2NormOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) {
   L2NormOptionsBuilder builder_(_fbb);
   builder_.add_fused_activation_function(fused_activation_function);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<L2NormOptions> CreateL2NormOptions(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<L2NormOptions> CreateL2NormOptions(::flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct LocalResponseNormalizationOptionsT : public flatbuffers::NativeTable {
+struct LocalResponseNormalizationOptionsT : public ::flatbuffers::NativeTable {
   typedef LocalResponseNormalizationOptions TableType;
   int32_t radius = 0;
   float bias = 0.0f;
@@ -6347,7 +9006,7 @@
   float beta = 0.0f;
 };
 
-struct LocalResponseNormalizationOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct LocalResponseNormalizationOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef LocalResponseNormalizationOptionsT NativeTableType;
   typedef LocalResponseNormalizationOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -6368,7 +9027,7 @@
   float beta() const {
     return GetField<float>(VT_BETA, 0.0f);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int32_t>(verifier, VT_RADIUS, 4) &&
            VerifyField<float>(verifier, VT_BIAS, 4) &&
@@ -6376,15 +9035,15 @@
            VerifyField<float>(verifier, VT_BETA, 4) &&
            verifier.EndTable();
   }
-  LocalResponseNormalizationOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(LocalResponseNormalizationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<LocalResponseNormalizationOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  LocalResponseNormalizationOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(LocalResponseNormalizationOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<LocalResponseNormalizationOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct LocalResponseNormalizationOptionsBuilder {
   typedef LocalResponseNormalizationOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_radius(int32_t radius) {
     fbb_.AddElement<int32_t>(LocalResponseNormalizationOptions::VT_RADIUS, radius, 0);
   }
@@ -6397,19 +9056,19 @@
   void add_beta(float beta) {
     fbb_.AddElement<float>(LocalResponseNormalizationOptions::VT_BETA, beta, 0.0f);
   }
-  explicit LocalResponseNormalizationOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit LocalResponseNormalizationOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<LocalResponseNormalizationOptions> Finish() {
+  ::flatbuffers::Offset<LocalResponseNormalizationOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<LocalResponseNormalizationOptions>(end);
+    auto o = ::flatbuffers::Offset<LocalResponseNormalizationOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<LocalResponseNormalizationOptions> CreateLocalResponseNormalizationOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<LocalResponseNormalizationOptions> CreateLocalResponseNormalizationOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     int32_t radius = 0,
     float bias = 0.0f,
     float alpha = 0.0f,
@@ -6422,9 +9081,9 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<LocalResponseNormalizationOptions> CreateLocalResponseNormalizationOptions(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<LocalResponseNormalizationOptions> CreateLocalResponseNormalizationOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct LSTMOptionsT : public flatbuffers::NativeTable {
+struct LSTMOptionsT : public ::flatbuffers::NativeTable {
   typedef LSTMOptions TableType;
   tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE;
   float cell_clip = 0.0f;
@@ -6433,7 +9092,7 @@
   bool asymmetric_quantize_inputs = false;
 };
 
-struct LSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct LSTMOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef LSTMOptionsT NativeTableType;
   typedef LSTMOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -6458,7 +9117,7 @@
   bool asymmetric_quantize_inputs() const {
     return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) &&
            VerifyField<float>(verifier, VT_CELL_CLIP, 4) &&
@@ -6467,15 +9126,15 @@
            VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) &&
            verifier.EndTable();
   }
-  LSTMOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(LSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<LSTMOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  LSTMOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(LSTMOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<LSTMOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct LSTMOptionsBuilder {
   typedef LSTMOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) {
     fbb_.AddElement<int8_t>(LSTMOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
   }
@@ -6491,19 +9150,19 @@
   void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) {
     fbb_.AddElement<uint8_t>(LSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
   }
-  explicit LSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit LSTMOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<LSTMOptions> Finish() {
+  ::flatbuffers::Offset<LSTMOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<LSTMOptions>(end);
+    auto o = ::flatbuffers::Offset<LSTMOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<LSTMOptions> CreateLSTMOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<LSTMOptions> CreateLSTMOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE,
     float cell_clip = 0.0f,
     float proj_clip = 0.0f,
@@ -6518,9 +9177,9 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<LSTMOptions> CreateLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<LSTMOptions> CreateLSTMOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct UnidirectionalSequenceLSTMOptionsT : public flatbuffers::NativeTable {
+struct UnidirectionalSequenceLSTMOptionsT : public ::flatbuffers::NativeTable {
   typedef UnidirectionalSequenceLSTMOptions TableType;
   tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE;
   float cell_clip = 0.0f;
@@ -6530,7 +9189,7 @@
   bool diagonal_recurrent_tensors = false;
 };
 
-struct UnidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct UnidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef UnidirectionalSequenceLSTMOptionsT NativeTableType;
   typedef UnidirectionalSequenceLSTMOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -6559,7 +9218,7 @@
   bool diagonal_recurrent_tensors() const {
     return GetField<uint8_t>(VT_DIAGONAL_RECURRENT_TENSORS, 0) != 0;
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) &&
            VerifyField<float>(verifier, VT_CELL_CLIP, 4) &&
@@ -6569,15 +9228,15 @@
            VerifyField<uint8_t>(verifier, VT_DIAGONAL_RECURRENT_TENSORS, 1) &&
            verifier.EndTable();
   }
-  UnidirectionalSequenceLSTMOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(UnidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<UnidirectionalSequenceLSTMOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  UnidirectionalSequenceLSTMOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(UnidirectionalSequenceLSTMOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<UnidirectionalSequenceLSTMOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct UnidirectionalSequenceLSTMOptionsBuilder {
   typedef UnidirectionalSequenceLSTMOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) {
     fbb_.AddElement<int8_t>(UnidirectionalSequenceLSTMOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
   }
@@ -6596,19 +9255,19 @@
   void add_diagonal_recurrent_tensors(bool diagonal_recurrent_tensors) {
     fbb_.AddElement<uint8_t>(UnidirectionalSequenceLSTMOptions::VT_DIAGONAL_RECURRENT_TENSORS, static_cast<uint8_t>(diagonal_recurrent_tensors), 0);
   }
-  explicit UnidirectionalSequenceLSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit UnidirectionalSequenceLSTMOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<UnidirectionalSequenceLSTMOptions> Finish() {
+  ::flatbuffers::Offset<UnidirectionalSequenceLSTMOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<UnidirectionalSequenceLSTMOptions>(end);
+    auto o = ::flatbuffers::Offset<UnidirectionalSequenceLSTMOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<UnidirectionalSequenceLSTMOptions> CreateUnidirectionalSequenceLSTMOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<UnidirectionalSequenceLSTMOptions> CreateUnidirectionalSequenceLSTMOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE,
     float cell_clip = 0.0f,
     float proj_clip = 0.0f,
@@ -6625,9 +9284,9 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<UnidirectionalSequenceLSTMOptions> CreateUnidirectionalSequenceLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<UnidirectionalSequenceLSTMOptions> CreateUnidirectionalSequenceLSTMOptions(::flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct BidirectionalSequenceLSTMOptionsT : public flatbuffers::NativeTable {
+struct BidirectionalSequenceLSTMOptionsT : public ::flatbuffers::NativeTable {
   typedef BidirectionalSequenceLSTMOptions TableType;
   tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE;
   float cell_clip = 0.0f;
@@ -6637,7 +9296,7 @@
   bool asymmetric_quantize_inputs = false;
 };
 
-struct BidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct BidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef BidirectionalSequenceLSTMOptionsT NativeTableType;
   typedef BidirectionalSequenceLSTMOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -6666,7 +9325,7 @@
   bool asymmetric_quantize_inputs() const {
     return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) &&
            VerifyField<float>(verifier, VT_CELL_CLIP, 4) &&
@@ -6676,15 +9335,15 @@
            VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) &&
            verifier.EndTable();
   }
-  BidirectionalSequenceLSTMOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(BidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<BidirectionalSequenceLSTMOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  BidirectionalSequenceLSTMOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(BidirectionalSequenceLSTMOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<BidirectionalSequenceLSTMOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct BidirectionalSequenceLSTMOptionsBuilder {
   typedef BidirectionalSequenceLSTMOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) {
     fbb_.AddElement<int8_t>(BidirectionalSequenceLSTMOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
   }
@@ -6703,19 +9362,19 @@
   void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) {
     fbb_.AddElement<uint8_t>(BidirectionalSequenceLSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
   }
-  explicit BidirectionalSequenceLSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit BidirectionalSequenceLSTMOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<BidirectionalSequenceLSTMOptions> Finish() {
+  ::flatbuffers::Offset<BidirectionalSequenceLSTMOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<BidirectionalSequenceLSTMOptions>(end);
+    auto o = ::flatbuffers::Offset<BidirectionalSequenceLSTMOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<BidirectionalSequenceLSTMOptions> CreateBidirectionalSequenceLSTMOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<BidirectionalSequenceLSTMOptions> CreateBidirectionalSequenceLSTMOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE,
     float cell_clip = 0.0f,
     float proj_clip = 0.0f,
@@ -6732,15 +9391,15 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<BidirectionalSequenceLSTMOptions> CreateBidirectionalSequenceLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<BidirectionalSequenceLSTMOptions> CreateBidirectionalSequenceLSTMOptions(::flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct ResizeBilinearOptionsT : public flatbuffers::NativeTable {
+struct ResizeBilinearOptionsT : public ::flatbuffers::NativeTable {
   typedef ResizeBilinearOptions TableType;
   bool align_corners = false;
   bool half_pixel_centers = false;
 };
 
-struct ResizeBilinearOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct ResizeBilinearOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef ResizeBilinearOptionsT NativeTableType;
   typedef ResizeBilinearOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -6753,40 +9412,40 @@
   bool half_pixel_centers() const {
     return GetField<uint8_t>(VT_HALF_PIXEL_CENTERS, 0) != 0;
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<uint8_t>(verifier, VT_ALIGN_CORNERS, 1) &&
            VerifyField<uint8_t>(verifier, VT_HALF_PIXEL_CENTERS, 1) &&
            verifier.EndTable();
   }
-  ResizeBilinearOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(ResizeBilinearOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<ResizeBilinearOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  ResizeBilinearOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(ResizeBilinearOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<ResizeBilinearOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct ResizeBilinearOptionsBuilder {
   typedef ResizeBilinearOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_align_corners(bool align_corners) {
     fbb_.AddElement<uint8_t>(ResizeBilinearOptions::VT_ALIGN_CORNERS, static_cast<uint8_t>(align_corners), 0);
   }
   void add_half_pixel_centers(bool half_pixel_centers) {
     fbb_.AddElement<uint8_t>(ResizeBilinearOptions::VT_HALF_PIXEL_CENTERS, static_cast<uint8_t>(half_pixel_centers), 0);
   }
-  explicit ResizeBilinearOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit ResizeBilinearOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<ResizeBilinearOptions> Finish() {
+  ::flatbuffers::Offset<ResizeBilinearOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<ResizeBilinearOptions>(end);
+    auto o = ::flatbuffers::Offset<ResizeBilinearOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<ResizeBilinearOptions> CreateResizeBilinearOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<ResizeBilinearOptions> CreateResizeBilinearOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     bool align_corners = false,
     bool half_pixel_centers = false) {
   ResizeBilinearOptionsBuilder builder_(_fbb);
@@ -6795,15 +9454,15 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<ResizeBilinearOptions> CreateResizeBilinearOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<ResizeBilinearOptions> CreateResizeBilinearOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct ResizeNearestNeighborOptionsT : public flatbuffers::NativeTable {
+struct ResizeNearestNeighborOptionsT : public ::flatbuffers::NativeTable {
   typedef ResizeNearestNeighborOptions TableType;
   bool align_corners = false;
   bool half_pixel_centers = false;
 };
 
-struct ResizeNearestNeighborOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct ResizeNearestNeighborOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef ResizeNearestNeighborOptionsT NativeTableType;
   typedef ResizeNearestNeighborOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -6816,40 +9475,40 @@
   bool half_pixel_centers() const {
     return GetField<uint8_t>(VT_HALF_PIXEL_CENTERS, 0) != 0;
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<uint8_t>(verifier, VT_ALIGN_CORNERS, 1) &&
            VerifyField<uint8_t>(verifier, VT_HALF_PIXEL_CENTERS, 1) &&
            verifier.EndTable();
   }
-  ResizeNearestNeighborOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(ResizeNearestNeighborOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<ResizeNearestNeighborOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  ResizeNearestNeighborOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(ResizeNearestNeighborOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<ResizeNearestNeighborOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct ResizeNearestNeighborOptionsBuilder {
   typedef ResizeNearestNeighborOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_align_corners(bool align_corners) {
     fbb_.AddElement<uint8_t>(ResizeNearestNeighborOptions::VT_ALIGN_CORNERS, static_cast<uint8_t>(align_corners), 0);
   }
   void add_half_pixel_centers(bool half_pixel_centers) {
     fbb_.AddElement<uint8_t>(ResizeNearestNeighborOptions::VT_HALF_PIXEL_CENTERS, static_cast<uint8_t>(half_pixel_centers), 0);
   }
-  explicit ResizeNearestNeighborOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit ResizeNearestNeighborOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<ResizeNearestNeighborOptions> Finish() {
+  ::flatbuffers::Offset<ResizeNearestNeighborOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<ResizeNearestNeighborOptions>(end);
+    auto o = ::flatbuffers::Offset<ResizeNearestNeighborOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<ResizeNearestNeighborOptions> CreateResizeNearestNeighborOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<ResizeNearestNeighborOptions> CreateResizeNearestNeighborOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     bool align_corners = false,
     bool half_pixel_centers = false) {
   ResizeNearestNeighborOptionsBuilder builder_(_fbb);
@@ -6858,14 +9517,14 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<ResizeNearestNeighborOptions> CreateResizeNearestNeighborOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<ResizeNearestNeighborOptions> CreateResizeNearestNeighborOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct CallOptionsT : public flatbuffers::NativeTable {
+struct CallOptionsT : public ::flatbuffers::NativeTable {
   typedef CallOptions TableType;
   uint32_t subgraph = 0;
 };
 
-struct CallOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct CallOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef CallOptionsT NativeTableType;
   typedef CallOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -6874,175 +9533,175 @@
   uint32_t subgraph() const {
     return GetField<uint32_t>(VT_SUBGRAPH, 0);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<uint32_t>(verifier, VT_SUBGRAPH, 4) &&
            verifier.EndTable();
   }
-  CallOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(CallOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<CallOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  CallOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(CallOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<CallOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct CallOptionsBuilder {
   typedef CallOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_subgraph(uint32_t subgraph) {
     fbb_.AddElement<uint32_t>(CallOptions::VT_SUBGRAPH, subgraph, 0);
   }
-  explicit CallOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit CallOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<CallOptions> Finish() {
+  ::flatbuffers::Offset<CallOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<CallOptions>(end);
+    auto o = ::flatbuffers::Offset<CallOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<CallOptions> CreateCallOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<CallOptions> CreateCallOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     uint32_t subgraph = 0) {
   CallOptionsBuilder builder_(_fbb);
   builder_.add_subgraph(subgraph);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<CallOptions> CreateCallOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<CallOptions> CreateCallOptions(::flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct PadOptionsT : public flatbuffers::NativeTable {
+struct PadOptionsT : public ::flatbuffers::NativeTable {
   typedef PadOptions TableType;
 };
 
-struct PadOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct PadOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef PadOptionsT NativeTableType;
   typedef PadOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  PadOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(PadOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<PadOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  PadOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(PadOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<PadOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct PadOptionsBuilder {
   typedef PadOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit PadOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit PadOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<PadOptions> Finish() {
+  ::flatbuffers::Offset<PadOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<PadOptions>(end);
+    auto o = ::flatbuffers::Offset<PadOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<PadOptions> CreatePadOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<PadOptions> CreatePadOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   PadOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<PadOptions> CreatePadOptions(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<PadOptions> CreatePadOptions(::flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct PadV2OptionsT : public flatbuffers::NativeTable {
+struct PadV2OptionsT : public ::flatbuffers::NativeTable {
   typedef PadV2Options TableType;
 };
 
-struct PadV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct PadV2Options FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef PadV2OptionsT NativeTableType;
   typedef PadV2OptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  PadV2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(PadV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<PadV2Options> Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  PadV2OptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(PadV2OptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<PadV2Options> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct PadV2OptionsBuilder {
   typedef PadV2Options Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit PadV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit PadV2OptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<PadV2Options> Finish() {
+  ::flatbuffers::Offset<PadV2Options> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<PadV2Options>(end);
+    auto o = ::flatbuffers::Offset<PadV2Options>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<PadV2Options> CreatePadV2Options(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<PadV2Options> CreatePadV2Options(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   PadV2OptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<PadV2Options> CreatePadV2Options(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<PadV2Options> CreatePadV2Options(::flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct ReshapeOptionsT : public flatbuffers::NativeTable {
+struct ReshapeOptionsT : public ::flatbuffers::NativeTable {
   typedef ReshapeOptions TableType;
   std::vector<int32_t> new_shape{};
 };
 
-struct ReshapeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct ReshapeOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef ReshapeOptionsT NativeTableType;
   typedef ReshapeOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
     VT_NEW_SHAPE = 4
   };
-  const flatbuffers::Vector<int32_t> *new_shape() const {
-    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_NEW_SHAPE);
+  const ::flatbuffers::Vector<int32_t> *new_shape() const {
+    return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_NEW_SHAPE);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyOffset(verifier, VT_NEW_SHAPE) &&
            verifier.VerifyVector(new_shape()) &&
            verifier.EndTable();
   }
-  ReshapeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(ReshapeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<ReshapeOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  ReshapeOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(ReshapeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<ReshapeOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct ReshapeOptionsBuilder {
   typedef ReshapeOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_new_shape(flatbuffers::Offset<flatbuffers::Vector<int32_t>> new_shape) {
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_new_shape(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> new_shape) {
     fbb_.AddOffset(ReshapeOptions::VT_NEW_SHAPE, new_shape);
   }
-  explicit ReshapeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit ReshapeOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<ReshapeOptions> Finish() {
+  ::flatbuffers::Offset<ReshapeOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<ReshapeOptions>(end);
+    auto o = ::flatbuffers::Offset<ReshapeOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<ReshapeOptions> CreateReshapeOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::Vector<int32_t>> new_shape = 0) {
+inline ::flatbuffers::Offset<ReshapeOptions> CreateReshapeOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> new_shape = 0) {
   ReshapeOptionsBuilder builder_(_fbb);
   builder_.add_new_shape(new_shape);
   return builder_.Finish();
 }
 
-inline flatbuffers::Offset<ReshapeOptions> CreateReshapeOptionsDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<ReshapeOptions> CreateReshapeOptionsDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     const std::vector<int32_t> *new_shape = nullptr) {
   auto new_shape__ = new_shape ? _fbb.CreateVector<int32_t>(*new_shape) : 0;
   return tflite::CreateReshapeOptions(
@@ -7050,94 +9709,94 @@
       new_shape__);
 }
 
-flatbuffers::Offset<ReshapeOptions> CreateReshapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<ReshapeOptions> CreateReshapeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct SpaceToBatchNDOptionsT : public flatbuffers::NativeTable {
+struct SpaceToBatchNDOptionsT : public ::flatbuffers::NativeTable {
   typedef SpaceToBatchNDOptions TableType;
 };
 
-struct SpaceToBatchNDOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct SpaceToBatchNDOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef SpaceToBatchNDOptionsT NativeTableType;
   typedef SpaceToBatchNDOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  SpaceToBatchNDOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(SpaceToBatchNDOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<SpaceToBatchNDOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  SpaceToBatchNDOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(SpaceToBatchNDOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<SpaceToBatchNDOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct SpaceToBatchNDOptionsBuilder {
   typedef SpaceToBatchNDOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit SpaceToBatchNDOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit SpaceToBatchNDOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<SpaceToBatchNDOptions> Finish() {
+  ::flatbuffers::Offset<SpaceToBatchNDOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<SpaceToBatchNDOptions>(end);
+    auto o = ::flatbuffers::Offset<SpaceToBatchNDOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<SpaceToBatchNDOptions> CreateSpaceToBatchNDOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<SpaceToBatchNDOptions> CreateSpaceToBatchNDOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   SpaceToBatchNDOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<SpaceToBatchNDOptions> CreateSpaceToBatchNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<SpaceToBatchNDOptions> CreateSpaceToBatchNDOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct BatchToSpaceNDOptionsT : public flatbuffers::NativeTable {
+struct BatchToSpaceNDOptionsT : public ::flatbuffers::NativeTable {
   typedef BatchToSpaceNDOptions TableType;
 };
 
-struct BatchToSpaceNDOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct BatchToSpaceNDOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef BatchToSpaceNDOptionsT NativeTableType;
   typedef BatchToSpaceNDOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  BatchToSpaceNDOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(BatchToSpaceNDOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<BatchToSpaceNDOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  BatchToSpaceNDOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(BatchToSpaceNDOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<BatchToSpaceNDOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct BatchToSpaceNDOptionsBuilder {
   typedef BatchToSpaceNDOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit BatchToSpaceNDOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit BatchToSpaceNDOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<BatchToSpaceNDOptions> Finish() {
+  ::flatbuffers::Offset<BatchToSpaceNDOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<BatchToSpaceNDOptions>(end);
+    auto o = ::flatbuffers::Offset<BatchToSpaceNDOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<BatchToSpaceNDOptions> CreateBatchToSpaceNDOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<BatchToSpaceNDOptions> CreateBatchToSpaceNDOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   BatchToSpaceNDOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<BatchToSpaceNDOptions> CreateBatchToSpaceNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<BatchToSpaceNDOptions> CreateBatchToSpaceNDOptions(::flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct SkipGramOptionsT : public flatbuffers::NativeTable {
+struct SkipGramOptionsT : public ::flatbuffers::NativeTable {
   typedef SkipGramOptions TableType;
   int32_t ngram_size = 0;
   int32_t max_skip_size = 0;
   bool include_all_ngrams = false;
 };
 
-struct SkipGramOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct SkipGramOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef SkipGramOptionsT NativeTableType;
   typedef SkipGramOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -7154,22 +9813,22 @@
   bool include_all_ngrams() const {
     return GetField<uint8_t>(VT_INCLUDE_ALL_NGRAMS, 0) != 0;
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int32_t>(verifier, VT_NGRAM_SIZE, 4) &&
            VerifyField<int32_t>(verifier, VT_MAX_SKIP_SIZE, 4) &&
            VerifyField<uint8_t>(verifier, VT_INCLUDE_ALL_NGRAMS, 1) &&
            verifier.EndTable();
   }
-  SkipGramOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(SkipGramOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<SkipGramOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  SkipGramOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(SkipGramOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<SkipGramOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct SkipGramOptionsBuilder {
   typedef SkipGramOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_ngram_size(int32_t ngram_size) {
     fbb_.AddElement<int32_t>(SkipGramOptions::VT_NGRAM_SIZE, ngram_size, 0);
   }
@@ -7179,19 +9838,19 @@
   void add_include_all_ngrams(bool include_all_ngrams) {
     fbb_.AddElement<uint8_t>(SkipGramOptions::VT_INCLUDE_ALL_NGRAMS, static_cast<uint8_t>(include_all_ngrams), 0);
   }
-  explicit SkipGramOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit SkipGramOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<SkipGramOptions> Finish() {
+  ::flatbuffers::Offset<SkipGramOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<SkipGramOptions>(end);
+    auto o = ::flatbuffers::Offset<SkipGramOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<SkipGramOptions> CreateSkipGramOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<SkipGramOptions> CreateSkipGramOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     int32_t ngram_size = 0,
     int32_t max_skip_size = 0,
     bool include_all_ngrams = false) {
@@ -7202,14 +9861,14 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<SkipGramOptions> CreateSkipGramOptions(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<SkipGramOptions> CreateSkipGramOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct SpaceToDepthOptionsT : public flatbuffers::NativeTable {
+struct SpaceToDepthOptionsT : public ::flatbuffers::NativeTable {
   typedef SpaceToDepthOptions TableType;
   int32_t block_size = 0;
 };
 
-struct SpaceToDepthOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct SpaceToDepthOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef SpaceToDepthOptionsT NativeTableType;
   typedef SpaceToDepthOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -7218,50 +9877,50 @@
   int32_t block_size() const {
     return GetField<int32_t>(VT_BLOCK_SIZE, 0);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int32_t>(verifier, VT_BLOCK_SIZE, 4) &&
            verifier.EndTable();
   }
-  SpaceToDepthOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(SpaceToDepthOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<SpaceToDepthOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  SpaceToDepthOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(SpaceToDepthOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<SpaceToDepthOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct SpaceToDepthOptionsBuilder {
   typedef SpaceToDepthOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_block_size(int32_t block_size) {
     fbb_.AddElement<int32_t>(SpaceToDepthOptions::VT_BLOCK_SIZE, block_size, 0);
   }
-  explicit SpaceToDepthOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit SpaceToDepthOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<SpaceToDepthOptions> Finish() {
+  ::flatbuffers::Offset<SpaceToDepthOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<SpaceToDepthOptions>(end);
+    auto o = ::flatbuffers::Offset<SpaceToDepthOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<SpaceToDepthOptions> CreateSpaceToDepthOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<SpaceToDepthOptions> CreateSpaceToDepthOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     int32_t block_size = 0) {
   SpaceToDepthOptionsBuilder builder_(_fbb);
   builder_.add_block_size(block_size);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<SpaceToDepthOptions> CreateSpaceToDepthOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<SpaceToDepthOptions> CreateSpaceToDepthOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct DepthToSpaceOptionsT : public flatbuffers::NativeTable {
+struct DepthToSpaceOptionsT : public ::flatbuffers::NativeTable {
   typedef DepthToSpaceOptions TableType;
   int32_t block_size = 0;
 };
 
-struct DepthToSpaceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct DepthToSpaceOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef DepthToSpaceOptionsT NativeTableType;
   typedef DepthToSpaceOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -7270,51 +9929,51 @@
   int32_t block_size() const {
     return GetField<int32_t>(VT_BLOCK_SIZE, 0);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int32_t>(verifier, VT_BLOCK_SIZE, 4) &&
            verifier.EndTable();
   }
-  DepthToSpaceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(DepthToSpaceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<DepthToSpaceOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  DepthToSpaceOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(DepthToSpaceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<DepthToSpaceOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct DepthToSpaceOptionsBuilder {
   typedef DepthToSpaceOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_block_size(int32_t block_size) {
     fbb_.AddElement<int32_t>(DepthToSpaceOptions::VT_BLOCK_SIZE, block_size, 0);
   }
-  explicit DepthToSpaceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit DepthToSpaceOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<DepthToSpaceOptions> Finish() {
+  ::flatbuffers::Offset<DepthToSpaceOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<DepthToSpaceOptions>(end);
+    auto o = ::flatbuffers::Offset<DepthToSpaceOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<DepthToSpaceOptions> CreateDepthToSpaceOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<DepthToSpaceOptions> CreateDepthToSpaceOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     int32_t block_size = 0) {
   DepthToSpaceOptionsBuilder builder_(_fbb);
   builder_.add_block_size(block_size);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<DepthToSpaceOptions> CreateDepthToSpaceOptions(flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<DepthToSpaceOptions> CreateDepthToSpaceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct SubOptionsT : public flatbuffers::NativeTable {
+struct SubOptionsT : public ::flatbuffers::NativeTable {
   typedef SubOptions TableType;
   tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE;
   bool pot_scale_int16 = true;
 };
 
-struct SubOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct SubOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef SubOptionsT NativeTableType;
   typedef SubOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -7327,40 +9986,40 @@
   bool pot_scale_int16() const {
     return GetField<uint8_t>(VT_POT_SCALE_INT16, 1) != 0;
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) &&
            VerifyField<uint8_t>(verifier, VT_POT_SCALE_INT16, 1) &&
            verifier.EndTable();
   }
-  SubOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(SubOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<SubOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  SubOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(SubOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<SubOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct SubOptionsBuilder {
   typedef SubOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) {
     fbb_.AddElement<int8_t>(SubOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
   }
   void add_pot_scale_int16(bool pot_scale_int16) {
     fbb_.AddElement<uint8_t>(SubOptions::VT_POT_SCALE_INT16, static_cast<uint8_t>(pot_scale_int16), 1);
   }
-  explicit SubOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit SubOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<SubOptions> Finish() {
+  ::flatbuffers::Offset<SubOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<SubOptions>(end);
+    auto o = ::flatbuffers::Offset<SubOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<SubOptions> CreateSubOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<SubOptions> CreateSubOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE,
     bool pot_scale_int16 = true) {
   SubOptionsBuilder builder_(_fbb);
@@ -7369,14 +10028,14 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<SubOptions> CreateSubOptions(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<SubOptions> CreateSubOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct DivOptionsT : public flatbuffers::NativeTable {
+struct DivOptionsT : public ::flatbuffers::NativeTable {
   typedef DivOptions TableType;
   tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE;
 };
 
-struct DivOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct DivOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef DivOptionsT NativeTableType;
   typedef DivOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -7385,89 +10044,89 @@
   tflite::ActivationFunctionType fused_activation_function() const {
     return static_cast<tflite::ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) &&
            verifier.EndTable();
   }
-  DivOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(DivOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<DivOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  DivOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(DivOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<DivOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct DivOptionsBuilder {
   typedef DivOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) {
     fbb_.AddElement<int8_t>(DivOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
   }
-  explicit DivOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit DivOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<DivOptions> Finish() {
+  ::flatbuffers::Offset<DivOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<DivOptions>(end);
+    auto o = ::flatbuffers::Offset<DivOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<DivOptions> CreateDivOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<DivOptions> CreateDivOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) {
   DivOptionsBuilder builder_(_fbb);
   builder_.add_fused_activation_function(fused_activation_function);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<DivOptions> CreateDivOptions(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<DivOptions> CreateDivOptions(::flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct TopKV2OptionsT : public flatbuffers::NativeTable {
+struct TopKV2OptionsT : public ::flatbuffers::NativeTable {
   typedef TopKV2Options TableType;
 };
 
-struct TopKV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct TopKV2Options FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef TopKV2OptionsT NativeTableType;
   typedef TopKV2OptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  TopKV2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(TopKV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<TopKV2Options> Pack(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  TopKV2OptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(TopKV2OptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<TopKV2Options> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct TopKV2OptionsBuilder {
   typedef TopKV2Options Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit TopKV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit TopKV2OptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<TopKV2Options> Finish() {
+  ::flatbuffers::Offset<TopKV2Options> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<TopKV2Options>(end);
+    auto o = ::flatbuffers::Offset<TopKV2Options>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<TopKV2Options> CreateTopKV2Options(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<TopKV2Options> CreateTopKV2Options(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   TopKV2OptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<TopKV2Options> CreateTopKV2Options(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<TopKV2Options> CreateTopKV2Options(::flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct EmbeddingLookupSparseOptionsT : public flatbuffers::NativeTable {
+struct EmbeddingLookupSparseOptionsT : public ::flatbuffers::NativeTable {
   typedef EmbeddingLookupSparseOptions TableType;
   tflite::CombinerType combiner = tflite::CombinerType_SUM;
 };
 
-struct EmbeddingLookupSparseOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct EmbeddingLookupSparseOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef EmbeddingLookupSparseOptionsT NativeTableType;
   typedef EmbeddingLookupSparseOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -7476,51 +10135,51 @@
   tflite::CombinerType combiner() const {
     return static_cast<tflite::CombinerType>(GetField<int8_t>(VT_COMBINER, 0));
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int8_t>(verifier, VT_COMBINER, 1) &&
            verifier.EndTable();
   }
-  EmbeddingLookupSparseOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(EmbeddingLookupSparseOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<EmbeddingLookupSparseOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  EmbeddingLookupSparseOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(EmbeddingLookupSparseOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<EmbeddingLookupSparseOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct EmbeddingLookupSparseOptionsBuilder {
   typedef EmbeddingLookupSparseOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_combiner(tflite::CombinerType combiner) {
     fbb_.AddElement<int8_t>(EmbeddingLookupSparseOptions::VT_COMBINER, static_cast<int8_t>(combiner), 0);
   }
-  explicit EmbeddingLookupSparseOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit EmbeddingLookupSparseOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<EmbeddingLookupSparseOptions> Finish() {
+  ::flatbuffers::Offset<EmbeddingLookupSparseOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<EmbeddingLookupSparseOptions>(end);
+    auto o = ::flatbuffers::Offset<EmbeddingLookupSparseOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<EmbeddingLookupSparseOptions> CreateEmbeddingLookupSparseOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<EmbeddingLookupSparseOptions> CreateEmbeddingLookupSparseOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     tflite::CombinerType combiner = tflite::CombinerType_SUM) {
   EmbeddingLookupSparseOptionsBuilder builder_(_fbb);
   builder_.add_combiner(combiner);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<EmbeddingLookupSparseOptions> CreateEmbeddingLookupSparseOptions(flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<EmbeddingLookupSparseOptions> CreateEmbeddingLookupSparseOptions(::flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct GatherOptionsT : public flatbuffers::NativeTable {
+struct GatherOptionsT : public ::flatbuffers::NativeTable {
   typedef GatherOptions TableType;
   int32_t axis = 0;
   int32_t batch_dims = 0;
 };
 
-struct GatherOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct GatherOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef GatherOptionsT NativeTableType;
   typedef GatherOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -7533,40 +10192,40 @@
   int32_t batch_dims() const {
     return GetField<int32_t>(VT_BATCH_DIMS, 0);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int32_t>(verifier, VT_AXIS, 4) &&
            VerifyField<int32_t>(verifier, VT_BATCH_DIMS, 4) &&
            verifier.EndTable();
   }
-  GatherOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(GatherOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<GatherOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  GatherOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(GatherOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<GatherOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct GatherOptionsBuilder {
   typedef GatherOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_axis(int32_t axis) {
     fbb_.AddElement<int32_t>(GatherOptions::VT_AXIS, axis, 0);
   }
   void add_batch_dims(int32_t batch_dims) {
     fbb_.AddElement<int32_t>(GatherOptions::VT_BATCH_DIMS, batch_dims, 0);
   }
-  explicit GatherOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit GatherOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<GatherOptions> Finish() {
+  ::flatbuffers::Offset<GatherOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<GatherOptions>(end);
+    auto o = ::flatbuffers::Offset<GatherOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<GatherOptions> CreateGatherOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<GatherOptions> CreateGatherOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     int32_t axis = 0,
     int32_t batch_dims = 0) {
   GatherOptionsBuilder builder_(_fbb);
@@ -7575,131 +10234,131 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<GatherOptions> CreateGatherOptions(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<GatherOptions> CreateGatherOptions(::flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct TransposeOptionsT : public flatbuffers::NativeTable {
+struct TransposeOptionsT : public ::flatbuffers::NativeTable {
   typedef TransposeOptions TableType;
 };
 
-struct TransposeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct TransposeOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef TransposeOptionsT NativeTableType;
   typedef TransposeOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  TransposeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(TransposeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<TransposeOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  TransposeOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(TransposeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<TransposeOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct TransposeOptionsBuilder {
   typedef TransposeOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit TransposeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit TransposeOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<TransposeOptions> Finish() {
+  ::flatbuffers::Offset<TransposeOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<TransposeOptions>(end);
+    auto o = ::flatbuffers::Offset<TransposeOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<TransposeOptions> CreateTransposeOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<TransposeOptions> CreateTransposeOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   TransposeOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<TransposeOptions> CreateTransposeOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<TransposeOptions> CreateTransposeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct ExpOptionsT : public flatbuffers::NativeTable {
+struct ExpOptionsT : public ::flatbuffers::NativeTable {
   typedef ExpOptions TableType;
 };
 
-struct ExpOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct ExpOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef ExpOptionsT NativeTableType;
   typedef ExpOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  ExpOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(ExpOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<ExpOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  ExpOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(ExpOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<ExpOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct ExpOptionsBuilder {
   typedef ExpOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit ExpOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit ExpOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<ExpOptions> Finish() {
+  ::flatbuffers::Offset<ExpOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<ExpOptions>(end);
+    auto o = ::flatbuffers::Offset<ExpOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<ExpOptions> CreateExpOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<ExpOptions> CreateExpOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   ExpOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<ExpOptions> CreateExpOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<ExpOptions> CreateExpOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct CosOptionsT : public flatbuffers::NativeTable {
+struct CosOptionsT : public ::flatbuffers::NativeTable {
   typedef CosOptions TableType;
 };
 
-struct CosOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct CosOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef CosOptionsT NativeTableType;
   typedef CosOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  CosOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(CosOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<CosOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  CosOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(CosOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<CosOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct CosOptionsBuilder {
   typedef CosOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit CosOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit CosOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<CosOptions> Finish() {
+  ::flatbuffers::Offset<CosOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<CosOptions>(end);
+    auto o = ::flatbuffers::Offset<CosOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<CosOptions> CreateCosOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<CosOptions> CreateCosOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   CosOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<CosOptions> CreateCosOptions(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<CosOptions> CreateCosOptions(::flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct ReducerOptionsT : public flatbuffers::NativeTable {
+struct ReducerOptionsT : public ::flatbuffers::NativeTable {
   typedef ReducerOptions TableType;
   bool keep_dims = false;
 };
 
-struct ReducerOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct ReducerOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef ReducerOptionsT NativeTableType;
   typedef ReducerOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -7708,97 +10367,97 @@
   bool keep_dims() const {
     return GetField<uint8_t>(VT_KEEP_DIMS, 0) != 0;
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<uint8_t>(verifier, VT_KEEP_DIMS, 1) &&
            verifier.EndTable();
   }
-  ReducerOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(ReducerOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<ReducerOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  ReducerOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(ReducerOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<ReducerOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct ReducerOptionsBuilder {
   typedef ReducerOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_keep_dims(bool keep_dims) {
     fbb_.AddElement<uint8_t>(ReducerOptions::VT_KEEP_DIMS, static_cast<uint8_t>(keep_dims), 0);
   }
-  explicit ReducerOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit ReducerOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<ReducerOptions> Finish() {
+  ::flatbuffers::Offset<ReducerOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<ReducerOptions>(end);
+    auto o = ::flatbuffers::Offset<ReducerOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<ReducerOptions> CreateReducerOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<ReducerOptions> CreateReducerOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     bool keep_dims = false) {
   ReducerOptionsBuilder builder_(_fbb);
   builder_.add_keep_dims(keep_dims);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<ReducerOptions> CreateReducerOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<ReducerOptions> CreateReducerOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct SqueezeOptionsT : public flatbuffers::NativeTable {
+struct SqueezeOptionsT : public ::flatbuffers::NativeTable {
   typedef SqueezeOptions TableType;
   std::vector<int32_t> squeeze_dims{};
 };
 
-struct SqueezeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct SqueezeOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef SqueezeOptionsT NativeTableType;
   typedef SqueezeOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
     VT_SQUEEZE_DIMS = 4
   };
-  const flatbuffers::Vector<int32_t> *squeeze_dims() const {
-    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SQUEEZE_DIMS);
+  const ::flatbuffers::Vector<int32_t> *squeeze_dims() const {
+    return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_SQUEEZE_DIMS);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyOffset(verifier, VT_SQUEEZE_DIMS) &&
            verifier.VerifyVector(squeeze_dims()) &&
            verifier.EndTable();
   }
-  SqueezeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(SqueezeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<SqueezeOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  SqueezeOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(SqueezeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<SqueezeOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct SqueezeOptionsBuilder {
   typedef SqueezeOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_squeeze_dims(flatbuffers::Offset<flatbuffers::Vector<int32_t>> squeeze_dims) {
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_squeeze_dims(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> squeeze_dims) {
     fbb_.AddOffset(SqueezeOptions::VT_SQUEEZE_DIMS, squeeze_dims);
   }
-  explicit SqueezeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit SqueezeOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<SqueezeOptions> Finish() {
+  ::flatbuffers::Offset<SqueezeOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<SqueezeOptions>(end);
+    auto o = ::flatbuffers::Offset<SqueezeOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<SqueezeOptions> CreateSqueezeOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::Vector<int32_t>> squeeze_dims = 0) {
+inline ::flatbuffers::Offset<SqueezeOptions> CreateSqueezeOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> squeeze_dims = 0) {
   SqueezeOptionsBuilder builder_(_fbb);
   builder_.add_squeeze_dims(squeeze_dims);
   return builder_.Finish();
 }
 
-inline flatbuffers::Offset<SqueezeOptions> CreateSqueezeOptionsDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<SqueezeOptions> CreateSqueezeOptionsDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     const std::vector<int32_t> *squeeze_dims = nullptr) {
   auto squeeze_dims__ = squeeze_dims ? _fbb.CreateVector<int32_t>(*squeeze_dims) : 0;
   return tflite::CreateSqueezeOptions(
@@ -7806,14 +10465,14 @@
       squeeze_dims__);
 }
 
-flatbuffers::Offset<SqueezeOptions> CreateSqueezeOptions(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<SqueezeOptions> CreateSqueezeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct SplitOptionsT : public flatbuffers::NativeTable {
+struct SplitOptionsT : public ::flatbuffers::NativeTable {
   typedef SplitOptions TableType;
   int32_t num_splits = 0;
 };
 
-struct SplitOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct SplitOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef SplitOptionsT NativeTableType;
   typedef SplitOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -7822,50 +10481,50 @@
   int32_t num_splits() const {
     return GetField<int32_t>(VT_NUM_SPLITS, 0);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int32_t>(verifier, VT_NUM_SPLITS, 4) &&
            verifier.EndTable();
   }
-  SplitOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(SplitOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<SplitOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  SplitOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(SplitOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<SplitOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct SplitOptionsBuilder {
   typedef SplitOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_num_splits(int32_t num_splits) {
     fbb_.AddElement<int32_t>(SplitOptions::VT_NUM_SPLITS, num_splits, 0);
   }
-  explicit SplitOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit SplitOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<SplitOptions> Finish() {
+  ::flatbuffers::Offset<SplitOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<SplitOptions>(end);
+    auto o = ::flatbuffers::Offset<SplitOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<SplitOptions> CreateSplitOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<SplitOptions> CreateSplitOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     int32_t num_splits = 0) {
   SplitOptionsBuilder builder_(_fbb);
   builder_.add_num_splits(num_splits);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<SplitOptions> CreateSplitOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<SplitOptions> CreateSplitOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct SplitVOptionsT : public flatbuffers::NativeTable {
+struct SplitVOptionsT : public ::flatbuffers::NativeTable {
   typedef SplitVOptions TableType;
   int32_t num_splits = 0;
 };
 
-struct SplitVOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct SplitVOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef SplitVOptionsT NativeTableType;
   typedef SplitVOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -7874,45 +10533,45 @@
   int32_t num_splits() const {
     return GetField<int32_t>(VT_NUM_SPLITS, 0);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int32_t>(verifier, VT_NUM_SPLITS, 4) &&
            verifier.EndTable();
   }
-  SplitVOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(SplitVOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<SplitVOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  SplitVOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(SplitVOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<SplitVOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct SplitVOptionsBuilder {
   typedef SplitVOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_num_splits(int32_t num_splits) {
     fbb_.AddElement<int32_t>(SplitVOptions::VT_NUM_SPLITS, num_splits, 0);
   }
-  explicit SplitVOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit SplitVOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<SplitVOptions> Finish() {
+  ::flatbuffers::Offset<SplitVOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<SplitVOptions>(end);
+    auto o = ::flatbuffers::Offset<SplitVOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<SplitVOptions> CreateSplitVOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<SplitVOptions> CreateSplitVOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     int32_t num_splits = 0) {
   SplitVOptionsBuilder builder_(_fbb);
   builder_.add_num_splits(num_splits);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<SplitVOptions> CreateSplitVOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<SplitVOptions> CreateSplitVOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct StridedSliceOptionsT : public flatbuffers::NativeTable {
+struct StridedSliceOptionsT : public ::flatbuffers::NativeTable {
   typedef StridedSliceOptions TableType;
   int32_t begin_mask = 0;
   int32_t end_mask = 0;
@@ -7922,7 +10581,7 @@
   bool offset = false;
 };
 
-struct StridedSliceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct StridedSliceOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef StridedSliceOptionsT NativeTableType;
   typedef StridedSliceOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -7951,7 +10610,7 @@
   bool offset() const {
     return GetField<uint8_t>(VT_OFFSET, 0) != 0;
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int32_t>(verifier, VT_BEGIN_MASK, 4) &&
            VerifyField<int32_t>(verifier, VT_END_MASK, 4) &&
@@ -7961,15 +10620,15 @@
            VerifyField<uint8_t>(verifier, VT_OFFSET, 1) &&
            verifier.EndTable();
   }
-  StridedSliceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(StridedSliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<StridedSliceOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  StridedSliceOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(StridedSliceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<StridedSliceOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct StridedSliceOptionsBuilder {
   typedef StridedSliceOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_begin_mask(int32_t begin_mask) {
     fbb_.AddElement<int32_t>(StridedSliceOptions::VT_BEGIN_MASK, begin_mask, 0);
   }
@@ -7988,19 +10647,19 @@
   void add_offset(bool offset) {
     fbb_.AddElement<uint8_t>(StridedSliceOptions::VT_OFFSET, static_cast<uint8_t>(offset), 0);
   }
-  explicit StridedSliceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit StridedSliceOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<StridedSliceOptions> Finish() {
+  ::flatbuffers::Offset<StridedSliceOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<StridedSliceOptions>(end);
+    auto o = ::flatbuffers::Offset<StridedSliceOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<StridedSliceOptions> CreateStridedSliceOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<StridedSliceOptions> CreateStridedSliceOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     int32_t begin_mask = 0,
     int32_t end_mask = 0,
     int32_t ellipsis_mask = 0,
@@ -8017,54 +10676,54 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<StridedSliceOptions> CreateStridedSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<StridedSliceOptions> CreateStridedSliceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct LogSoftmaxOptionsT : public flatbuffers::NativeTable {
+struct LogSoftmaxOptionsT : public ::flatbuffers::NativeTable {
   typedef LogSoftmaxOptions TableType;
 };
 
-struct LogSoftmaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct LogSoftmaxOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef LogSoftmaxOptionsT NativeTableType;
   typedef LogSoftmaxOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  LogSoftmaxOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(LogSoftmaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<LogSoftmaxOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  LogSoftmaxOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(LogSoftmaxOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<LogSoftmaxOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct LogSoftmaxOptionsBuilder {
   typedef LogSoftmaxOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit LogSoftmaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit LogSoftmaxOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<LogSoftmaxOptions> Finish() {
+  ::flatbuffers::Offset<LogSoftmaxOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<LogSoftmaxOptions>(end);
+    auto o = ::flatbuffers::Offset<LogSoftmaxOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<LogSoftmaxOptions> CreateLogSoftmaxOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<LogSoftmaxOptions> CreateLogSoftmaxOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   LogSoftmaxOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<LogSoftmaxOptions> CreateLogSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<LogSoftmaxOptions> CreateLogSoftmaxOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct CastOptionsT : public flatbuffers::NativeTable {
+struct CastOptionsT : public ::flatbuffers::NativeTable {
   typedef CastOptions TableType;
   tflite::TensorType in_data_type = tflite::TensorType_FLOAT32;
   tflite::TensorType out_data_type = tflite::TensorType_FLOAT32;
 };
 
-struct CastOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct CastOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef CastOptionsT NativeTableType;
   typedef CastOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -8077,40 +10736,40 @@
   tflite::TensorType out_data_type() const {
     return static_cast<tflite::TensorType>(GetField<int8_t>(VT_OUT_DATA_TYPE, 0));
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int8_t>(verifier, VT_IN_DATA_TYPE, 1) &&
            VerifyField<int8_t>(verifier, VT_OUT_DATA_TYPE, 1) &&
            verifier.EndTable();
   }
-  CastOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(CastOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<CastOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  CastOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(CastOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<CastOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct CastOptionsBuilder {
   typedef CastOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_in_data_type(tflite::TensorType in_data_type) {
     fbb_.AddElement<int8_t>(CastOptions::VT_IN_DATA_TYPE, static_cast<int8_t>(in_data_type), 0);
   }
   void add_out_data_type(tflite::TensorType out_data_type) {
     fbb_.AddElement<int8_t>(CastOptions::VT_OUT_DATA_TYPE, static_cast<int8_t>(out_data_type), 0);
   }
-  explicit CastOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit CastOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<CastOptions> Finish() {
+  ::flatbuffers::Offset<CastOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<CastOptions>(end);
+    auto o = ::flatbuffers::Offset<CastOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<CastOptions> CreateCastOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<CastOptions> CreateCastOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     tflite::TensorType in_data_type = tflite::TensorType_FLOAT32,
     tflite::TensorType out_data_type = tflite::TensorType_FLOAT32) {
   CastOptionsBuilder builder_(_fbb);
@@ -8119,131 +10778,131 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<CastOptions> CreateCastOptions(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<CastOptions> CreateCastOptions(::flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct DequantizeOptionsT : public flatbuffers::NativeTable {
+struct DequantizeOptionsT : public ::flatbuffers::NativeTable {
   typedef DequantizeOptions TableType;
 };
 
-struct DequantizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct DequantizeOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef DequantizeOptionsT NativeTableType;
   typedef DequantizeOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  DequantizeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(DequantizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<DequantizeOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  DequantizeOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(DequantizeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<DequantizeOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct DequantizeOptionsBuilder {
   typedef DequantizeOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit DequantizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit DequantizeOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<DequantizeOptions> Finish() {
+  ::flatbuffers::Offset<DequantizeOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<DequantizeOptions>(end);
+    auto o = ::flatbuffers::Offset<DequantizeOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<DequantizeOptions> CreateDequantizeOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<DequantizeOptions> CreateDequantizeOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   DequantizeOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<DequantizeOptions> CreateDequantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<DequantizeOptions> CreateDequantizeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct MaximumMinimumOptionsT : public flatbuffers::NativeTable {
+struct MaximumMinimumOptionsT : public ::flatbuffers::NativeTable {
   typedef MaximumMinimumOptions TableType;
 };
 
-struct MaximumMinimumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct MaximumMinimumOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef MaximumMinimumOptionsT NativeTableType;
   typedef MaximumMinimumOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  MaximumMinimumOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(MaximumMinimumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<MaximumMinimumOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  MaximumMinimumOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(MaximumMinimumOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<MaximumMinimumOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct MaximumMinimumOptionsBuilder {
   typedef MaximumMinimumOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit MaximumMinimumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit MaximumMinimumOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<MaximumMinimumOptions> Finish() {
+  ::flatbuffers::Offset<MaximumMinimumOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<MaximumMinimumOptions>(end);
+    auto o = ::flatbuffers::Offset<MaximumMinimumOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<MaximumMinimumOptions> CreateMaximumMinimumOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<MaximumMinimumOptions> CreateMaximumMinimumOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   MaximumMinimumOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<MaximumMinimumOptions> CreateMaximumMinimumOptions(flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<MaximumMinimumOptions> CreateMaximumMinimumOptions(::flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct TileOptionsT : public flatbuffers::NativeTable {
+struct TileOptionsT : public ::flatbuffers::NativeTable {
   typedef TileOptions TableType;
 };
 
-struct TileOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct TileOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef TileOptionsT NativeTableType;
   typedef TileOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  TileOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(TileOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<TileOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  TileOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(TileOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<TileOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct TileOptionsBuilder {
   typedef TileOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit TileOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit TileOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<TileOptions> Finish() {
+  ::flatbuffers::Offset<TileOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<TileOptions>(end);
+    auto o = ::flatbuffers::Offset<TileOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<TileOptions> CreateTileOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<TileOptions> CreateTileOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   TileOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<TileOptions> CreateTileOptions(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<TileOptions> CreateTileOptions(::flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct ArgMaxOptionsT : public flatbuffers::NativeTable {
+struct ArgMaxOptionsT : public ::flatbuffers::NativeTable {
   typedef ArgMaxOptions TableType;
   tflite::TensorType output_type = tflite::TensorType_FLOAT32;
 };
 
-struct ArgMaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct ArgMaxOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef ArgMaxOptionsT NativeTableType;
   typedef ArgMaxOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -8252,50 +10911,50 @@
   tflite::TensorType output_type() const {
     return static_cast<tflite::TensorType>(GetField<int8_t>(VT_OUTPUT_TYPE, 0));
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int8_t>(verifier, VT_OUTPUT_TYPE, 1) &&
            verifier.EndTable();
   }
-  ArgMaxOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(ArgMaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<ArgMaxOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  ArgMaxOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(ArgMaxOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<ArgMaxOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct ArgMaxOptionsBuilder {
   typedef ArgMaxOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_output_type(tflite::TensorType output_type) {
     fbb_.AddElement<int8_t>(ArgMaxOptions::VT_OUTPUT_TYPE, static_cast<int8_t>(output_type), 0);
   }
-  explicit ArgMaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit ArgMaxOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<ArgMaxOptions> Finish() {
+  ::flatbuffers::Offset<ArgMaxOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<ArgMaxOptions>(end);
+    auto o = ::flatbuffers::Offset<ArgMaxOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<ArgMaxOptions> CreateArgMaxOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<ArgMaxOptions> CreateArgMaxOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     tflite::TensorType output_type = tflite::TensorType_FLOAT32) {
   ArgMaxOptionsBuilder builder_(_fbb);
   builder_.add_output_type(output_type);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<ArgMaxOptions> CreateArgMaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<ArgMaxOptions> CreateArgMaxOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct ArgMinOptionsT : public flatbuffers::NativeTable {
+struct ArgMinOptionsT : public ::flatbuffers::NativeTable {
   typedef ArgMinOptions TableType;
   tflite::TensorType output_type = tflite::TensorType_FLOAT32;
 };
 
-struct ArgMinOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct ArgMinOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef ArgMinOptionsT NativeTableType;
   typedef ArgMinOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -8304,333 +10963,335 @@
   tflite::TensorType output_type() const {
     return static_cast<tflite::TensorType>(GetField<int8_t>(VT_OUTPUT_TYPE, 0));
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int8_t>(verifier, VT_OUTPUT_TYPE, 1) &&
            verifier.EndTable();
   }
-  ArgMinOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(ArgMinOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<ArgMinOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  ArgMinOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(ArgMinOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<ArgMinOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct ArgMinOptionsBuilder {
   typedef ArgMinOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_output_type(tflite::TensorType output_type) {
     fbb_.AddElement<int8_t>(ArgMinOptions::VT_OUTPUT_TYPE, static_cast<int8_t>(output_type), 0);
   }
-  explicit ArgMinOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit ArgMinOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<ArgMinOptions> Finish() {
+  ::flatbuffers::Offset<ArgMinOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<ArgMinOptions>(end);
+    auto o = ::flatbuffers::Offset<ArgMinOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<ArgMinOptions> CreateArgMinOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<ArgMinOptions> CreateArgMinOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     tflite::TensorType output_type = tflite::TensorType_FLOAT32) {
   ArgMinOptionsBuilder builder_(_fbb);
   builder_.add_output_type(output_type);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<ArgMinOptions> CreateArgMinOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<ArgMinOptions> CreateArgMinOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct GreaterOptionsT : public flatbuffers::NativeTable {
+struct GreaterOptionsT : public ::flatbuffers::NativeTable {
   typedef GreaterOptions TableType;
 };
 
-struct GreaterOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct GreaterOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef GreaterOptionsT NativeTableType;
   typedef GreaterOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  GreaterOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(GreaterOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<GreaterOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  GreaterOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(GreaterOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<GreaterOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct GreaterOptionsBuilder {
   typedef GreaterOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit GreaterOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit GreaterOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<GreaterOptions> Finish() {
+  ::flatbuffers::Offset<GreaterOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<GreaterOptions>(end);
+    auto o = ::flatbuffers::Offset<GreaterOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<GreaterOptions> CreateGreaterOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<GreaterOptions> CreateGreaterOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   GreaterOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<GreaterOptions> CreateGreaterOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<GreaterOptions> CreateGreaterOptions(::flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct GreaterEqualOptionsT : public flatbuffers::NativeTable {
+struct GreaterEqualOptionsT : public ::flatbuffers::NativeTable {
   typedef GreaterEqualOptions TableType;
 };
 
-struct GreaterEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct GreaterEqualOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef GreaterEqualOptionsT NativeTableType;
   typedef GreaterEqualOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  GreaterEqualOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(GreaterEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<GreaterEqualOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  GreaterEqualOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(GreaterEqualOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<GreaterEqualOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct GreaterEqualOptionsBuilder {
   typedef GreaterEqualOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit GreaterEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit GreaterEqualOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<GreaterEqualOptions> Finish() {
+  ::flatbuffers::Offset<GreaterEqualOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<GreaterEqualOptions>(end);
+    auto o = ::flatbuffers::Offset<GreaterEqualOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<GreaterEqualOptions> CreateGreaterEqualOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<GreaterEqualOptions> CreateGreaterEqualOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   GreaterEqualOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<GreaterEqualOptions> CreateGreaterEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<GreaterEqualOptions> CreateGreaterEqualOptions(::flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct LessOptionsT : public flatbuffers::NativeTable {
+struct LessOptionsT : public ::flatbuffers::NativeTable {
   typedef LessOptions TableType;
 };
 
-struct LessOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct LessOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef LessOptionsT NativeTableType;
   typedef LessOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  LessOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(LessOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<LessOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  LessOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(LessOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<LessOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct LessOptionsBuilder {
   typedef LessOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit LessOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit LessOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<LessOptions> Finish() {
+  ::flatbuffers::Offset<LessOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<LessOptions>(end);
+    auto o = ::flatbuffers::Offset<LessOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<LessOptions> CreateLessOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<LessOptions> CreateLessOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   LessOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<LessOptions> CreateLessOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<LessOptions> CreateLessOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct LessEqualOptionsT : public flatbuffers::NativeTable {
+struct LessEqualOptionsT : public ::flatbuffers::NativeTable {
   typedef LessEqualOptions TableType;
 };
 
-struct LessEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct LessEqualOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef LessEqualOptionsT NativeTableType;
   typedef LessEqualOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  LessEqualOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(LessEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<LessEqualOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  LessEqualOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(LessEqualOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<LessEqualOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct LessEqualOptionsBuilder {
   typedef LessEqualOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit LessEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit LessEqualOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<LessEqualOptions> Finish() {
+  ::flatbuffers::Offset<LessEqualOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<LessEqualOptions>(end);
+    auto o = ::flatbuffers::Offset<LessEqualOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<LessEqualOptions> CreateLessEqualOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<LessEqualOptions> CreateLessEqualOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   LessEqualOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<LessEqualOptions> CreateLessEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<LessEqualOptions> CreateLessEqualOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct NegOptionsT : public flatbuffers::NativeTable {
+struct NegOptionsT : public ::flatbuffers::NativeTable {
   typedef NegOptions TableType;
 };
 
-struct NegOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct NegOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef NegOptionsT NativeTableType;
   typedef NegOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  NegOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(NegOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<NegOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  NegOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(NegOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<NegOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct NegOptionsBuilder {
   typedef NegOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit NegOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit NegOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<NegOptions> Finish() {
+  ::flatbuffers::Offset<NegOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<NegOptions>(end);
+    auto o = ::flatbuffers::Offset<NegOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<NegOptions> CreateNegOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<NegOptions> CreateNegOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   NegOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<NegOptions> CreateNegOptions(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<NegOptions> CreateNegOptions(::flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct SelectOptionsT : public flatbuffers::NativeTable {
+struct SelectOptionsT : public ::flatbuffers::NativeTable {
   typedef SelectOptions TableType;
 };
 
-struct SelectOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct SelectOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef SelectOptionsT NativeTableType;
   typedef SelectOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  SelectOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(SelectOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<SelectOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  SelectOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(SelectOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<SelectOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct SelectOptionsBuilder {
   typedef SelectOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit SelectOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit SelectOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<SelectOptions> Finish() {
+  ::flatbuffers::Offset<SelectOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<SelectOptions>(end);
+    auto o = ::flatbuffers::Offset<SelectOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<SelectOptions> CreateSelectOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<SelectOptions> CreateSelectOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   SelectOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<SelectOptions> CreateSelectOptions(flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<SelectOptions> CreateSelectOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct SliceOptionsT : public flatbuffers::NativeTable {
+struct SliceOptionsT : public ::flatbuffers::NativeTable {
   typedef SliceOptions TableType;
 };
 
-struct SliceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct SliceOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef SliceOptionsT NativeTableType;
   typedef SliceOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  SliceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(SliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<SliceOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  SliceOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(SliceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<SliceOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct SliceOptionsBuilder {
   typedef SliceOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit SliceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit SliceOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<SliceOptions> Finish() {
+  ::flatbuffers::Offset<SliceOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<SliceOptions>(end);
+    auto o = ::flatbuffers::Offset<SliceOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<SliceOptions> CreateSliceOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<SliceOptions> CreateSliceOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   SliceOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<SliceOptions> CreateSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<SliceOptions> CreateSliceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct TransposeConvOptionsT : public flatbuffers::NativeTable {
+struct TransposeConvOptionsT : public ::flatbuffers::NativeTable {
   typedef TransposeConvOptions TableType;
   tflite::Padding padding = tflite::Padding_SAME;
   int32_t stride_w = 0;
   int32_t stride_h = 0;
   tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE;
+  tflite::TensorType quantized_bias_type = tflite::TensorType_FLOAT32;
 };
 
-struct TransposeConvOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct TransposeConvOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef TransposeConvOptionsT NativeTableType;
   typedef TransposeConvOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
     VT_PADDING = 4,
     VT_STRIDE_W = 6,
     VT_STRIDE_H = 8,
-    VT_FUSED_ACTIVATION_FUNCTION = 10
+    VT_FUSED_ACTIVATION_FUNCTION = 10,
+    VT_QUANTIZED_BIAS_TYPE = 12
   };
   tflite::Padding padding() const {
     return static_cast<tflite::Padding>(GetField<int8_t>(VT_PADDING, 0));
@@ -8644,23 +11305,27 @@
   tflite::ActivationFunctionType fused_activation_function() const {
     return static_cast<tflite::ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  tflite::TensorType quantized_bias_type() const {
+    return static_cast<tflite::TensorType>(GetField<int8_t>(VT_QUANTIZED_BIAS_TYPE, 0));
+  }
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int8_t>(verifier, VT_PADDING, 1) &&
            VerifyField<int32_t>(verifier, VT_STRIDE_W, 4) &&
            VerifyField<int32_t>(verifier, VT_STRIDE_H, 4) &&
            VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) &&
+           VerifyField<int8_t>(verifier, VT_QUANTIZED_BIAS_TYPE, 1) &&
            verifier.EndTable();
   }
-  TransposeConvOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(TransposeConvOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<TransposeConvOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  TransposeConvOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(TransposeConvOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<TransposeConvOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct TransposeConvOptionsBuilder {
   typedef TransposeConvOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_padding(tflite::Padding padding) {
     fbb_.AddElement<int8_t>(TransposeConvOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
   }
@@ -8673,78 +11338,83 @@
   void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) {
     fbb_.AddElement<int8_t>(TransposeConvOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
   }
-  explicit TransposeConvOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  void add_quantized_bias_type(tflite::TensorType quantized_bias_type) {
+    fbb_.AddElement<int8_t>(TransposeConvOptions::VT_QUANTIZED_BIAS_TYPE, static_cast<int8_t>(quantized_bias_type), 0);
+  }
+  explicit TransposeConvOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<TransposeConvOptions> Finish() {
+  ::flatbuffers::Offset<TransposeConvOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<TransposeConvOptions>(end);
+    auto o = ::flatbuffers::Offset<TransposeConvOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<TransposeConvOptions> CreateTransposeConvOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<TransposeConvOptions> CreateTransposeConvOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     tflite::Padding padding = tflite::Padding_SAME,
     int32_t stride_w = 0,
     int32_t stride_h = 0,
-    tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) {
+    tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE,
+    tflite::TensorType quantized_bias_type = tflite::TensorType_FLOAT32) {
   TransposeConvOptionsBuilder builder_(_fbb);
   builder_.add_stride_h(stride_h);
   builder_.add_stride_w(stride_w);
+  builder_.add_quantized_bias_type(quantized_bias_type);
   builder_.add_fused_activation_function(fused_activation_function);
   builder_.add_padding(padding);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<TransposeConvOptions> CreateTransposeConvOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<TransposeConvOptions> CreateTransposeConvOptions(::flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct ExpandDimsOptionsT : public flatbuffers::NativeTable {
+struct ExpandDimsOptionsT : public ::flatbuffers::NativeTable {
   typedef ExpandDimsOptions TableType;
 };
 
-struct ExpandDimsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct ExpandDimsOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef ExpandDimsOptionsT NativeTableType;
   typedef ExpandDimsOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  ExpandDimsOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(ExpandDimsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<ExpandDimsOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  ExpandDimsOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(ExpandDimsOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<ExpandDimsOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct ExpandDimsOptionsBuilder {
   typedef ExpandDimsOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit ExpandDimsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit ExpandDimsOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<ExpandDimsOptions> Finish() {
+  ::flatbuffers::Offset<ExpandDimsOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<ExpandDimsOptions>(end);
+    auto o = ::flatbuffers::Offset<ExpandDimsOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<ExpandDimsOptions> CreateExpandDimsOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<ExpandDimsOptions> CreateExpandDimsOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   ExpandDimsOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<ExpandDimsOptions> CreateExpandDimsOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<ExpandDimsOptions> CreateExpandDimsOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct SparseToDenseOptionsT : public flatbuffers::NativeTable {
+struct SparseToDenseOptionsT : public ::flatbuffers::NativeTable {
   typedef SparseToDenseOptions TableType;
   bool validate_indices = false;
 };
 
-struct SparseToDenseOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct SparseToDenseOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef SparseToDenseOptionsT NativeTableType;
   typedef SparseToDenseOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -8753,128 +11423,128 @@
   bool validate_indices() const {
     return GetField<uint8_t>(VT_VALIDATE_INDICES, 0) != 0;
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<uint8_t>(verifier, VT_VALIDATE_INDICES, 1) &&
            verifier.EndTable();
   }
-  SparseToDenseOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(SparseToDenseOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<SparseToDenseOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  SparseToDenseOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(SparseToDenseOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<SparseToDenseOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct SparseToDenseOptionsBuilder {
   typedef SparseToDenseOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_validate_indices(bool validate_indices) {
     fbb_.AddElement<uint8_t>(SparseToDenseOptions::VT_VALIDATE_INDICES, static_cast<uint8_t>(validate_indices), 0);
   }
-  explicit SparseToDenseOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit SparseToDenseOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<SparseToDenseOptions> Finish() {
+  ::flatbuffers::Offset<SparseToDenseOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<SparseToDenseOptions>(end);
+    auto o = ::flatbuffers::Offset<SparseToDenseOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<SparseToDenseOptions> CreateSparseToDenseOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<SparseToDenseOptions> CreateSparseToDenseOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     bool validate_indices = false) {
   SparseToDenseOptionsBuilder builder_(_fbb);
   builder_.add_validate_indices(validate_indices);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<SparseToDenseOptions> CreateSparseToDenseOptions(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<SparseToDenseOptions> CreateSparseToDenseOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct EqualOptionsT : public flatbuffers::NativeTable {
+struct EqualOptionsT : public ::flatbuffers::NativeTable {
   typedef EqualOptions TableType;
 };
 
-struct EqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct EqualOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef EqualOptionsT NativeTableType;
   typedef EqualOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  EqualOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(EqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<EqualOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  EqualOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(EqualOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<EqualOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct EqualOptionsBuilder {
   typedef EqualOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit EqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit EqualOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<EqualOptions> Finish() {
+  ::flatbuffers::Offset<EqualOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<EqualOptions>(end);
+    auto o = ::flatbuffers::Offset<EqualOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<EqualOptions> CreateEqualOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<EqualOptions> CreateEqualOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   EqualOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<EqualOptions> CreateEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<EqualOptions> CreateEqualOptions(::flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct NotEqualOptionsT : public flatbuffers::NativeTable {
+struct NotEqualOptionsT : public ::flatbuffers::NativeTable {
   typedef NotEqualOptions TableType;
 };
 
-struct NotEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct NotEqualOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef NotEqualOptionsT NativeTableType;
   typedef NotEqualOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  NotEqualOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(NotEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<NotEqualOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  NotEqualOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(NotEqualOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<NotEqualOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct NotEqualOptionsBuilder {
   typedef NotEqualOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit NotEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit NotEqualOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<NotEqualOptions> Finish() {
+  ::flatbuffers::Offset<NotEqualOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<NotEqualOptions>(end);
+    auto o = ::flatbuffers::Offset<NotEqualOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<NotEqualOptions> CreateNotEqualOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<NotEqualOptions> CreateNotEqualOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   NotEqualOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<NotEqualOptions> CreateNotEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<NotEqualOptions> CreateNotEqualOptions(::flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct ShapeOptionsT : public flatbuffers::NativeTable {
+struct ShapeOptionsT : public ::flatbuffers::NativeTable {
   typedef ShapeOptions TableType;
   tflite::TensorType out_type = tflite::TensorType_FLOAT32;
 };
 
-struct ShapeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct ShapeOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef ShapeOptionsT NativeTableType;
   typedef ShapeOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -8883,123 +11553,123 @@
   tflite::TensorType out_type() const {
     return static_cast<tflite::TensorType>(GetField<int8_t>(VT_OUT_TYPE, 0));
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int8_t>(verifier, VT_OUT_TYPE, 1) &&
            verifier.EndTable();
   }
-  ShapeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(ShapeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<ShapeOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  ShapeOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(ShapeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<ShapeOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct ShapeOptionsBuilder {
   typedef ShapeOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_out_type(tflite::TensorType out_type) {
     fbb_.AddElement<int8_t>(ShapeOptions::VT_OUT_TYPE, static_cast<int8_t>(out_type), 0);
   }
-  explicit ShapeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit ShapeOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<ShapeOptions> Finish() {
+  ::flatbuffers::Offset<ShapeOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<ShapeOptions>(end);
+    auto o = ::flatbuffers::Offset<ShapeOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<ShapeOptions> CreateShapeOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<ShapeOptions> CreateShapeOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     tflite::TensorType out_type = tflite::TensorType_FLOAT32) {
   ShapeOptionsBuilder builder_(_fbb);
   builder_.add_out_type(out_type);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<ShapeOptions> CreateShapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<ShapeOptions> CreateShapeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct RankOptionsT : public flatbuffers::NativeTable {
+struct RankOptionsT : public ::flatbuffers::NativeTable {
   typedef RankOptions TableType;
 };
 
-struct RankOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct RankOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef RankOptionsT NativeTableType;
   typedef RankOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  RankOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(RankOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<RankOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  RankOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(RankOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<RankOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct RankOptionsBuilder {
   typedef RankOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit RankOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit RankOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<RankOptions> Finish() {
+  ::flatbuffers::Offset<RankOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<RankOptions>(end);
+    auto o = ::flatbuffers::Offset<RankOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<RankOptions> CreateRankOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<RankOptions> CreateRankOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   RankOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<RankOptions> CreateRankOptions(flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<RankOptions> CreateRankOptions(::flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct PowOptionsT : public flatbuffers::NativeTable {
+struct PowOptionsT : public ::flatbuffers::NativeTable {
   typedef PowOptions TableType;
 };
 
-struct PowOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct PowOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef PowOptionsT NativeTableType;
   typedef PowOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  PowOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(PowOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<PowOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  PowOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(PowOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<PowOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct PowOptionsBuilder {
   typedef PowOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit PowOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit PowOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<PowOptions> Finish() {
+  ::flatbuffers::Offset<PowOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<PowOptions>(end);
+    auto o = ::flatbuffers::Offset<PowOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<PowOptions> CreatePowOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<PowOptions> CreatePowOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   PowOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<PowOptions> CreatePowOptions(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<PowOptions> CreatePowOptions(::flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct FakeQuantOptionsT : public flatbuffers::NativeTable {
+struct FakeQuantOptionsT : public ::flatbuffers::NativeTable {
   typedef FakeQuantOptions TableType;
   float min = 0.0f;
   float max = 0.0f;
@@ -9007,7 +11677,7 @@
   bool narrow_range = false;
 };
 
-struct FakeQuantOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct FakeQuantOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef FakeQuantOptionsT NativeTableType;
   typedef FakeQuantOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -9028,7 +11698,7 @@
   bool narrow_range() const {
     return GetField<uint8_t>(VT_NARROW_RANGE, 0) != 0;
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<float>(verifier, VT_MIN, 4) &&
            VerifyField<float>(verifier, VT_MAX, 4) &&
@@ -9036,15 +11706,15 @@
            VerifyField<uint8_t>(verifier, VT_NARROW_RANGE, 1) &&
            verifier.EndTable();
   }
-  FakeQuantOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(FakeQuantOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<FakeQuantOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  FakeQuantOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(FakeQuantOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<FakeQuantOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct FakeQuantOptionsBuilder {
   typedef FakeQuantOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_min(float min) {
     fbb_.AddElement<float>(FakeQuantOptions::VT_MIN, min, 0.0f);
   }
@@ -9057,19 +11727,19 @@
   void add_narrow_range(bool narrow_range) {
     fbb_.AddElement<uint8_t>(FakeQuantOptions::VT_NARROW_RANGE, static_cast<uint8_t>(narrow_range), 0);
   }
-  explicit FakeQuantOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit FakeQuantOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<FakeQuantOptions> Finish() {
+  ::flatbuffers::Offset<FakeQuantOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<FakeQuantOptions>(end);
+    auto o = ::flatbuffers::Offset<FakeQuantOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<FakeQuantOptions> CreateFakeQuantOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<FakeQuantOptions> CreateFakeQuantOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     float min = 0.0f,
     float max = 0.0f,
     int32_t num_bits = 0,
@@ -9082,15 +11752,15 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<FakeQuantOptions> CreateFakeQuantOptions(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<FakeQuantOptions> CreateFakeQuantOptions(::flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct PackOptionsT : public flatbuffers::NativeTable {
+struct PackOptionsT : public ::flatbuffers::NativeTable {
   typedef PackOptions TableType;
   int32_t values_count = 0;
   int32_t axis = 0;
 };
 
-struct PackOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct PackOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef PackOptionsT NativeTableType;
   typedef PackOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -9103,40 +11773,40 @@
   int32_t axis() const {
     return GetField<int32_t>(VT_AXIS, 0);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int32_t>(verifier, VT_VALUES_COUNT, 4) &&
            VerifyField<int32_t>(verifier, VT_AXIS, 4) &&
            verifier.EndTable();
   }
-  PackOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(PackOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<PackOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  PackOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(PackOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<PackOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct PackOptionsBuilder {
   typedef PackOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_values_count(int32_t values_count) {
     fbb_.AddElement<int32_t>(PackOptions::VT_VALUES_COUNT, values_count, 0);
   }
   void add_axis(int32_t axis) {
     fbb_.AddElement<int32_t>(PackOptions::VT_AXIS, axis, 0);
   }
-  explicit PackOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit PackOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<PackOptions> Finish() {
+  ::flatbuffers::Offset<PackOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<PackOptions>(end);
+    auto o = ::flatbuffers::Offset<PackOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<PackOptions> CreatePackOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<PackOptions> CreatePackOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     int32_t values_count = 0,
     int32_t axis = 0) {
   PackOptionsBuilder builder_(_fbb);
@@ -9145,53 +11815,53 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<PackOptions> CreatePackOptions(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<PackOptions> CreatePackOptions(::flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct LogicalOrOptionsT : public flatbuffers::NativeTable {
+struct LogicalOrOptionsT : public ::flatbuffers::NativeTable {
   typedef LogicalOrOptions TableType;
 };
 
-struct LogicalOrOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct LogicalOrOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef LogicalOrOptionsT NativeTableType;
   typedef LogicalOrOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  LogicalOrOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(LogicalOrOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<LogicalOrOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  LogicalOrOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(LogicalOrOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<LogicalOrOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct LogicalOrOptionsBuilder {
   typedef LogicalOrOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit LogicalOrOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit LogicalOrOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<LogicalOrOptions> Finish() {
+  ::flatbuffers::Offset<LogicalOrOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<LogicalOrOptions>(end);
+    auto o = ::flatbuffers::Offset<LogicalOrOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<LogicalOrOptions> CreateLogicalOrOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<LogicalOrOptions> CreateLogicalOrOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   LogicalOrOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<LogicalOrOptions> CreateLogicalOrOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<LogicalOrOptions> CreateLogicalOrOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct OneHotOptionsT : public flatbuffers::NativeTable {
+struct OneHotOptionsT : public ::flatbuffers::NativeTable {
   typedef OneHotOptions TableType;
   int32_t axis = 0;
 };
 
-struct OneHotOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct OneHotOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef OneHotOptionsT NativeTableType;
   typedef OneHotOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -9200,207 +11870,207 @@
   int32_t axis() const {
     return GetField<int32_t>(VT_AXIS, 0);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int32_t>(verifier, VT_AXIS, 4) &&
            verifier.EndTable();
   }
-  OneHotOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(OneHotOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<OneHotOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  OneHotOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(OneHotOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<OneHotOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct OneHotOptionsBuilder {
   typedef OneHotOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_axis(int32_t axis) {
     fbb_.AddElement<int32_t>(OneHotOptions::VT_AXIS, axis, 0);
   }
-  explicit OneHotOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit OneHotOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<OneHotOptions> Finish() {
+  ::flatbuffers::Offset<OneHotOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<OneHotOptions>(end);
+    auto o = ::flatbuffers::Offset<OneHotOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<OneHotOptions> CreateOneHotOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<OneHotOptions> CreateOneHotOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     int32_t axis = 0) {
   OneHotOptionsBuilder builder_(_fbb);
   builder_.add_axis(axis);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<OneHotOptions> CreateOneHotOptions(flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<OneHotOptions> CreateOneHotOptions(::flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct AbsOptionsT : public flatbuffers::NativeTable {
+struct AbsOptionsT : public ::flatbuffers::NativeTable {
   typedef AbsOptions TableType;
 };
 
-struct AbsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct AbsOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef AbsOptionsT NativeTableType;
   typedef AbsOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  AbsOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(AbsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<AbsOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  AbsOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(AbsOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<AbsOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct AbsOptionsBuilder {
   typedef AbsOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit AbsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit AbsOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<AbsOptions> Finish() {
+  ::flatbuffers::Offset<AbsOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<AbsOptions>(end);
+    auto o = ::flatbuffers::Offset<AbsOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<AbsOptions> CreateAbsOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<AbsOptions> CreateAbsOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   AbsOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<AbsOptions> CreateAbsOptions(flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<AbsOptions> CreateAbsOptions(::flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct HardSwishOptionsT : public flatbuffers::NativeTable {
+struct HardSwishOptionsT : public ::flatbuffers::NativeTable {
   typedef HardSwishOptions TableType;
 };
 
-struct HardSwishOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct HardSwishOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef HardSwishOptionsT NativeTableType;
   typedef HardSwishOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  HardSwishOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(HardSwishOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<HardSwishOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  HardSwishOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(HardSwishOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<HardSwishOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct HardSwishOptionsBuilder {
   typedef HardSwishOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit HardSwishOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit HardSwishOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<HardSwishOptions> Finish() {
+  ::flatbuffers::Offset<HardSwishOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<HardSwishOptions>(end);
+    auto o = ::flatbuffers::Offset<HardSwishOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<HardSwishOptions> CreateHardSwishOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<HardSwishOptions> CreateHardSwishOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   HardSwishOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<HardSwishOptions> CreateHardSwishOptions(flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<HardSwishOptions> CreateHardSwishOptions(::flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct LogicalAndOptionsT : public flatbuffers::NativeTable {
+struct LogicalAndOptionsT : public ::flatbuffers::NativeTable {
   typedef LogicalAndOptions TableType;
 };
 
-struct LogicalAndOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct LogicalAndOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef LogicalAndOptionsT NativeTableType;
   typedef LogicalAndOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  LogicalAndOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(LogicalAndOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<LogicalAndOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  LogicalAndOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(LogicalAndOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<LogicalAndOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct LogicalAndOptionsBuilder {
   typedef LogicalAndOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit LogicalAndOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit LogicalAndOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<LogicalAndOptions> Finish() {
+  ::flatbuffers::Offset<LogicalAndOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<LogicalAndOptions>(end);
+    auto o = ::flatbuffers::Offset<LogicalAndOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<LogicalAndOptions> CreateLogicalAndOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<LogicalAndOptions> CreateLogicalAndOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   LogicalAndOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<LogicalAndOptions> CreateLogicalAndOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<LogicalAndOptions> CreateLogicalAndOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct LogicalNotOptionsT : public flatbuffers::NativeTable {
+struct LogicalNotOptionsT : public ::flatbuffers::NativeTable {
   typedef LogicalNotOptions TableType;
 };
 
-struct LogicalNotOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct LogicalNotOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef LogicalNotOptionsT NativeTableType;
   typedef LogicalNotOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  LogicalNotOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(LogicalNotOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<LogicalNotOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  LogicalNotOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(LogicalNotOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<LogicalNotOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct LogicalNotOptionsBuilder {
   typedef LogicalNotOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit LogicalNotOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit LogicalNotOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<LogicalNotOptions> Finish() {
+  ::flatbuffers::Offset<LogicalNotOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<LogicalNotOptions>(end);
+    auto o = ::flatbuffers::Offset<LogicalNotOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<LogicalNotOptions> CreateLogicalNotOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<LogicalNotOptions> CreateLogicalNotOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   LogicalNotOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<LogicalNotOptions> CreateLogicalNotOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<LogicalNotOptions> CreateLogicalNotOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct UnpackOptionsT : public flatbuffers::NativeTable {
+struct UnpackOptionsT : public ::flatbuffers::NativeTable {
   typedef UnpackOptions TableType;
   int32_t num = 0;
   int32_t axis = 0;
 };
 
-struct UnpackOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct UnpackOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef UnpackOptionsT NativeTableType;
   typedef UnpackOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -9413,40 +12083,40 @@
   int32_t axis() const {
     return GetField<int32_t>(VT_AXIS, 0);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int32_t>(verifier, VT_NUM, 4) &&
            VerifyField<int32_t>(verifier, VT_AXIS, 4) &&
            verifier.EndTable();
   }
-  UnpackOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(UnpackOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<UnpackOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  UnpackOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(UnpackOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<UnpackOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct UnpackOptionsBuilder {
   typedef UnpackOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_num(int32_t num) {
     fbb_.AddElement<int32_t>(UnpackOptions::VT_NUM, num, 0);
   }
   void add_axis(int32_t axis) {
     fbb_.AddElement<int32_t>(UnpackOptions::VT_AXIS, axis, 0);
   }
-  explicit UnpackOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit UnpackOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<UnpackOptions> Finish() {
+  ::flatbuffers::Offset<UnpackOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<UnpackOptions>(end);
+    auto o = ::flatbuffers::Offset<UnpackOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<UnpackOptions> CreateUnpackOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<UnpackOptions> CreateUnpackOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     int32_t num = 0,
     int32_t axis = 0) {
   UnpackOptionsBuilder builder_(_fbb);
@@ -9455,248 +12125,248 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<UnpackOptions> CreateUnpackOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<UnpackOptions> CreateUnpackOptions(::flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct FloorDivOptionsT : public flatbuffers::NativeTable {
+struct FloorDivOptionsT : public ::flatbuffers::NativeTable {
   typedef FloorDivOptions TableType;
 };
 
-struct FloorDivOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct FloorDivOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef FloorDivOptionsT NativeTableType;
   typedef FloorDivOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  FloorDivOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(FloorDivOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<FloorDivOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  FloorDivOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(FloorDivOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<FloorDivOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct FloorDivOptionsBuilder {
   typedef FloorDivOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit FloorDivOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit FloorDivOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<FloorDivOptions> Finish() {
+  ::flatbuffers::Offset<FloorDivOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<FloorDivOptions>(end);
+    auto o = ::flatbuffers::Offset<FloorDivOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<FloorDivOptions> CreateFloorDivOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<FloorDivOptions> CreateFloorDivOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   FloorDivOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<FloorDivOptions> CreateFloorDivOptions(flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<FloorDivOptions> CreateFloorDivOptions(::flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct SquareOptionsT : public flatbuffers::NativeTable {
+struct SquareOptionsT : public ::flatbuffers::NativeTable {
   typedef SquareOptions TableType;
 };
 
-struct SquareOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct SquareOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef SquareOptionsT NativeTableType;
   typedef SquareOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  SquareOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(SquareOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<SquareOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  SquareOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(SquareOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<SquareOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct SquareOptionsBuilder {
   typedef SquareOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit SquareOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit SquareOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<SquareOptions> Finish() {
+  ::flatbuffers::Offset<SquareOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<SquareOptions>(end);
+    auto o = ::flatbuffers::Offset<SquareOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<SquareOptions> CreateSquareOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<SquareOptions> CreateSquareOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   SquareOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<SquareOptions> CreateSquareOptions(flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<SquareOptions> CreateSquareOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct ZerosLikeOptionsT : public flatbuffers::NativeTable {
+struct ZerosLikeOptionsT : public ::flatbuffers::NativeTable {
   typedef ZerosLikeOptions TableType;
 };
 
-struct ZerosLikeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct ZerosLikeOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef ZerosLikeOptionsT NativeTableType;
   typedef ZerosLikeOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  ZerosLikeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(ZerosLikeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<ZerosLikeOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  ZerosLikeOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(ZerosLikeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<ZerosLikeOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct ZerosLikeOptionsBuilder {
   typedef ZerosLikeOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit ZerosLikeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit ZerosLikeOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<ZerosLikeOptions> Finish() {
+  ::flatbuffers::Offset<ZerosLikeOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<ZerosLikeOptions>(end);
+    auto o = ::flatbuffers::Offset<ZerosLikeOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<ZerosLikeOptions> CreateZerosLikeOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<ZerosLikeOptions> CreateZerosLikeOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   ZerosLikeOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<ZerosLikeOptions> CreateZerosLikeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<ZerosLikeOptions> CreateZerosLikeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct FillOptionsT : public flatbuffers::NativeTable {
+struct FillOptionsT : public ::flatbuffers::NativeTable {
   typedef FillOptions TableType;
 };
 
-struct FillOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct FillOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef FillOptionsT NativeTableType;
   typedef FillOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  FillOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(FillOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<FillOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  FillOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(FillOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<FillOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct FillOptionsBuilder {
   typedef FillOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit FillOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit FillOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<FillOptions> Finish() {
+  ::flatbuffers::Offset<FillOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<FillOptions>(end);
+    auto o = ::flatbuffers::Offset<FillOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<FillOptions> CreateFillOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<FillOptions> CreateFillOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   FillOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<FillOptions> CreateFillOptions(flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<FillOptions> CreateFillOptions(::flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct FloorModOptionsT : public flatbuffers::NativeTable {
+struct FloorModOptionsT : public ::flatbuffers::NativeTable {
   typedef FloorModOptions TableType;
 };
 
-struct FloorModOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct FloorModOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef FloorModOptionsT NativeTableType;
   typedef FloorModOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  FloorModOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(FloorModOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<FloorModOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  FloorModOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(FloorModOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<FloorModOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct FloorModOptionsBuilder {
   typedef FloorModOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit FloorModOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit FloorModOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<FloorModOptions> Finish() {
+  ::flatbuffers::Offset<FloorModOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<FloorModOptions>(end);
+    auto o = ::flatbuffers::Offset<FloorModOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<FloorModOptions> CreateFloorModOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<FloorModOptions> CreateFloorModOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   FloorModOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<FloorModOptions> CreateFloorModOptions(flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<FloorModOptions> CreateFloorModOptions(::flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct RangeOptionsT : public flatbuffers::NativeTable {
+struct RangeOptionsT : public ::flatbuffers::NativeTable {
   typedef RangeOptions TableType;
 };
 
-struct RangeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct RangeOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef RangeOptionsT NativeTableType;
   typedef RangeOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  RangeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(RangeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<RangeOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  RangeOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(RangeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<RangeOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct RangeOptionsBuilder {
   typedef RangeOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit RangeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit RangeOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<RangeOptions> Finish() {
+  ::flatbuffers::Offset<RangeOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<RangeOptions>(end);
+    auto o = ::flatbuffers::Offset<RangeOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<RangeOptions> CreateRangeOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<RangeOptions> CreateRangeOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   RangeOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<RangeOptions> CreateRangeOptions(flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<RangeOptions> CreateRangeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct LeakyReluOptionsT : public flatbuffers::NativeTable {
+struct LeakyReluOptionsT : public ::flatbuffers::NativeTable {
   typedef LeakyReluOptions TableType;
   float alpha = 0.0f;
 };
 
-struct LeakyReluOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct LeakyReluOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef LeakyReluOptionsT NativeTableType;
   typedef LeakyReluOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -9705,89 +12375,89 @@
   float alpha() const {
     return GetField<float>(VT_ALPHA, 0.0f);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<float>(verifier, VT_ALPHA, 4) &&
            verifier.EndTable();
   }
-  LeakyReluOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(LeakyReluOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<LeakyReluOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  LeakyReluOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(LeakyReluOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<LeakyReluOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct LeakyReluOptionsBuilder {
   typedef LeakyReluOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_alpha(float alpha) {
     fbb_.AddElement<float>(LeakyReluOptions::VT_ALPHA, alpha, 0.0f);
   }
-  explicit LeakyReluOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit LeakyReluOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<LeakyReluOptions> Finish() {
+  ::flatbuffers::Offset<LeakyReluOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<LeakyReluOptions>(end);
+    auto o = ::flatbuffers::Offset<LeakyReluOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<LeakyReluOptions> CreateLeakyReluOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<LeakyReluOptions> CreateLeakyReluOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     float alpha = 0.0f) {
   LeakyReluOptionsBuilder builder_(_fbb);
   builder_.add_alpha(alpha);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<LeakyReluOptions> CreateLeakyReluOptions(flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<LeakyReluOptions> CreateLeakyReluOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct SquaredDifferenceOptionsT : public flatbuffers::NativeTable {
+struct SquaredDifferenceOptionsT : public ::flatbuffers::NativeTable {
   typedef SquaredDifferenceOptions TableType;
 };
 
-struct SquaredDifferenceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct SquaredDifferenceOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef SquaredDifferenceOptionsT NativeTableType;
   typedef SquaredDifferenceOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  SquaredDifferenceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(SquaredDifferenceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<SquaredDifferenceOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  SquaredDifferenceOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(SquaredDifferenceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<SquaredDifferenceOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct SquaredDifferenceOptionsBuilder {
   typedef SquaredDifferenceOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit SquaredDifferenceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit SquaredDifferenceOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<SquaredDifferenceOptions> Finish() {
+  ::flatbuffers::Offset<SquaredDifferenceOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<SquaredDifferenceOptions>(end);
+    auto o = ::flatbuffers::Offset<SquaredDifferenceOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<SquaredDifferenceOptions> CreateSquaredDifferenceOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<SquaredDifferenceOptions> CreateSquaredDifferenceOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   SquaredDifferenceOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<SquaredDifferenceOptions> CreateSquaredDifferenceOptions(flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<SquaredDifferenceOptions> CreateSquaredDifferenceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct MirrorPadOptionsT : public flatbuffers::NativeTable {
+struct MirrorPadOptionsT : public ::flatbuffers::NativeTable {
   typedef MirrorPadOptions TableType;
   tflite::MirrorPadMode mode = tflite::MirrorPadMode_REFLECT;
 };
 
-struct MirrorPadOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct MirrorPadOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef MirrorPadOptionsT NativeTableType;
   typedef MirrorPadOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -9796,50 +12466,50 @@
   tflite::MirrorPadMode mode() const {
     return static_cast<tflite::MirrorPadMode>(GetField<int8_t>(VT_MODE, 0));
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int8_t>(verifier, VT_MODE, 1) &&
            verifier.EndTable();
   }
-  MirrorPadOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(MirrorPadOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<MirrorPadOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  MirrorPadOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(MirrorPadOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<MirrorPadOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct MirrorPadOptionsBuilder {
   typedef MirrorPadOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_mode(tflite::MirrorPadMode mode) {
     fbb_.AddElement<int8_t>(MirrorPadOptions::VT_MODE, static_cast<int8_t>(mode), 0);
   }
-  explicit MirrorPadOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit MirrorPadOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<MirrorPadOptions> Finish() {
+  ::flatbuffers::Offset<MirrorPadOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<MirrorPadOptions>(end);
+    auto o = ::flatbuffers::Offset<MirrorPadOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<MirrorPadOptions> CreateMirrorPadOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<MirrorPadOptions> CreateMirrorPadOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     tflite::MirrorPadMode mode = tflite::MirrorPadMode_REFLECT) {
   MirrorPadOptionsBuilder builder_(_fbb);
   builder_.add_mode(mode);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<MirrorPadOptions> CreateMirrorPadOptions(flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<MirrorPadOptions> CreateMirrorPadOptions(::flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct UniqueOptionsT : public flatbuffers::NativeTable {
+struct UniqueOptionsT : public ::flatbuffers::NativeTable {
   typedef UniqueOptions TableType;
   tflite::TensorType idx_out_type = tflite::TensorType_INT32;
 };
 
-struct UniqueOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct UniqueOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef UniqueOptionsT NativeTableType;
   typedef UniqueOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -9848,207 +12518,207 @@
   tflite::TensorType idx_out_type() const {
     return static_cast<tflite::TensorType>(GetField<int8_t>(VT_IDX_OUT_TYPE, 2));
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int8_t>(verifier, VT_IDX_OUT_TYPE, 1) &&
            verifier.EndTable();
   }
-  UniqueOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(UniqueOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<UniqueOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  UniqueOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(UniqueOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<UniqueOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct UniqueOptionsBuilder {
   typedef UniqueOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_idx_out_type(tflite::TensorType idx_out_type) {
     fbb_.AddElement<int8_t>(UniqueOptions::VT_IDX_OUT_TYPE, static_cast<int8_t>(idx_out_type), 2);
   }
-  explicit UniqueOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit UniqueOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<UniqueOptions> Finish() {
+  ::flatbuffers::Offset<UniqueOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<UniqueOptions>(end);
+    auto o = ::flatbuffers::Offset<UniqueOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<UniqueOptions> CreateUniqueOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<UniqueOptions> CreateUniqueOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     tflite::TensorType idx_out_type = tflite::TensorType_INT32) {
   UniqueOptionsBuilder builder_(_fbb);
   builder_.add_idx_out_type(idx_out_type);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<UniqueOptions> CreateUniqueOptions(flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<UniqueOptions> CreateUniqueOptions(::flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct ReverseV2OptionsT : public flatbuffers::NativeTable {
+struct ReverseV2OptionsT : public ::flatbuffers::NativeTable {
   typedef ReverseV2Options TableType;
 };
 
-struct ReverseV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct ReverseV2Options FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef ReverseV2OptionsT NativeTableType;
   typedef ReverseV2OptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  ReverseV2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(ReverseV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<ReverseV2Options> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  ReverseV2OptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(ReverseV2OptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<ReverseV2Options> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct ReverseV2OptionsBuilder {
   typedef ReverseV2Options Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit ReverseV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit ReverseV2OptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<ReverseV2Options> Finish() {
+  ::flatbuffers::Offset<ReverseV2Options> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<ReverseV2Options>(end);
+    auto o = ::flatbuffers::Offset<ReverseV2Options>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<ReverseV2Options> CreateReverseV2Options(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<ReverseV2Options> CreateReverseV2Options(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   ReverseV2OptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<ReverseV2Options> CreateReverseV2Options(flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<ReverseV2Options> CreateReverseV2Options(::flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct AddNOptionsT : public flatbuffers::NativeTable {
+struct AddNOptionsT : public ::flatbuffers::NativeTable {
   typedef AddNOptions TableType;
 };
 
-struct AddNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct AddNOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef AddNOptionsT NativeTableType;
   typedef AddNOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  AddNOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(AddNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<AddNOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  AddNOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(AddNOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<AddNOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct AddNOptionsBuilder {
   typedef AddNOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit AddNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit AddNOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<AddNOptions> Finish() {
+  ::flatbuffers::Offset<AddNOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<AddNOptions>(end);
+    auto o = ::flatbuffers::Offset<AddNOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<AddNOptions> CreateAddNOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<AddNOptions> CreateAddNOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   AddNOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<AddNOptions> CreateAddNOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<AddNOptions> CreateAddNOptions(::flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct GatherNdOptionsT : public flatbuffers::NativeTable {
+struct GatherNdOptionsT : public ::flatbuffers::NativeTable {
   typedef GatherNdOptions TableType;
 };
 
-struct GatherNdOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct GatherNdOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef GatherNdOptionsT NativeTableType;
   typedef GatherNdOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  GatherNdOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(GatherNdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<GatherNdOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  GatherNdOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(GatherNdOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<GatherNdOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct GatherNdOptionsBuilder {
   typedef GatherNdOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit GatherNdOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit GatherNdOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<GatherNdOptions> Finish() {
+  ::flatbuffers::Offset<GatherNdOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<GatherNdOptions>(end);
+    auto o = ::flatbuffers::Offset<GatherNdOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<GatherNdOptions> CreateGatherNdOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<GatherNdOptions> CreateGatherNdOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   GatherNdOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<GatherNdOptions> CreateGatherNdOptions(flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<GatherNdOptions> CreateGatherNdOptions(::flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct WhereOptionsT : public flatbuffers::NativeTable {
+struct WhereOptionsT : public ::flatbuffers::NativeTable {
   typedef WhereOptions TableType;
 };
 
-struct WhereOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct WhereOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef WhereOptionsT NativeTableType;
   typedef WhereOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  WhereOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(WhereOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<WhereOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  WhereOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(WhereOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<WhereOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct WhereOptionsBuilder {
   typedef WhereOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit WhereOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit WhereOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<WhereOptions> Finish() {
+  ::flatbuffers::Offset<WhereOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<WhereOptions>(end);
+    auto o = ::flatbuffers::Offset<WhereOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<WhereOptions> CreateWhereOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<WhereOptions> CreateWhereOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   WhereOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<WhereOptions> CreateWhereOptions(flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<WhereOptions> CreateWhereOptions(::flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct ReverseSequenceOptionsT : public flatbuffers::NativeTable {
+struct ReverseSequenceOptionsT : public ::flatbuffers::NativeTable {
   typedef ReverseSequenceOptions TableType;
   int32_t seq_dim = 0;
   int32_t batch_dim = 0;
 };
 
-struct ReverseSequenceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct ReverseSequenceOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef ReverseSequenceOptionsT NativeTableType;
   typedef ReverseSequenceOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -10061,40 +12731,40 @@
   int32_t batch_dim() const {
     return GetField<int32_t>(VT_BATCH_DIM, 0);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int32_t>(verifier, VT_SEQ_DIM, 4) &&
            VerifyField<int32_t>(verifier, VT_BATCH_DIM, 4) &&
            verifier.EndTable();
   }
-  ReverseSequenceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(ReverseSequenceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<ReverseSequenceOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  ReverseSequenceOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(ReverseSequenceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<ReverseSequenceOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct ReverseSequenceOptionsBuilder {
   typedef ReverseSequenceOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_seq_dim(int32_t seq_dim) {
     fbb_.AddElement<int32_t>(ReverseSequenceOptions::VT_SEQ_DIM, seq_dim, 0);
   }
   void add_batch_dim(int32_t batch_dim) {
     fbb_.AddElement<int32_t>(ReverseSequenceOptions::VT_BATCH_DIM, batch_dim, 0);
   }
-  explicit ReverseSequenceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit ReverseSequenceOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<ReverseSequenceOptions> Finish() {
+  ::flatbuffers::Offset<ReverseSequenceOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<ReverseSequenceOptions>(end);
+    auto o = ::flatbuffers::Offset<ReverseSequenceOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<ReverseSequenceOptions> CreateReverseSequenceOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<ReverseSequenceOptions> CreateReverseSequenceOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     int32_t seq_dim = 0,
     int32_t batch_dim = 0) {
   ReverseSequenceOptionsBuilder builder_(_fbb);
@@ -10103,132 +12773,132 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<ReverseSequenceOptions> CreateReverseSequenceOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<ReverseSequenceOptions> CreateReverseSequenceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct MatrixDiagOptionsT : public flatbuffers::NativeTable {
+struct MatrixDiagOptionsT : public ::flatbuffers::NativeTable {
   typedef MatrixDiagOptions TableType;
 };
 
-struct MatrixDiagOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct MatrixDiagOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef MatrixDiagOptionsT NativeTableType;
   typedef MatrixDiagOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  MatrixDiagOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(MatrixDiagOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<MatrixDiagOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  MatrixDiagOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(MatrixDiagOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<MatrixDiagOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct MatrixDiagOptionsBuilder {
   typedef MatrixDiagOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit MatrixDiagOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit MatrixDiagOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<MatrixDiagOptions> Finish() {
+  ::flatbuffers::Offset<MatrixDiagOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<MatrixDiagOptions>(end);
+    auto o = ::flatbuffers::Offset<MatrixDiagOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<MatrixDiagOptions> CreateMatrixDiagOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<MatrixDiagOptions> CreateMatrixDiagOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   MatrixDiagOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<MatrixDiagOptions> CreateMatrixDiagOptions(flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<MatrixDiagOptions> CreateMatrixDiagOptions(::flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct QuantizeOptionsT : public flatbuffers::NativeTable {
+struct QuantizeOptionsT : public ::flatbuffers::NativeTable {
   typedef QuantizeOptions TableType;
 };
 
-struct QuantizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct QuantizeOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef QuantizeOptionsT NativeTableType;
   typedef QuantizeOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  QuantizeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(QuantizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<QuantizeOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  QuantizeOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(QuantizeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<QuantizeOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct QuantizeOptionsBuilder {
   typedef QuantizeOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit QuantizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit QuantizeOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<QuantizeOptions> Finish() {
+  ::flatbuffers::Offset<QuantizeOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<QuantizeOptions>(end);
+    auto o = ::flatbuffers::Offset<QuantizeOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<QuantizeOptions> CreateQuantizeOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<QuantizeOptions> CreateQuantizeOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   QuantizeOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<QuantizeOptions> CreateQuantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<QuantizeOptions> CreateQuantizeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct MatrixSetDiagOptionsT : public flatbuffers::NativeTable {
+struct MatrixSetDiagOptionsT : public ::flatbuffers::NativeTable {
   typedef MatrixSetDiagOptions TableType;
 };
 
-struct MatrixSetDiagOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct MatrixSetDiagOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef MatrixSetDiagOptionsT NativeTableType;
   typedef MatrixSetDiagOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  MatrixSetDiagOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(MatrixSetDiagOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<MatrixSetDiagOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  MatrixSetDiagOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(MatrixSetDiagOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<MatrixSetDiagOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct MatrixSetDiagOptionsBuilder {
   typedef MatrixSetDiagOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit MatrixSetDiagOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit MatrixSetDiagOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<MatrixSetDiagOptions> Finish() {
+  ::flatbuffers::Offset<MatrixSetDiagOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<MatrixSetDiagOptions>(end);
+    auto o = ::flatbuffers::Offset<MatrixSetDiagOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<MatrixSetDiagOptions> CreateMatrixSetDiagOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<MatrixSetDiagOptions> CreateMatrixSetDiagOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   MatrixSetDiagOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<MatrixSetDiagOptions> CreateMatrixSetDiagOptions(flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<MatrixSetDiagOptions> CreateMatrixSetDiagOptions(::flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct IfOptionsT : public flatbuffers::NativeTable {
+struct IfOptionsT : public ::flatbuffers::NativeTable {
   typedef IfOptions TableType;
   int32_t then_subgraph_index = 0;
   int32_t else_subgraph_index = 0;
 };
 
-struct IfOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct IfOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef IfOptionsT NativeTableType;
   typedef IfOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -10241,40 +12911,40 @@
   int32_t else_subgraph_index() const {
     return GetField<int32_t>(VT_ELSE_SUBGRAPH_INDEX, 0);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int32_t>(verifier, VT_THEN_SUBGRAPH_INDEX, 4) &&
            VerifyField<int32_t>(verifier, VT_ELSE_SUBGRAPH_INDEX, 4) &&
            verifier.EndTable();
   }
-  IfOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(IfOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<IfOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  IfOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(IfOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<IfOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct IfOptionsBuilder {
   typedef IfOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_then_subgraph_index(int32_t then_subgraph_index) {
     fbb_.AddElement<int32_t>(IfOptions::VT_THEN_SUBGRAPH_INDEX, then_subgraph_index, 0);
   }
   void add_else_subgraph_index(int32_t else_subgraph_index) {
     fbb_.AddElement<int32_t>(IfOptions::VT_ELSE_SUBGRAPH_INDEX, else_subgraph_index, 0);
   }
-  explicit IfOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit IfOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<IfOptions> Finish() {
+  ::flatbuffers::Offset<IfOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<IfOptions>(end);
+    auto o = ::flatbuffers::Offset<IfOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<IfOptions> CreateIfOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<IfOptions> CreateIfOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     int32_t then_subgraph_index = 0,
     int32_t else_subgraph_index = 0) {
   IfOptionsBuilder builder_(_fbb);
@@ -10283,14 +12953,14 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<IfOptions> CreateIfOptions(flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<IfOptions> CreateIfOptions(::flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct CallOnceOptionsT : public flatbuffers::NativeTable {
+struct CallOnceOptionsT : public ::flatbuffers::NativeTable {
   typedef CallOnceOptions TableType;
   int32_t init_subgraph_index = 0;
 };
 
-struct CallOnceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct CallOnceOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef CallOnceOptionsT NativeTableType;
   typedef CallOnceOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -10299,51 +12969,51 @@
   int32_t init_subgraph_index() const {
     return GetField<int32_t>(VT_INIT_SUBGRAPH_INDEX, 0);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int32_t>(verifier, VT_INIT_SUBGRAPH_INDEX, 4) &&
            verifier.EndTable();
   }
-  CallOnceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(CallOnceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<CallOnceOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  CallOnceOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(CallOnceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<CallOnceOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct CallOnceOptionsBuilder {
   typedef CallOnceOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_init_subgraph_index(int32_t init_subgraph_index) {
     fbb_.AddElement<int32_t>(CallOnceOptions::VT_INIT_SUBGRAPH_INDEX, init_subgraph_index, 0);
   }
-  explicit CallOnceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit CallOnceOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<CallOnceOptions> Finish() {
+  ::flatbuffers::Offset<CallOnceOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<CallOnceOptions>(end);
+    auto o = ::flatbuffers::Offset<CallOnceOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<CallOnceOptions> CreateCallOnceOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<CallOnceOptions> CreateCallOnceOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     int32_t init_subgraph_index = 0) {
   CallOnceOptionsBuilder builder_(_fbb);
   builder_.add_init_subgraph_index(init_subgraph_index);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<CallOnceOptions> CreateCallOnceOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<CallOnceOptions> CreateCallOnceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct WhileOptionsT : public flatbuffers::NativeTable {
+struct WhileOptionsT : public ::flatbuffers::NativeTable {
   typedef WhileOptions TableType;
   int32_t cond_subgraph_index = 0;
   int32_t body_subgraph_index = 0;
 };
 
-struct WhileOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct WhileOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef WhileOptionsT NativeTableType;
   typedef WhileOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -10356,40 +13026,40 @@
   int32_t body_subgraph_index() const {
     return GetField<int32_t>(VT_BODY_SUBGRAPH_INDEX, 0);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int32_t>(verifier, VT_COND_SUBGRAPH_INDEX, 4) &&
            VerifyField<int32_t>(verifier, VT_BODY_SUBGRAPH_INDEX, 4) &&
            verifier.EndTable();
   }
-  WhileOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(WhileOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<WhileOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  WhileOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(WhileOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<WhileOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct WhileOptionsBuilder {
   typedef WhileOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_cond_subgraph_index(int32_t cond_subgraph_index) {
     fbb_.AddElement<int32_t>(WhileOptions::VT_COND_SUBGRAPH_INDEX, cond_subgraph_index, 0);
   }
   void add_body_subgraph_index(int32_t body_subgraph_index) {
     fbb_.AddElement<int32_t>(WhileOptions::VT_BODY_SUBGRAPH_INDEX, body_subgraph_index, 0);
   }
-  explicit WhileOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit WhileOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<WhileOptions> Finish() {
+  ::flatbuffers::Offset<WhileOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<WhileOptions>(end);
+    auto o = ::flatbuffers::Offset<WhileOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<WhileOptions> CreateWhileOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<WhileOptions> CreateWhileOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     int32_t cond_subgraph_index = 0,
     int32_t body_subgraph_index = 0) {
   WhileOptionsBuilder builder_(_fbb);
@@ -10398,250 +13068,250 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<WhileOptions> CreateWhileOptions(flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<WhileOptions> CreateWhileOptions(::flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct NonMaxSuppressionV4OptionsT : public flatbuffers::NativeTable {
+struct NonMaxSuppressionV4OptionsT : public ::flatbuffers::NativeTable {
   typedef NonMaxSuppressionV4Options TableType;
 };
 
-struct NonMaxSuppressionV4Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct NonMaxSuppressionV4Options FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef NonMaxSuppressionV4OptionsT NativeTableType;
   typedef NonMaxSuppressionV4OptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  NonMaxSuppressionV4OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(NonMaxSuppressionV4OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<NonMaxSuppressionV4Options> Pack(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  NonMaxSuppressionV4OptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(NonMaxSuppressionV4OptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<NonMaxSuppressionV4Options> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct NonMaxSuppressionV4OptionsBuilder {
   typedef NonMaxSuppressionV4Options Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit NonMaxSuppressionV4OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit NonMaxSuppressionV4OptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<NonMaxSuppressionV4Options> Finish() {
+  ::flatbuffers::Offset<NonMaxSuppressionV4Options> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<NonMaxSuppressionV4Options>(end);
+    auto o = ::flatbuffers::Offset<NonMaxSuppressionV4Options>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<NonMaxSuppressionV4Options> CreateNonMaxSuppressionV4Options(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<NonMaxSuppressionV4Options> CreateNonMaxSuppressionV4Options(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   NonMaxSuppressionV4OptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<NonMaxSuppressionV4Options> CreateNonMaxSuppressionV4Options(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<NonMaxSuppressionV4Options> CreateNonMaxSuppressionV4Options(::flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct NonMaxSuppressionV5OptionsT : public flatbuffers::NativeTable {
+struct NonMaxSuppressionV5OptionsT : public ::flatbuffers::NativeTable {
   typedef NonMaxSuppressionV5Options TableType;
 };
 
-struct NonMaxSuppressionV5Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct NonMaxSuppressionV5Options FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef NonMaxSuppressionV5OptionsT NativeTableType;
   typedef NonMaxSuppressionV5OptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  NonMaxSuppressionV5OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(NonMaxSuppressionV5OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<NonMaxSuppressionV5Options> Pack(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  NonMaxSuppressionV5OptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(NonMaxSuppressionV5OptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<NonMaxSuppressionV5Options> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct NonMaxSuppressionV5OptionsBuilder {
   typedef NonMaxSuppressionV5Options Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit NonMaxSuppressionV5OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit NonMaxSuppressionV5OptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<NonMaxSuppressionV5Options> Finish() {
+  ::flatbuffers::Offset<NonMaxSuppressionV5Options> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<NonMaxSuppressionV5Options>(end);
+    auto o = ::flatbuffers::Offset<NonMaxSuppressionV5Options>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<NonMaxSuppressionV5Options> CreateNonMaxSuppressionV5Options(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<NonMaxSuppressionV5Options> CreateNonMaxSuppressionV5Options(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   NonMaxSuppressionV5OptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<NonMaxSuppressionV5Options> CreateNonMaxSuppressionV5Options(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<NonMaxSuppressionV5Options> CreateNonMaxSuppressionV5Options(::flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct ScatterNdOptionsT : public flatbuffers::NativeTable {
+struct ScatterNdOptionsT : public ::flatbuffers::NativeTable {
   typedef ScatterNdOptions TableType;
 };
 
-struct ScatterNdOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct ScatterNdOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef ScatterNdOptionsT NativeTableType;
   typedef ScatterNdOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  ScatterNdOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(ScatterNdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<ScatterNdOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  ScatterNdOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(ScatterNdOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<ScatterNdOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct ScatterNdOptionsBuilder {
   typedef ScatterNdOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit ScatterNdOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit ScatterNdOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<ScatterNdOptions> Finish() {
+  ::flatbuffers::Offset<ScatterNdOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<ScatterNdOptions>(end);
+    auto o = ::flatbuffers::Offset<ScatterNdOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<ScatterNdOptions> CreateScatterNdOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<ScatterNdOptions> CreateScatterNdOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   ScatterNdOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<ScatterNdOptions> CreateScatterNdOptions(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<ScatterNdOptions> CreateScatterNdOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct SelectV2OptionsT : public flatbuffers::NativeTable {
+struct SelectV2OptionsT : public ::flatbuffers::NativeTable {
   typedef SelectV2Options TableType;
 };
 
-struct SelectV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct SelectV2Options FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef SelectV2OptionsT NativeTableType;
   typedef SelectV2OptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  SelectV2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(SelectV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<SelectV2Options> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  SelectV2OptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(SelectV2OptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<SelectV2Options> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct SelectV2OptionsBuilder {
   typedef SelectV2Options Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit SelectV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit SelectV2OptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<SelectV2Options> Finish() {
+  ::flatbuffers::Offset<SelectV2Options> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<SelectV2Options>(end);
+    auto o = ::flatbuffers::Offset<SelectV2Options>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<SelectV2Options> CreateSelectV2Options(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<SelectV2Options> CreateSelectV2Options(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   SelectV2OptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<SelectV2Options> CreateSelectV2Options(flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<SelectV2Options> CreateSelectV2Options(::flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct DensifyOptionsT : public flatbuffers::NativeTable {
+struct DensifyOptionsT : public ::flatbuffers::NativeTable {
   typedef DensifyOptions TableType;
 };
 
-struct DensifyOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct DensifyOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef DensifyOptionsT NativeTableType;
   typedef DensifyOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  DensifyOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(DensifyOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<DensifyOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  DensifyOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(DensifyOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<DensifyOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct DensifyOptionsBuilder {
   typedef DensifyOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit DensifyOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit DensifyOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<DensifyOptions> Finish() {
+  ::flatbuffers::Offset<DensifyOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<DensifyOptions>(end);
+    auto o = ::flatbuffers::Offset<DensifyOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<DensifyOptions> CreateDensifyOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<DensifyOptions> CreateDensifyOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   DensifyOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<DensifyOptions> CreateDensifyOptions(flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<DensifyOptions> CreateDensifyOptions(::flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct SegmentSumOptionsT : public flatbuffers::NativeTable {
+struct SegmentSumOptionsT : public ::flatbuffers::NativeTable {
   typedef SegmentSumOptions TableType;
 };
 
-struct SegmentSumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct SegmentSumOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef SegmentSumOptionsT NativeTableType;
   typedef SegmentSumOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  SegmentSumOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(SegmentSumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<SegmentSumOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  SegmentSumOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(SegmentSumOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<SegmentSumOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct SegmentSumOptionsBuilder {
   typedef SegmentSumOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit SegmentSumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit SegmentSumOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<SegmentSumOptions> Finish() {
+  ::flatbuffers::Offset<SegmentSumOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<SegmentSumOptions>(end);
+    auto o = ::flatbuffers::Offset<SegmentSumOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<SegmentSumOptions> CreateSegmentSumOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<SegmentSumOptions> CreateSegmentSumOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   SegmentSumOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<SegmentSumOptions> CreateSegmentSumOptions(flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<SegmentSumOptions> CreateSegmentSumOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct BatchMatMulOptionsT : public flatbuffers::NativeTable {
+struct BatchMatMulOptionsT : public ::flatbuffers::NativeTable {
   typedef BatchMatMulOptions TableType;
   bool adj_x = false;
   bool adj_y = false;
   bool asymmetric_quantize_inputs = false;
 };
 
-struct BatchMatMulOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct BatchMatMulOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef BatchMatMulOptionsT NativeTableType;
   typedef BatchMatMulOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -10658,22 +13328,22 @@
   bool asymmetric_quantize_inputs() const {
     return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<uint8_t>(verifier, VT_ADJ_X, 1) &&
            VerifyField<uint8_t>(verifier, VT_ADJ_Y, 1) &&
            VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) &&
            verifier.EndTable();
   }
-  BatchMatMulOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(BatchMatMulOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<BatchMatMulOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  BatchMatMulOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(BatchMatMulOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<BatchMatMulOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct BatchMatMulOptionsBuilder {
   typedef BatchMatMulOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_adj_x(bool adj_x) {
     fbb_.AddElement<uint8_t>(BatchMatMulOptions::VT_ADJ_X, static_cast<uint8_t>(adj_x), 0);
   }
@@ -10683,19 +13353,19 @@
   void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) {
     fbb_.AddElement<uint8_t>(BatchMatMulOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
   }
-  explicit BatchMatMulOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit BatchMatMulOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<BatchMatMulOptions> Finish() {
+  ::flatbuffers::Offset<BatchMatMulOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<BatchMatMulOptions>(end);
+    auto o = ::flatbuffers::Offset<BatchMatMulOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<BatchMatMulOptions> CreateBatchMatMulOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<BatchMatMulOptions> CreateBatchMatMulOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     bool adj_x = false,
     bool adj_y = false,
     bool asymmetric_quantize_inputs = false) {
@@ -10706,15 +13376,15 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<BatchMatMulOptions> CreateBatchMatMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<BatchMatMulOptions> CreateBatchMatMulOptions(::flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct CumsumOptionsT : public flatbuffers::NativeTable {
+struct CumsumOptionsT : public ::flatbuffers::NativeTable {
   typedef CumsumOptions TableType;
   bool exclusive = false;
   bool reverse = false;
 };
 
-struct CumsumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct CumsumOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef CumsumOptionsT NativeTableType;
   typedef CumsumOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -10727,40 +13397,40 @@
   bool reverse() const {
     return GetField<uint8_t>(VT_REVERSE, 0) != 0;
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<uint8_t>(verifier, VT_EXCLUSIVE, 1) &&
            VerifyField<uint8_t>(verifier, VT_REVERSE, 1) &&
            verifier.EndTable();
   }
-  CumsumOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(CumsumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<CumsumOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  CumsumOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(CumsumOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<CumsumOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct CumsumOptionsBuilder {
   typedef CumsumOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_exclusive(bool exclusive) {
     fbb_.AddElement<uint8_t>(CumsumOptions::VT_EXCLUSIVE, static_cast<uint8_t>(exclusive), 0);
   }
   void add_reverse(bool reverse) {
     fbb_.AddElement<uint8_t>(CumsumOptions::VT_REVERSE, static_cast<uint8_t>(reverse), 0);
   }
-  explicit CumsumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit CumsumOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<CumsumOptions> Finish() {
+  ::flatbuffers::Offset<CumsumOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<CumsumOptions>(end);
+    auto o = ::flatbuffers::Offset<CumsumOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<CumsumOptions> CreateCumsumOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<CumsumOptions> CreateCumsumOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     bool exclusive = false,
     bool reverse = false) {
   CumsumOptionsBuilder builder_(_fbb);
@@ -10769,94 +13439,94 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<CumsumOptions> CreateCumsumOptions(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<CumsumOptions> CreateCumsumOptions(::flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct BroadcastToOptionsT : public flatbuffers::NativeTable {
+struct BroadcastToOptionsT : public ::flatbuffers::NativeTable {
   typedef BroadcastToOptions TableType;
 };
 
-struct BroadcastToOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct BroadcastToOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef BroadcastToOptionsT NativeTableType;
   typedef BroadcastToOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  BroadcastToOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(BroadcastToOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<BroadcastToOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  BroadcastToOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(BroadcastToOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<BroadcastToOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct BroadcastToOptionsBuilder {
   typedef BroadcastToOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit BroadcastToOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit BroadcastToOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<BroadcastToOptions> Finish() {
+  ::flatbuffers::Offset<BroadcastToOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<BroadcastToOptions>(end);
+    auto o = ::flatbuffers::Offset<BroadcastToOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<BroadcastToOptions> CreateBroadcastToOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<BroadcastToOptions> CreateBroadcastToOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   BroadcastToOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<BroadcastToOptions> CreateBroadcastToOptions(flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<BroadcastToOptions> CreateBroadcastToOptions(::flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct Rfft2dOptionsT : public flatbuffers::NativeTable {
+struct Rfft2dOptionsT : public ::flatbuffers::NativeTable {
   typedef Rfft2dOptions TableType;
 };
 
-struct Rfft2dOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct Rfft2dOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef Rfft2dOptionsT NativeTableType;
   typedef Rfft2dOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  Rfft2dOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(Rfft2dOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<Rfft2dOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  Rfft2dOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(Rfft2dOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<Rfft2dOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct Rfft2dOptionsBuilder {
   typedef Rfft2dOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit Rfft2dOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit Rfft2dOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<Rfft2dOptions> Finish() {
+  ::flatbuffers::Offset<Rfft2dOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<Rfft2dOptions>(end);
+    auto o = ::flatbuffers::Offset<Rfft2dOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<Rfft2dOptions> CreateRfft2dOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<Rfft2dOptions> CreateRfft2dOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   Rfft2dOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<Rfft2dOptions> CreateRfft2dOptions(flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<Rfft2dOptions> CreateRfft2dOptions(::flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct HashtableOptionsT : public flatbuffers::NativeTable {
+struct HashtableOptionsT : public ::flatbuffers::NativeTable {
   typedef HashtableOptions TableType;
   int32_t table_id = 0;
   tflite::TensorType key_dtype = tflite::TensorType_FLOAT32;
   tflite::TensorType value_dtype = tflite::TensorType_FLOAT32;
 };
 
-struct HashtableOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct HashtableOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef HashtableOptionsT NativeTableType;
   typedef HashtableOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -10873,22 +13543,22 @@
   tflite::TensorType value_dtype() const {
     return static_cast<tflite::TensorType>(GetField<int8_t>(VT_VALUE_DTYPE, 0));
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int32_t>(verifier, VT_TABLE_ID, 4) &&
            VerifyField<int8_t>(verifier, VT_KEY_DTYPE, 1) &&
            VerifyField<int8_t>(verifier, VT_VALUE_DTYPE, 1) &&
            verifier.EndTable();
   }
-  HashtableOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(HashtableOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<HashtableOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  HashtableOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(HashtableOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<HashtableOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct HashtableOptionsBuilder {
   typedef HashtableOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_table_id(int32_t table_id) {
     fbb_.AddElement<int32_t>(HashtableOptions::VT_TABLE_ID, table_id, 0);
   }
@@ -10898,19 +13568,19 @@
   void add_value_dtype(tflite::TensorType value_dtype) {
     fbb_.AddElement<int8_t>(HashtableOptions::VT_VALUE_DTYPE, static_cast<int8_t>(value_dtype), 0);
   }
-  explicit HashtableOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit HashtableOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<HashtableOptions> Finish() {
+  ::flatbuffers::Offset<HashtableOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<HashtableOptions>(end);
+    auto o = ::flatbuffers::Offset<HashtableOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<HashtableOptions> CreateHashtableOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<HashtableOptions> CreateHashtableOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     int32_t table_id = 0,
     tflite::TensorType key_dtype = tflite::TensorType_FLOAT32,
     tflite::TensorType value_dtype = tflite::TensorType_FLOAT32) {
@@ -10921,145 +13591,145 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<HashtableOptions> CreateHashtableOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<HashtableOptions> CreateHashtableOptions(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct HashtableFindOptionsT : public flatbuffers::NativeTable {
+struct HashtableFindOptionsT : public ::flatbuffers::NativeTable {
   typedef HashtableFindOptions TableType;
 };
 
-struct HashtableFindOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct HashtableFindOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef HashtableFindOptionsT NativeTableType;
   typedef HashtableFindOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  HashtableFindOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(HashtableFindOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<HashtableFindOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  HashtableFindOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(HashtableFindOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<HashtableFindOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct HashtableFindOptionsBuilder {
   typedef HashtableFindOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit HashtableFindOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit HashtableFindOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<HashtableFindOptions> Finish() {
+  ::flatbuffers::Offset<HashtableFindOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<HashtableFindOptions>(end);
+    auto o = ::flatbuffers::Offset<HashtableFindOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<HashtableFindOptions> CreateHashtableFindOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<HashtableFindOptions> CreateHashtableFindOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   HashtableFindOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<HashtableFindOptions> CreateHashtableFindOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<HashtableFindOptions> CreateHashtableFindOptions(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct HashtableImportOptionsT : public flatbuffers::NativeTable {
+struct HashtableImportOptionsT : public ::flatbuffers::NativeTable {
   typedef HashtableImportOptions TableType;
 };
 
-struct HashtableImportOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct HashtableImportOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef HashtableImportOptionsT NativeTableType;
   typedef HashtableImportOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  HashtableImportOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(HashtableImportOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<HashtableImportOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableImportOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  HashtableImportOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(HashtableImportOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<HashtableImportOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableImportOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct HashtableImportOptionsBuilder {
   typedef HashtableImportOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit HashtableImportOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit HashtableImportOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<HashtableImportOptions> Finish() {
+  ::flatbuffers::Offset<HashtableImportOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<HashtableImportOptions>(end);
+    auto o = ::flatbuffers::Offset<HashtableImportOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<HashtableImportOptions> CreateHashtableImportOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<HashtableImportOptions> CreateHashtableImportOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   HashtableImportOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<HashtableImportOptions> CreateHashtableImportOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableImportOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<HashtableImportOptions> CreateHashtableImportOptions(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableImportOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct HashtableSizeOptionsT : public flatbuffers::NativeTable {
+struct HashtableSizeOptionsT : public ::flatbuffers::NativeTable {
   typedef HashtableSizeOptions TableType;
 };
 
-struct HashtableSizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct HashtableSizeOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef HashtableSizeOptionsT NativeTableType;
   typedef HashtableSizeOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  HashtableSizeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(HashtableSizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<HashtableSizeOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  HashtableSizeOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(HashtableSizeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<HashtableSizeOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct HashtableSizeOptionsBuilder {
   typedef HashtableSizeOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit HashtableSizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit HashtableSizeOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<HashtableSizeOptions> Finish() {
+  ::flatbuffers::Offset<HashtableSizeOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<HashtableSizeOptions>(end);
+    auto o = ::flatbuffers::Offset<HashtableSizeOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<HashtableSizeOptions> CreateHashtableSizeOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<HashtableSizeOptions> CreateHashtableSizeOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   HashtableSizeOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<HashtableSizeOptions> CreateHashtableSizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<HashtableSizeOptions> CreateHashtableSizeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct VarHandleOptionsT : public flatbuffers::NativeTable {
+struct VarHandleOptionsT : public ::flatbuffers::NativeTable {
   typedef VarHandleOptions TableType;
   std::string container{};
   std::string shared_name{};
 };
 
-struct VarHandleOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct VarHandleOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef VarHandleOptionsT NativeTableType;
   typedef VarHandleOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
     VT_CONTAINER = 4,
     VT_SHARED_NAME = 6
   };
-  const flatbuffers::String *container() const {
-    return GetPointer<const flatbuffers::String *>(VT_CONTAINER);
+  const ::flatbuffers::String *container() const {
+    return GetPointer<const ::flatbuffers::String *>(VT_CONTAINER);
   }
-  const flatbuffers::String *shared_name() const {
-    return GetPointer<const flatbuffers::String *>(VT_SHARED_NAME);
+  const ::flatbuffers::String *shared_name() const {
+    return GetPointer<const ::flatbuffers::String *>(VT_SHARED_NAME);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyOffset(verifier, VT_CONTAINER) &&
            verifier.VerifyString(container()) &&
@@ -11067,44 +13737,44 @@
            verifier.VerifyString(shared_name()) &&
            verifier.EndTable();
   }
-  VarHandleOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(VarHandleOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<VarHandleOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const VarHandleOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  VarHandleOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(VarHandleOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<VarHandleOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const VarHandleOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct VarHandleOptionsBuilder {
   typedef VarHandleOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_container(flatbuffers::Offset<flatbuffers::String> container) {
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_container(::flatbuffers::Offset<::flatbuffers::String> container) {
     fbb_.AddOffset(VarHandleOptions::VT_CONTAINER, container);
   }
-  void add_shared_name(flatbuffers::Offset<flatbuffers::String> shared_name) {
+  void add_shared_name(::flatbuffers::Offset<::flatbuffers::String> shared_name) {
     fbb_.AddOffset(VarHandleOptions::VT_SHARED_NAME, shared_name);
   }
-  explicit VarHandleOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit VarHandleOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<VarHandleOptions> Finish() {
+  ::flatbuffers::Offset<VarHandleOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<VarHandleOptions>(end);
+    auto o = ::flatbuffers::Offset<VarHandleOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<VarHandleOptions> CreateVarHandleOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::String> container = 0,
-    flatbuffers::Offset<flatbuffers::String> shared_name = 0) {
+inline ::flatbuffers::Offset<VarHandleOptions> CreateVarHandleOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    ::flatbuffers::Offset<::flatbuffers::String> container = 0,
+    ::flatbuffers::Offset<::flatbuffers::String> shared_name = 0) {
   VarHandleOptionsBuilder builder_(_fbb);
   builder_.add_shared_name(shared_name);
   builder_.add_container(container);
   return builder_.Finish();
 }
 
-inline flatbuffers::Offset<VarHandleOptions> CreateVarHandleOptionsDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<VarHandleOptions> CreateVarHandleOptionsDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     const char *container = nullptr,
     const char *shared_name = nullptr) {
   auto container__ = container ? _fbb.CreateString(container) : 0;
@@ -11115,93 +13785,93 @@
       shared_name__);
 }
 
-flatbuffers::Offset<VarHandleOptions> CreateVarHandleOptions(flatbuffers::FlatBufferBuilder &_fbb, const VarHandleOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<VarHandleOptions> CreateVarHandleOptions(::flatbuffers::FlatBufferBuilder &_fbb, const VarHandleOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct ReadVariableOptionsT : public flatbuffers::NativeTable {
+struct ReadVariableOptionsT : public ::flatbuffers::NativeTable {
   typedef ReadVariableOptions TableType;
 };
 
-struct ReadVariableOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct ReadVariableOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef ReadVariableOptionsT NativeTableType;
   typedef ReadVariableOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  ReadVariableOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(ReadVariableOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<ReadVariableOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReadVariableOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  ReadVariableOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(ReadVariableOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<ReadVariableOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ReadVariableOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct ReadVariableOptionsBuilder {
   typedef ReadVariableOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit ReadVariableOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit ReadVariableOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<ReadVariableOptions> Finish() {
+  ::flatbuffers::Offset<ReadVariableOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<ReadVariableOptions>(end);
+    auto o = ::flatbuffers::Offset<ReadVariableOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<ReadVariableOptions> CreateReadVariableOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<ReadVariableOptions> CreateReadVariableOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   ReadVariableOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<ReadVariableOptions> CreateReadVariableOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReadVariableOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<ReadVariableOptions> CreateReadVariableOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ReadVariableOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct AssignVariableOptionsT : public flatbuffers::NativeTable {
+struct AssignVariableOptionsT : public ::flatbuffers::NativeTable {
   typedef AssignVariableOptions TableType;
 };
 
-struct AssignVariableOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct AssignVariableOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef AssignVariableOptionsT NativeTableType;
   typedef AssignVariableOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  AssignVariableOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(AssignVariableOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<AssignVariableOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const AssignVariableOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  AssignVariableOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(AssignVariableOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<AssignVariableOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const AssignVariableOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct AssignVariableOptionsBuilder {
   typedef AssignVariableOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit AssignVariableOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit AssignVariableOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<AssignVariableOptions> Finish() {
+  ::flatbuffers::Offset<AssignVariableOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<AssignVariableOptions>(end);
+    auto o = ::flatbuffers::Offset<AssignVariableOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<AssignVariableOptions> CreateAssignVariableOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<AssignVariableOptions> CreateAssignVariableOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   AssignVariableOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<AssignVariableOptions> CreateAssignVariableOptions(flatbuffers::FlatBufferBuilder &_fbb, const AssignVariableOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<AssignVariableOptions> CreateAssignVariableOptions(::flatbuffers::FlatBufferBuilder &_fbb, const AssignVariableOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct RandomOptionsT : public flatbuffers::NativeTable {
+struct RandomOptionsT : public ::flatbuffers::NativeTable {
   typedef RandomOptions TableType;
   int64_t seed = 0;
   int64_t seed2 = 0;
 };
 
-struct RandomOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct RandomOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef RandomOptionsT NativeTableType;
   typedef RandomOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -11214,40 +13884,40 @@
   int64_t seed2() const {
     return GetField<int64_t>(VT_SEED2, 0);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int64_t>(verifier, VT_SEED, 8) &&
            VerifyField<int64_t>(verifier, VT_SEED2, 8) &&
            verifier.EndTable();
   }
-  RandomOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(RandomOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<RandomOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const RandomOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  RandomOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(RandomOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<RandomOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const RandomOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct RandomOptionsBuilder {
   typedef RandomOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_seed(int64_t seed) {
     fbb_.AddElement<int64_t>(RandomOptions::VT_SEED, seed, 0);
   }
   void add_seed2(int64_t seed2) {
     fbb_.AddElement<int64_t>(RandomOptions::VT_SEED2, seed2, 0);
   }
-  explicit RandomOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit RandomOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<RandomOptions> Finish() {
+  ::flatbuffers::Offset<RandomOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<RandomOptions>(end);
+    auto o = ::flatbuffers::Offset<RandomOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<RandomOptions> CreateRandomOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<RandomOptions> CreateRandomOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     int64_t seed = 0,
     int64_t seed2 = 0) {
   RandomOptionsBuilder builder_(_fbb);
@@ -11256,61 +13926,61 @@
   return builder_.Finish();
 }
 
-flatbuffers::Offset<RandomOptions> CreateRandomOptions(flatbuffers::FlatBufferBuilder &_fbb, const RandomOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<RandomOptions> CreateRandomOptions(::flatbuffers::FlatBufferBuilder &_fbb, const RandomOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct BucketizeOptionsT : public flatbuffers::NativeTable {
+struct BucketizeOptionsT : public ::flatbuffers::NativeTable {
   typedef BucketizeOptions TableType;
   std::vector<float> boundaries{};
 };
 
-struct BucketizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct BucketizeOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef BucketizeOptionsT NativeTableType;
   typedef BucketizeOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
     VT_BOUNDARIES = 4
   };
-  const flatbuffers::Vector<float> *boundaries() const {
-    return GetPointer<const flatbuffers::Vector<float> *>(VT_BOUNDARIES);
+  const ::flatbuffers::Vector<float> *boundaries() const {
+    return GetPointer<const ::flatbuffers::Vector<float> *>(VT_BOUNDARIES);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyOffset(verifier, VT_BOUNDARIES) &&
            verifier.VerifyVector(boundaries()) &&
            verifier.EndTable();
   }
-  BucketizeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(BucketizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<BucketizeOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const BucketizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  BucketizeOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(BucketizeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<BucketizeOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BucketizeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct BucketizeOptionsBuilder {
   typedef BucketizeOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_boundaries(flatbuffers::Offset<flatbuffers::Vector<float>> boundaries) {
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_boundaries(::flatbuffers::Offset<::flatbuffers::Vector<float>> boundaries) {
     fbb_.AddOffset(BucketizeOptions::VT_BOUNDARIES, boundaries);
   }
-  explicit BucketizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit BucketizeOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<BucketizeOptions> Finish() {
+  ::flatbuffers::Offset<BucketizeOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<BucketizeOptions>(end);
+    auto o = ::flatbuffers::Offset<BucketizeOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<BucketizeOptions> CreateBucketizeOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::Vector<float>> boundaries = 0) {
+inline ::flatbuffers::Offset<BucketizeOptions> CreateBucketizeOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    ::flatbuffers::Offset<::flatbuffers::Vector<float>> boundaries = 0) {
   BucketizeOptionsBuilder builder_(_fbb);
   builder_.add_boundaries(boundaries);
   return builder_.Finish();
 }
 
-inline flatbuffers::Offset<BucketizeOptions> CreateBucketizeOptionsDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<BucketizeOptions> CreateBucketizeOptionsDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     const std::vector<float> *boundaries = nullptr) {
   auto boundaries__ = boundaries ? _fbb.CreateVector<float>(*boundaries) : 0;
   return tflite::CreateBucketizeOptions(
@@ -11318,14 +13988,14 @@
       boundaries__);
 }
 
-flatbuffers::Offset<BucketizeOptions> CreateBucketizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const BucketizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<BucketizeOptions> CreateBucketizeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const BucketizeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct GeluOptionsT : public flatbuffers::NativeTable {
+struct GeluOptionsT : public ::flatbuffers::NativeTable {
   typedef GeluOptions TableType;
   bool approximate = false;
 };
 
-struct GeluOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct GeluOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef GeluOptionsT NativeTableType;
   typedef GeluOptionsBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -11334,435 +14004,526 @@
   bool approximate() const {
     return GetField<uint8_t>(VT_APPROXIMATE, 0) != 0;
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<uint8_t>(verifier, VT_APPROXIMATE, 1) &&
            verifier.EndTable();
   }
-  GeluOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(GeluOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<GeluOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const GeluOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  GeluOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(GeluOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<GeluOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const GeluOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct GeluOptionsBuilder {
   typedef GeluOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_approximate(bool approximate) {
     fbb_.AddElement<uint8_t>(GeluOptions::VT_APPROXIMATE, static_cast<uint8_t>(approximate), 0);
   }
-  explicit GeluOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit GeluOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<GeluOptions> Finish() {
+  ::flatbuffers::Offset<GeluOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<GeluOptions>(end);
+    auto o = ::flatbuffers::Offset<GeluOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<GeluOptions> CreateGeluOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<GeluOptions> CreateGeluOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     bool approximate = false) {
   GeluOptionsBuilder builder_(_fbb);
   builder_.add_approximate(approximate);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<GeluOptions> CreateGeluOptions(flatbuffers::FlatBufferBuilder &_fbb, const GeluOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<GeluOptions> CreateGeluOptions(::flatbuffers::FlatBufferBuilder &_fbb, const GeluOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct DynamicUpdateSliceOptionsT : public flatbuffers::NativeTable {
+struct DynamicUpdateSliceOptionsT : public ::flatbuffers::NativeTable {
   typedef DynamicUpdateSliceOptions TableType;
 };
 
-struct DynamicUpdateSliceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct DynamicUpdateSliceOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef DynamicUpdateSliceOptionsT NativeTableType;
   typedef DynamicUpdateSliceOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  DynamicUpdateSliceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(DynamicUpdateSliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<DynamicUpdateSliceOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const DynamicUpdateSliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  DynamicUpdateSliceOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(DynamicUpdateSliceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<DynamicUpdateSliceOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DynamicUpdateSliceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct DynamicUpdateSliceOptionsBuilder {
   typedef DynamicUpdateSliceOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit DynamicUpdateSliceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit DynamicUpdateSliceOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<DynamicUpdateSliceOptions> Finish() {
+  ::flatbuffers::Offset<DynamicUpdateSliceOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<DynamicUpdateSliceOptions>(end);
+    auto o = ::flatbuffers::Offset<DynamicUpdateSliceOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<DynamicUpdateSliceOptions> CreateDynamicUpdateSliceOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<DynamicUpdateSliceOptions> CreateDynamicUpdateSliceOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   DynamicUpdateSliceOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<DynamicUpdateSliceOptions> CreateDynamicUpdateSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const DynamicUpdateSliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<DynamicUpdateSliceOptions> CreateDynamicUpdateSliceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const DynamicUpdateSliceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct UnsortedSegmentProdOptionsT : public flatbuffers::NativeTable {
+struct UnsortedSegmentProdOptionsT : public ::flatbuffers::NativeTable {
   typedef UnsortedSegmentProdOptions TableType;
 };
 
-struct UnsortedSegmentProdOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct UnsortedSegmentProdOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef UnsortedSegmentProdOptionsT NativeTableType;
   typedef UnsortedSegmentProdOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  UnsortedSegmentProdOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(UnsortedSegmentProdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<UnsortedSegmentProdOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentProdOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  UnsortedSegmentProdOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(UnsortedSegmentProdOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<UnsortedSegmentProdOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentProdOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct UnsortedSegmentProdOptionsBuilder {
   typedef UnsortedSegmentProdOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit UnsortedSegmentProdOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit UnsortedSegmentProdOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<UnsortedSegmentProdOptions> Finish() {
+  ::flatbuffers::Offset<UnsortedSegmentProdOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<UnsortedSegmentProdOptions>(end);
+    auto o = ::flatbuffers::Offset<UnsortedSegmentProdOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<UnsortedSegmentProdOptions> CreateUnsortedSegmentProdOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<UnsortedSegmentProdOptions> CreateUnsortedSegmentProdOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   UnsortedSegmentProdOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<UnsortedSegmentProdOptions> CreateUnsortedSegmentProdOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentProdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<UnsortedSegmentProdOptions> CreateUnsortedSegmentProdOptions(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentProdOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct UnsortedSegmentMaxOptionsT : public flatbuffers::NativeTable {
+struct UnsortedSegmentMaxOptionsT : public ::flatbuffers::NativeTable {
   typedef UnsortedSegmentMaxOptions TableType;
 };
 
-struct UnsortedSegmentMaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct UnsortedSegmentMaxOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef UnsortedSegmentMaxOptionsT NativeTableType;
   typedef UnsortedSegmentMaxOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  UnsortedSegmentMaxOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(UnsortedSegmentMaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<UnsortedSegmentMaxOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  UnsortedSegmentMaxOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(UnsortedSegmentMaxOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<UnsortedSegmentMaxOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMaxOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct UnsortedSegmentMaxOptionsBuilder {
   typedef UnsortedSegmentMaxOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit UnsortedSegmentMaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit UnsortedSegmentMaxOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<UnsortedSegmentMaxOptions> Finish() {
+  ::flatbuffers::Offset<UnsortedSegmentMaxOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<UnsortedSegmentMaxOptions>(end);
+    auto o = ::flatbuffers::Offset<UnsortedSegmentMaxOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<UnsortedSegmentMaxOptions> CreateUnsortedSegmentMaxOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<UnsortedSegmentMaxOptions> CreateUnsortedSegmentMaxOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   UnsortedSegmentMaxOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<UnsortedSegmentMaxOptions> CreateUnsortedSegmentMaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<UnsortedSegmentMaxOptions> CreateUnsortedSegmentMaxOptions(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMaxOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct UnsortedSegmentSumOptionsT : public flatbuffers::NativeTable {
+struct UnsortedSegmentSumOptionsT : public ::flatbuffers::NativeTable {
   typedef UnsortedSegmentSumOptions TableType;
 };
 
-struct UnsortedSegmentSumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct UnsortedSegmentSumOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef UnsortedSegmentSumOptionsT NativeTableType;
   typedef UnsortedSegmentSumOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  UnsortedSegmentSumOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(UnsortedSegmentSumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<UnsortedSegmentSumOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentSumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  UnsortedSegmentSumOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(UnsortedSegmentSumOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<UnsortedSegmentSumOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentSumOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct UnsortedSegmentSumOptionsBuilder {
   typedef UnsortedSegmentSumOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit UnsortedSegmentSumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit UnsortedSegmentSumOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<UnsortedSegmentSumOptions> Finish() {
+  ::flatbuffers::Offset<UnsortedSegmentSumOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<UnsortedSegmentSumOptions>(end);
+    auto o = ::flatbuffers::Offset<UnsortedSegmentSumOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<UnsortedSegmentSumOptions> CreateUnsortedSegmentSumOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<UnsortedSegmentSumOptions> CreateUnsortedSegmentSumOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   UnsortedSegmentSumOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<UnsortedSegmentSumOptions> CreateUnsortedSegmentSumOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentSumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<UnsortedSegmentSumOptions> CreateUnsortedSegmentSumOptions(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentSumOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct ATan2OptionsT : public flatbuffers::NativeTable {
+struct ATan2OptionsT : public ::flatbuffers::NativeTable {
   typedef ATan2Options TableType;
 };
 
-struct ATan2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct ATan2Options FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef ATan2OptionsT NativeTableType;
   typedef ATan2OptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  ATan2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(ATan2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<ATan2Options> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ATan2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  ATan2OptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(ATan2OptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<ATan2Options> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ATan2OptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct ATan2OptionsBuilder {
   typedef ATan2Options Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit ATan2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit ATan2OptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<ATan2Options> Finish() {
+  ::flatbuffers::Offset<ATan2Options> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<ATan2Options>(end);
+    auto o = ::flatbuffers::Offset<ATan2Options>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<ATan2Options> CreateATan2Options(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<ATan2Options> CreateATan2Options(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   ATan2OptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<ATan2Options> CreateATan2Options(flatbuffers::FlatBufferBuilder &_fbb, const ATan2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<ATan2Options> CreateATan2Options(::flatbuffers::FlatBufferBuilder &_fbb, const ATan2OptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct UnsortedSegmentMinOptionsT : public flatbuffers::NativeTable {
+struct UnsortedSegmentMinOptionsT : public ::flatbuffers::NativeTable {
   typedef UnsortedSegmentMinOptions TableType;
 };
 
-struct UnsortedSegmentMinOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct UnsortedSegmentMinOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef UnsortedSegmentMinOptionsT NativeTableType;
   typedef UnsortedSegmentMinOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  UnsortedSegmentMinOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(UnsortedSegmentMinOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<UnsortedSegmentMinOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMinOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  UnsortedSegmentMinOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(UnsortedSegmentMinOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<UnsortedSegmentMinOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMinOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct UnsortedSegmentMinOptionsBuilder {
   typedef UnsortedSegmentMinOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit UnsortedSegmentMinOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit UnsortedSegmentMinOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<UnsortedSegmentMinOptions> Finish() {
+  ::flatbuffers::Offset<UnsortedSegmentMinOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<UnsortedSegmentMinOptions>(end);
+    auto o = ::flatbuffers::Offset<UnsortedSegmentMinOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<UnsortedSegmentMinOptions> CreateUnsortedSegmentMinOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<UnsortedSegmentMinOptions> CreateUnsortedSegmentMinOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   UnsortedSegmentMinOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<UnsortedSegmentMinOptions> CreateUnsortedSegmentMinOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMinOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<UnsortedSegmentMinOptions> CreateUnsortedSegmentMinOptions(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMinOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct SignOptionsT : public flatbuffers::NativeTable {
+struct SignOptionsT : public ::flatbuffers::NativeTable {
   typedef SignOptions TableType;
 };
 
-struct SignOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct SignOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef SignOptionsT NativeTableType;
   typedef SignOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  SignOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(SignOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<SignOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SignOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  SignOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(SignOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<SignOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SignOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct SignOptionsBuilder {
   typedef SignOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit SignOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit SignOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<SignOptions> Finish() {
+  ::flatbuffers::Offset<SignOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<SignOptions>(end);
+    auto o = ::flatbuffers::Offset<SignOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<SignOptions> CreateSignOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<SignOptions> CreateSignOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   SignOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<SignOptions> CreateSignOptions(flatbuffers::FlatBufferBuilder &_fbb, const SignOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<SignOptions> CreateSignOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SignOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct BitcastOptionsT : public flatbuffers::NativeTable {
+struct BitcastOptionsT : public ::flatbuffers::NativeTable {
   typedef BitcastOptions TableType;
 };
 
-struct BitcastOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct BitcastOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef BitcastOptionsT NativeTableType;
   typedef BitcastOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  BitcastOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(BitcastOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<BitcastOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const BitcastOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  BitcastOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(BitcastOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<BitcastOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BitcastOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct BitcastOptionsBuilder {
   typedef BitcastOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit BitcastOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit BitcastOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<BitcastOptions> Finish() {
+  ::flatbuffers::Offset<BitcastOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<BitcastOptions>(end);
+    auto o = ::flatbuffers::Offset<BitcastOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<BitcastOptions> CreateBitcastOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<BitcastOptions> CreateBitcastOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   BitcastOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<BitcastOptions> CreateBitcastOptions(flatbuffers::FlatBufferBuilder &_fbb, const BitcastOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<BitcastOptions> CreateBitcastOptions(::flatbuffers::FlatBufferBuilder &_fbb, const BitcastOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct BitwiseXorOptionsT : public flatbuffers::NativeTable {
+struct BitwiseXorOptionsT : public ::flatbuffers::NativeTable {
   typedef BitwiseXorOptions TableType;
 };
 
-struct BitwiseXorOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct BitwiseXorOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef BitwiseXorOptionsT NativeTableType;
   typedef BitwiseXorOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  BitwiseXorOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(BitwiseXorOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<BitwiseXorOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const BitwiseXorOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  BitwiseXorOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(BitwiseXorOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<BitwiseXorOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BitwiseXorOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct BitwiseXorOptionsBuilder {
   typedef BitwiseXorOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit BitwiseXorOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit BitwiseXorOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<BitwiseXorOptions> Finish() {
+  ::flatbuffers::Offset<BitwiseXorOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<BitwiseXorOptions>(end);
+    auto o = ::flatbuffers::Offset<BitwiseXorOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<BitwiseXorOptions> CreateBitwiseXorOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<BitwiseXorOptions> CreateBitwiseXorOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   BitwiseXorOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<BitwiseXorOptions> CreateBitwiseXorOptions(flatbuffers::FlatBufferBuilder &_fbb, const BitwiseXorOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<BitwiseXorOptions> CreateBitwiseXorOptions(::flatbuffers::FlatBufferBuilder &_fbb, const BitwiseXorOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct RightShiftOptionsT : public flatbuffers::NativeTable {
+struct RightShiftOptionsT : public ::flatbuffers::NativeTable {
   typedef RightShiftOptions TableType;
 };
 
-struct RightShiftOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct RightShiftOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef RightShiftOptionsT NativeTableType;
   typedef RightShiftOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            verifier.EndTable();
   }
-  RightShiftOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(RightShiftOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<RightShiftOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const RightShiftOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  RightShiftOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(RightShiftOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<RightShiftOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const RightShiftOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct RightShiftOptionsBuilder {
   typedef RightShiftOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit RightShiftOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit RightShiftOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<RightShiftOptions> Finish() {
+  ::flatbuffers::Offset<RightShiftOptions> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<RightShiftOptions>(end);
+    auto o = ::flatbuffers::Offset<RightShiftOptions>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<RightShiftOptions> CreateRightShiftOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
+inline ::flatbuffers::Offset<RightShiftOptions> CreateRightShiftOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
   RightShiftOptionsBuilder builder_(_fbb);
   return builder_.Finish();
 }
 
-flatbuffers::Offset<RightShiftOptions> CreateRightShiftOptions(flatbuffers::FlatBufferBuilder &_fbb, const RightShiftOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<RightShiftOptions> CreateRightShiftOptions(::flatbuffers::FlatBufferBuilder &_fbb, const RightShiftOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct OperatorCodeT : public flatbuffers::NativeTable {
+struct DilateOptionsT : public ::flatbuffers::NativeTable {
+  typedef DilateOptions TableType;
+};
+
+struct DilateOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
+  typedef DilateOptionsT NativeTableType;
+  typedef DilateOptionsBuilder Builder;
+  bool Verify(::flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           verifier.EndTable();
+  }
+  DilateOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(DilateOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<DilateOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DilateOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct DilateOptionsBuilder {
+  typedef DilateOptions Table;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  explicit DilateOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  ::flatbuffers::Offset<DilateOptions> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = ::flatbuffers::Offset<DilateOptions>(end);
+    return o;
+  }
+};
+
+inline ::flatbuffers::Offset<DilateOptions> CreateDilateOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb) {
+  DilateOptionsBuilder builder_(_fbb);
+  return builder_.Finish();
+}
+
+::flatbuffers::Offset<DilateOptions> CreateDilateOptions(::flatbuffers::FlatBufferBuilder &_fbb, const DilateOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct ReduceWindowOptionsT : public ::flatbuffers::NativeTable {
+  typedef ReduceWindowOptions TableType;
+  tflite::ReduceWindowFunction reduce_function = tflite::ReduceWindowFunction_UNSUPPORTED;
+};
+
+struct ReduceWindowOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
+  typedef ReduceWindowOptionsT NativeTableType;
+  typedef ReduceWindowOptionsBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_REDUCE_FUNCTION = 4
+  };
+  tflite::ReduceWindowFunction reduce_function() const {
+    return static_cast<tflite::ReduceWindowFunction>(GetField<int32_t>(VT_REDUCE_FUNCTION, 0));
+  }
+  bool Verify(::flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyField<int32_t>(verifier, VT_REDUCE_FUNCTION, 4) &&
+           verifier.EndTable();
+  }
+  ReduceWindowOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(ReduceWindowOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<ReduceWindowOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ReduceWindowOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct ReduceWindowOptionsBuilder {
+  typedef ReduceWindowOptions Table;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_reduce_function(tflite::ReduceWindowFunction reduce_function) {
+    fbb_.AddElement<int32_t>(ReduceWindowOptions::VT_REDUCE_FUNCTION, static_cast<int32_t>(reduce_function), 0);
+  }
+  explicit ReduceWindowOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  ::flatbuffers::Offset<ReduceWindowOptions> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = ::flatbuffers::Offset<ReduceWindowOptions>(end);
+    return o;
+  }
+};
+
+inline ::flatbuffers::Offset<ReduceWindowOptions> CreateReduceWindowOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    tflite::ReduceWindowFunction reduce_function = tflite::ReduceWindowFunction_UNSUPPORTED) {
+  ReduceWindowOptionsBuilder builder_(_fbb);
+  builder_.add_reduce_function(reduce_function);
+  return builder_.Finish();
+}
+
+::flatbuffers::Offset<ReduceWindowOptions> CreateReduceWindowOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ReduceWindowOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct OperatorCodeT : public ::flatbuffers::NativeTable {
   typedef OperatorCode TableType;
   int8_t deprecated_builtin_code = 0;
   std::string custom_code{};
@@ -11770,7 +14531,7 @@
   tflite::BuiltinOperator builtin_code = tflite::BuiltinOperator_ADD;
 };
 
-struct OperatorCode FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct OperatorCode FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef OperatorCodeT NativeTableType;
   typedef OperatorCodeBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -11782,8 +14543,8 @@
   int8_t deprecated_builtin_code() const {
     return GetField<int8_t>(VT_DEPRECATED_BUILTIN_CODE, 0);
   }
-  const flatbuffers::String *custom_code() const {
-    return GetPointer<const flatbuffers::String *>(VT_CUSTOM_CODE);
+  const ::flatbuffers::String *custom_code() const {
+    return GetPointer<const ::flatbuffers::String *>(VT_CUSTOM_CODE);
   }
   int32_t version() const {
     return GetField<int32_t>(VT_VERSION, 1);
@@ -11791,7 +14552,7 @@
   tflite::BuiltinOperator builtin_code() const {
     return static_cast<tflite::BuiltinOperator>(GetField<int32_t>(VT_BUILTIN_CODE, 0));
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<int8_t>(verifier, VT_DEPRECATED_BUILTIN_CODE, 1) &&
            VerifyOffset(verifier, VT_CUSTOM_CODE) &&
@@ -11800,19 +14561,19 @@
            VerifyField<int32_t>(verifier, VT_BUILTIN_CODE, 4) &&
            verifier.EndTable();
   }
-  OperatorCodeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(OperatorCodeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<OperatorCode> Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  OperatorCodeT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(OperatorCodeT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<OperatorCode> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct OperatorCodeBuilder {
   typedef OperatorCode Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_deprecated_builtin_code(int8_t deprecated_builtin_code) {
     fbb_.AddElement<int8_t>(OperatorCode::VT_DEPRECATED_BUILTIN_CODE, deprecated_builtin_code, 0);
   }
-  void add_custom_code(flatbuffers::Offset<flatbuffers::String> custom_code) {
+  void add_custom_code(::flatbuffers::Offset<::flatbuffers::String> custom_code) {
     fbb_.AddOffset(OperatorCode::VT_CUSTOM_CODE, custom_code);
   }
   void add_version(int32_t version) {
@@ -11821,21 +14582,21 @@
   void add_builtin_code(tflite::BuiltinOperator builtin_code) {
     fbb_.AddElement<int32_t>(OperatorCode::VT_BUILTIN_CODE, static_cast<int32_t>(builtin_code), 0);
   }
-  explicit OperatorCodeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit OperatorCodeBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<OperatorCode> Finish() {
+  ::flatbuffers::Offset<OperatorCode> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<OperatorCode>(end);
+    auto o = ::flatbuffers::Offset<OperatorCode>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<OperatorCode> CreateOperatorCode(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<OperatorCode> CreateOperatorCode(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     int8_t deprecated_builtin_code = 0,
-    flatbuffers::Offset<flatbuffers::String> custom_code = 0,
+    ::flatbuffers::Offset<::flatbuffers::String> custom_code = 0,
     int32_t version = 1,
     tflite::BuiltinOperator builtin_code = tflite::BuiltinOperator_ADD) {
   OperatorCodeBuilder builder_(_fbb);
@@ -11846,8 +14607,8 @@
   return builder_.Finish();
 }
 
-inline flatbuffers::Offset<OperatorCode> CreateOperatorCodeDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<OperatorCode> CreateOperatorCodeDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     int8_t deprecated_builtin_code = 0,
     const char *custom_code = nullptr,
     int32_t version = 1,
@@ -11861,9 +14622,125 @@
       builtin_code);
 }
 
-flatbuffers::Offset<OperatorCode> CreateOperatorCode(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<OperatorCode> CreateOperatorCode(::flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct OperatorT : public flatbuffers::NativeTable {
+struct StableHLOCompositeOptionsT : public ::flatbuffers::NativeTable {
+  typedef StableHLOCompositeOptions TableType;
+  std::string name{};
+  int32_t decomposition_subgraph_index = 0;
+  std::vector<uint8_t> composite_attributes{};
+  tflite::CustomOptionsFormat composite_attributes_format = tflite::CustomOptionsFormat_FLEXBUFFERS;
+  int32_t version = 0;
+};
+
+struct StableHLOCompositeOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
+  typedef StableHLOCompositeOptionsT NativeTableType;
+  typedef StableHLOCompositeOptionsBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_NAME = 4,
+    VT_DECOMPOSITION_SUBGRAPH_INDEX = 6,
+    VT_COMPOSITE_ATTRIBUTES = 8,
+    VT_COMPOSITE_ATTRIBUTES_FORMAT = 10,
+    VT_VERSION = 12
+  };
+  const ::flatbuffers::String *name() const {
+    return GetPointer<const ::flatbuffers::String *>(VT_NAME);
+  }
+  int32_t decomposition_subgraph_index() const {
+    return GetField<int32_t>(VT_DECOMPOSITION_SUBGRAPH_INDEX, 0);
+  }
+  const ::flatbuffers::Vector<uint8_t> *composite_attributes() const {
+    return GetPointer<const ::flatbuffers::Vector<uint8_t> *>(VT_COMPOSITE_ATTRIBUTES);
+  }
+  tflite::CustomOptionsFormat composite_attributes_format() const {
+    return static_cast<tflite::CustomOptionsFormat>(GetField<int8_t>(VT_COMPOSITE_ATTRIBUTES_FORMAT, 0));
+  }
+  int32_t version() const {
+    return GetField<int32_t>(VT_VERSION, 0);
+  }
+  bool Verify(::flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyOffset(verifier, VT_NAME) &&
+           verifier.VerifyString(name()) &&
+           VerifyField<int32_t>(verifier, VT_DECOMPOSITION_SUBGRAPH_INDEX, 4) &&
+           VerifyOffset(verifier, VT_COMPOSITE_ATTRIBUTES) &&
+           verifier.VerifyVector(composite_attributes()) &&
+           VerifyField<int8_t>(verifier, VT_COMPOSITE_ATTRIBUTES_FORMAT, 1) &&
+           VerifyField<int32_t>(verifier, VT_VERSION, 4) &&
+           verifier.EndTable();
+  }
+  StableHLOCompositeOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(StableHLOCompositeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<StableHLOCompositeOptions> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StableHLOCompositeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct StableHLOCompositeOptionsBuilder {
+  typedef StableHLOCompositeOptions Table;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_name(::flatbuffers::Offset<::flatbuffers::String> name) {
+    fbb_.AddOffset(StableHLOCompositeOptions::VT_NAME, name);
+  }
+  void add_decomposition_subgraph_index(int32_t decomposition_subgraph_index) {
+    fbb_.AddElement<int32_t>(StableHLOCompositeOptions::VT_DECOMPOSITION_SUBGRAPH_INDEX, decomposition_subgraph_index, 0);
+  }
+  void add_composite_attributes(::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> composite_attributes) {
+    fbb_.AddOffset(StableHLOCompositeOptions::VT_COMPOSITE_ATTRIBUTES, composite_attributes);
+  }
+  void add_composite_attributes_format(tflite::CustomOptionsFormat composite_attributes_format) {
+    fbb_.AddElement<int8_t>(StableHLOCompositeOptions::VT_COMPOSITE_ATTRIBUTES_FORMAT, static_cast<int8_t>(composite_attributes_format), 0);
+  }
+  void add_version(int32_t version) {
+    fbb_.AddElement<int32_t>(StableHLOCompositeOptions::VT_VERSION, version, 0);
+  }
+  explicit StableHLOCompositeOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  ::flatbuffers::Offset<StableHLOCompositeOptions> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = ::flatbuffers::Offset<StableHLOCompositeOptions>(end);
+    return o;
+  }
+};
+
+inline ::flatbuffers::Offset<StableHLOCompositeOptions> CreateStableHLOCompositeOptions(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    ::flatbuffers::Offset<::flatbuffers::String> name = 0,
+    int32_t decomposition_subgraph_index = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> composite_attributes = 0,
+    tflite::CustomOptionsFormat composite_attributes_format = tflite::CustomOptionsFormat_FLEXBUFFERS,
+    int32_t version = 0) {
+  StableHLOCompositeOptionsBuilder builder_(_fbb);
+  builder_.add_version(version);
+  builder_.add_composite_attributes(composite_attributes);
+  builder_.add_decomposition_subgraph_index(decomposition_subgraph_index);
+  builder_.add_name(name);
+  builder_.add_composite_attributes_format(composite_attributes_format);
+  return builder_.Finish();
+}
+
+inline ::flatbuffers::Offset<StableHLOCompositeOptions> CreateStableHLOCompositeOptionsDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    const char *name = nullptr,
+    int32_t decomposition_subgraph_index = 0,
+    const std::vector<uint8_t> *composite_attributes = nullptr,
+    tflite::CustomOptionsFormat composite_attributes_format = tflite::CustomOptionsFormat_FLEXBUFFERS,
+    int32_t version = 0) {
+  auto name__ = name ? _fbb.CreateString(name) : 0;
+  auto composite_attributes__ = composite_attributes ? _fbb.CreateVector<uint8_t>(*composite_attributes) : 0;
+  return tflite::CreateStableHLOCompositeOptions(
+      _fbb,
+      name__,
+      decomposition_subgraph_index,
+      composite_attributes__,
+      composite_attributes_format,
+      version);
+}
+
+::flatbuffers::Offset<StableHLOCompositeOptions> CreateStableHLOCompositeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StableHLOCompositeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct OperatorT : public ::flatbuffers::NativeTable {
   typedef Operator TableType;
   uint32_t opcode_index = 0;
   std::vector<int32_t> inputs{};
@@ -11875,9 +14752,10 @@
   std::vector<int32_t> intermediates{};
   uint64_t large_custom_options_offset = 0;
   uint64_t large_custom_options_size = 0;
+  tflite::BuiltinOptions2Union builtin_options_2{};
 };
 
-struct Operator FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct Operator FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef OperatorT NativeTableType;
   typedef OperatorBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -11891,16 +14769,18 @@
     VT_MUTATING_VARIABLE_INPUTS = 18,
     VT_INTERMEDIATES = 20,
     VT_LARGE_CUSTOM_OPTIONS_OFFSET = 22,
-    VT_LARGE_CUSTOM_OPTIONS_SIZE = 24
+    VT_LARGE_CUSTOM_OPTIONS_SIZE = 24,
+    VT_BUILTIN_OPTIONS_2_TYPE = 26,
+    VT_BUILTIN_OPTIONS_2 = 28
   };
   uint32_t opcode_index() const {
     return GetField<uint32_t>(VT_OPCODE_INDEX, 0);
   }
-  const flatbuffers::Vector<int32_t> *inputs() const {
-    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INPUTS);
+  const ::flatbuffers::Vector<int32_t> *inputs() const {
+    return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_INPUTS);
   }
-  const flatbuffers::Vector<int32_t> *outputs() const {
-    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPUTS);
+  const ::flatbuffers::Vector<int32_t> *outputs() const {
+    return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_OUTPUTS);
   }
   tflite::BuiltinOptions builtin_options_type() const {
     return static_cast<tflite::BuiltinOptions>(GetField<uint8_t>(VT_BUILTIN_OPTIONS_TYPE, 0));
@@ -12287,17 +15167,17 @@
   const tflite::RightShiftOptions *builtin_options_as_RightShiftOptions() const {
     return builtin_options_type() == tflite::BuiltinOptions_RightShiftOptions ? static_cast<const tflite::RightShiftOptions *>(builtin_options()) : nullptr;
   }
-  const flatbuffers::Vector<uint8_t> *custom_options() const {
-    return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_CUSTOM_OPTIONS);
+  const ::flatbuffers::Vector<uint8_t> *custom_options() const {
+    return GetPointer<const ::flatbuffers::Vector<uint8_t> *>(VT_CUSTOM_OPTIONS);
   }
   tflite::CustomOptionsFormat custom_options_format() const {
     return static_cast<tflite::CustomOptionsFormat>(GetField<int8_t>(VT_CUSTOM_OPTIONS_FORMAT, 0));
   }
-  const flatbuffers::Vector<uint8_t> *mutating_variable_inputs() const {
-    return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_MUTATING_VARIABLE_INPUTS);
+  const ::flatbuffers::Vector<uint8_t> *mutating_variable_inputs() const {
+    return GetPointer<const ::flatbuffers::Vector<uint8_t> *>(VT_MUTATING_VARIABLE_INPUTS);
   }
-  const flatbuffers::Vector<int32_t> *intermediates() const {
-    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INTERMEDIATES);
+  const ::flatbuffers::Vector<int32_t> *intermediates() const {
+    return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_INTERMEDIATES);
   }
   uint64_t large_custom_options_offset() const {
     return GetField<uint64_t>(VT_LARGE_CUSTOM_OPTIONS_OFFSET, 0);
@@ -12305,7 +15185,77 @@
   uint64_t large_custom_options_size() const {
     return GetField<uint64_t>(VT_LARGE_CUSTOM_OPTIONS_SIZE, 0);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  tflite::BuiltinOptions2 builtin_options_2_type() const {
+    return static_cast<tflite::BuiltinOptions2>(GetField<uint8_t>(VT_BUILTIN_OPTIONS_2_TYPE, 0));
+  }
+  const void *builtin_options_2() const {
+    return GetPointer<const void *>(VT_BUILTIN_OPTIONS_2);
+  }
+  template<typename T> const T *builtin_options_2_as() const;
+  const tflite::StablehloConcatenateOptions *builtin_options_2_as_StablehloConcatenateOptions() const {
+    return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloConcatenateOptions ? static_cast<const tflite::StablehloConcatenateOptions *>(builtin_options_2()) : nullptr;
+  }
+  const tflite::StablehloBroadcastInDimOptions *builtin_options_2_as_StablehloBroadcastInDimOptions() const {
+    return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloBroadcastInDimOptions ? static_cast<const tflite::StablehloBroadcastInDimOptions *>(builtin_options_2()) : nullptr;
+  }
+  const tflite::StablehloSliceOptions *builtin_options_2_as_StablehloSliceOptions() const {
+    return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloSliceOptions ? static_cast<const tflite::StablehloSliceOptions *>(builtin_options_2()) : nullptr;
+  }
+  const tflite::StablehloConvolutionOptions *builtin_options_2_as_StablehloConvolutionOptions() const {
+    return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloConvolutionOptions ? static_cast<const tflite::StablehloConvolutionOptions *>(builtin_options_2()) : nullptr;
+  }
+  const tflite::StablehloCustomCallOptions *builtin_options_2_as_StablehloCustomCallOptions() const {
+    return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloCustomCallOptions ? static_cast<const tflite::StablehloCustomCallOptions *>(builtin_options_2()) : nullptr;
+  }
+  const tflite::StablehloReduceOptions *builtin_options_2_as_StablehloReduceOptions() const {
+    return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloReduceOptions ? static_cast<const tflite::StablehloReduceOptions *>(builtin_options_2()) : nullptr;
+  }
+  const tflite::StablehloScatterOptions *builtin_options_2_as_StablehloScatterOptions() const {
+    return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloScatterOptions ? static_cast<const tflite::StablehloScatterOptions *>(builtin_options_2()) : nullptr;
+  }
+  const tflite::StablehloCompareOptions *builtin_options_2_as_StablehloCompareOptions() const {
+    return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloCompareOptions ? static_cast<const tflite::StablehloCompareOptions *>(builtin_options_2()) : nullptr;
+  }
+  const tflite::StablehloDynamicSliceOptions *builtin_options_2_as_StablehloDynamicSliceOptions() const {
+    return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloDynamicSliceOptions ? static_cast<const tflite::StablehloDynamicSliceOptions *>(builtin_options_2()) : nullptr;
+  }
+  const tflite::StablehloPadOptions *builtin_options_2_as_StablehloPadOptions() const {
+    return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloPadOptions ? static_cast<const tflite::StablehloPadOptions *>(builtin_options_2()) : nullptr;
+  }
+  const tflite::StablehloIotaOptions *builtin_options_2_as_StablehloIotaOptions() const {
+    return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloIotaOptions ? static_cast<const tflite::StablehloIotaOptions *>(builtin_options_2()) : nullptr;
+  }
+  const tflite::StablehloDotGeneralOptions *builtin_options_2_as_StablehloDotGeneralOptions() const {
+    return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloDotGeneralOptions ? static_cast<const tflite::StablehloDotGeneralOptions *>(builtin_options_2()) : nullptr;
+  }
+  const tflite::StablehloReduceWindowOptions *builtin_options_2_as_StablehloReduceWindowOptions() const {
+    return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloReduceWindowOptions ? static_cast<const tflite::StablehloReduceWindowOptions *>(builtin_options_2()) : nullptr;
+  }
+  const tflite::StablehloSortOptions *builtin_options_2_as_StablehloSortOptions() const {
+    return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloSortOptions ? static_cast<const tflite::StablehloSortOptions *>(builtin_options_2()) : nullptr;
+  }
+  const tflite::StablehloWhileOptions *builtin_options_2_as_StablehloWhileOptions() const {
+    return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloWhileOptions ? static_cast<const tflite::StablehloWhileOptions *>(builtin_options_2()) : nullptr;
+  }
+  const tflite::StablehloGatherOptions *builtin_options_2_as_StablehloGatherOptions() const {
+    return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloGatherOptions ? static_cast<const tflite::StablehloGatherOptions *>(builtin_options_2()) : nullptr;
+  }
+  const tflite::StablehloTransposeOptions *builtin_options_2_as_StablehloTransposeOptions() const {
+    return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloTransposeOptions ? static_cast<const tflite::StablehloTransposeOptions *>(builtin_options_2()) : nullptr;
+  }
+  const tflite::DilateOptions *builtin_options_2_as_DilateOptions() const {
+    return builtin_options_2_type() == tflite::BuiltinOptions2_DilateOptions ? static_cast<const tflite::DilateOptions *>(builtin_options_2()) : nullptr;
+  }
+  const tflite::StablehloRngBitGeneratorOptions *builtin_options_2_as_StablehloRngBitGeneratorOptions() const {
+    return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloRngBitGeneratorOptions ? static_cast<const tflite::StablehloRngBitGeneratorOptions *>(builtin_options_2()) : nullptr;
+  }
+  const tflite::ReduceWindowOptions *builtin_options_2_as_ReduceWindowOptions() const {
+    return builtin_options_2_type() == tflite::BuiltinOptions2_ReduceWindowOptions ? static_cast<const tflite::ReduceWindowOptions *>(builtin_options_2()) : nullptr;
+  }
+  const tflite::StableHLOCompositeOptions *builtin_options_2_as_StableHLOCompositeOptions() const {
+    return builtin_options_2_type() == tflite::BuiltinOptions2_StableHLOCompositeOptions ? static_cast<const tflite::StableHLOCompositeOptions *>(builtin_options_2()) : nullptr;
+  }
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<uint32_t>(verifier, VT_OPCODE_INDEX, 4) &&
            VerifyOffset(verifier, VT_INPUTS) &&
@@ -12324,11 +15274,14 @@
            verifier.VerifyVector(intermediates()) &&
            VerifyField<uint64_t>(verifier, VT_LARGE_CUSTOM_OPTIONS_OFFSET, 8) &&
            VerifyField<uint64_t>(verifier, VT_LARGE_CUSTOM_OPTIONS_SIZE, 8) &&
+           VerifyField<uint8_t>(verifier, VT_BUILTIN_OPTIONS_2_TYPE, 1) &&
+           VerifyOffset(verifier, VT_BUILTIN_OPTIONS_2) &&
+           VerifyBuiltinOptions2(verifier, builtin_options_2(), builtin_options_2_type()) &&
            verifier.EndTable();
   }
-  OperatorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(OperatorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<Operator> Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  OperatorT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(OperatorT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<Operator> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const OperatorT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 template<> inline const tflite::Conv2DOptions *Operator::builtin_options_as<tflite::Conv2DOptions>() const {
@@ -12835,35 +15788,119 @@
   return builtin_options_as_RightShiftOptions();
 }
 
+template<> inline const tflite::StablehloConcatenateOptions *Operator::builtin_options_2_as<tflite::StablehloConcatenateOptions>() const {
+  return builtin_options_2_as_StablehloConcatenateOptions();
+}
+
+template<> inline const tflite::StablehloBroadcastInDimOptions *Operator::builtin_options_2_as<tflite::StablehloBroadcastInDimOptions>() const {
+  return builtin_options_2_as_StablehloBroadcastInDimOptions();
+}
+
+template<> inline const tflite::StablehloSliceOptions *Operator::builtin_options_2_as<tflite::StablehloSliceOptions>() const {
+  return builtin_options_2_as_StablehloSliceOptions();
+}
+
+template<> inline const tflite::StablehloConvolutionOptions *Operator::builtin_options_2_as<tflite::StablehloConvolutionOptions>() const {
+  return builtin_options_2_as_StablehloConvolutionOptions();
+}
+
+template<> inline const tflite::StablehloCustomCallOptions *Operator::builtin_options_2_as<tflite::StablehloCustomCallOptions>() const {
+  return builtin_options_2_as_StablehloCustomCallOptions();
+}
+
+template<> inline const tflite::StablehloReduceOptions *Operator::builtin_options_2_as<tflite::StablehloReduceOptions>() const {
+  return builtin_options_2_as_StablehloReduceOptions();
+}
+
+template<> inline const tflite::StablehloScatterOptions *Operator::builtin_options_2_as<tflite::StablehloScatterOptions>() const {
+  return builtin_options_2_as_StablehloScatterOptions();
+}
+
+template<> inline const tflite::StablehloCompareOptions *Operator::builtin_options_2_as<tflite::StablehloCompareOptions>() const {
+  return builtin_options_2_as_StablehloCompareOptions();
+}
+
+template<> inline const tflite::StablehloDynamicSliceOptions *Operator::builtin_options_2_as<tflite::StablehloDynamicSliceOptions>() const {
+  return builtin_options_2_as_StablehloDynamicSliceOptions();
+}
+
+template<> inline const tflite::StablehloPadOptions *Operator::builtin_options_2_as<tflite::StablehloPadOptions>() const {
+  return builtin_options_2_as_StablehloPadOptions();
+}
+
+template<> inline const tflite::StablehloIotaOptions *Operator::builtin_options_2_as<tflite::StablehloIotaOptions>() const {
+  return builtin_options_2_as_StablehloIotaOptions();
+}
+
+template<> inline const tflite::StablehloDotGeneralOptions *Operator::builtin_options_2_as<tflite::StablehloDotGeneralOptions>() const {
+  return builtin_options_2_as_StablehloDotGeneralOptions();
+}
+
+template<> inline const tflite::StablehloReduceWindowOptions *Operator::builtin_options_2_as<tflite::StablehloReduceWindowOptions>() const {
+  return builtin_options_2_as_StablehloReduceWindowOptions();
+}
+
+template<> inline const tflite::StablehloSortOptions *Operator::builtin_options_2_as<tflite::StablehloSortOptions>() const {
+  return builtin_options_2_as_StablehloSortOptions();
+}
+
+template<> inline const tflite::StablehloWhileOptions *Operator::builtin_options_2_as<tflite::StablehloWhileOptions>() const {
+  return builtin_options_2_as_StablehloWhileOptions();
+}
+
+template<> inline const tflite::StablehloGatherOptions *Operator::builtin_options_2_as<tflite::StablehloGatherOptions>() const {
+  return builtin_options_2_as_StablehloGatherOptions();
+}
+
+template<> inline const tflite::StablehloTransposeOptions *Operator::builtin_options_2_as<tflite::StablehloTransposeOptions>() const {
+  return builtin_options_2_as_StablehloTransposeOptions();
+}
+
+template<> inline const tflite::DilateOptions *Operator::builtin_options_2_as<tflite::DilateOptions>() const {
+  return builtin_options_2_as_DilateOptions();
+}
+
+template<> inline const tflite::StablehloRngBitGeneratorOptions *Operator::builtin_options_2_as<tflite::StablehloRngBitGeneratorOptions>() const {
+  return builtin_options_2_as_StablehloRngBitGeneratorOptions();
+}
+
+template<> inline const tflite::ReduceWindowOptions *Operator::builtin_options_2_as<tflite::ReduceWindowOptions>() const {
+  return builtin_options_2_as_ReduceWindowOptions();
+}
+
+template<> inline const tflite::StableHLOCompositeOptions *Operator::builtin_options_2_as<tflite::StableHLOCompositeOptions>() const {
+  return builtin_options_2_as_StableHLOCompositeOptions();
+}
+
 struct OperatorBuilder {
   typedef Operator Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_opcode_index(uint32_t opcode_index) {
     fbb_.AddElement<uint32_t>(Operator::VT_OPCODE_INDEX, opcode_index, 0);
   }
-  void add_inputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs) {
+  void add_inputs(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> inputs) {
     fbb_.AddOffset(Operator::VT_INPUTS, inputs);
   }
-  void add_outputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs) {
+  void add_outputs(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> outputs) {
     fbb_.AddOffset(Operator::VT_OUTPUTS, outputs);
   }
   void add_builtin_options_type(tflite::BuiltinOptions builtin_options_type) {
     fbb_.AddElement<uint8_t>(Operator::VT_BUILTIN_OPTIONS_TYPE, static_cast<uint8_t>(builtin_options_type), 0);
   }
-  void add_builtin_options(flatbuffers::Offset<void> builtin_options) {
+  void add_builtin_options(::flatbuffers::Offset<void> builtin_options) {
     fbb_.AddOffset(Operator::VT_BUILTIN_OPTIONS, builtin_options);
   }
-  void add_custom_options(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom_options) {
+  void add_custom_options(::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> custom_options) {
     fbb_.AddOffset(Operator::VT_CUSTOM_OPTIONS, custom_options);
   }
   void add_custom_options_format(tflite::CustomOptionsFormat custom_options_format) {
     fbb_.AddElement<int8_t>(Operator::VT_CUSTOM_OPTIONS_FORMAT, static_cast<int8_t>(custom_options_format), 0);
   }
-  void add_mutating_variable_inputs(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> mutating_variable_inputs) {
+  void add_mutating_variable_inputs(::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> mutating_variable_inputs) {
     fbb_.AddOffset(Operator::VT_MUTATING_VARIABLE_INPUTS, mutating_variable_inputs);
   }
-  void add_intermediates(flatbuffers::Offset<flatbuffers::Vector<int32_t>> intermediates) {
+  void add_intermediates(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> intermediates) {
     fbb_.AddOffset(Operator::VT_INTERMEDIATES, intermediates);
   }
   void add_large_custom_options_offset(uint64_t large_custom_options_offset) {
@@ -12872,33 +15909,42 @@
   void add_large_custom_options_size(uint64_t large_custom_options_size) {
     fbb_.AddElement<uint64_t>(Operator::VT_LARGE_CUSTOM_OPTIONS_SIZE, large_custom_options_size, 0);
   }
-  explicit OperatorBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  void add_builtin_options_2_type(tflite::BuiltinOptions2 builtin_options_2_type) {
+    fbb_.AddElement<uint8_t>(Operator::VT_BUILTIN_OPTIONS_2_TYPE, static_cast<uint8_t>(builtin_options_2_type), 0);
+  }
+  void add_builtin_options_2(::flatbuffers::Offset<void> builtin_options_2) {
+    fbb_.AddOffset(Operator::VT_BUILTIN_OPTIONS_2, builtin_options_2);
+  }
+  explicit OperatorBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<Operator> Finish() {
+  ::flatbuffers::Offset<Operator> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<Operator>(end);
+    auto o = ::flatbuffers::Offset<Operator>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<Operator> CreateOperator(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<Operator> CreateOperator(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     uint32_t opcode_index = 0,
-    flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs = 0,
-    flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> inputs = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> outputs = 0,
     tflite::BuiltinOptions builtin_options_type = tflite::BuiltinOptions_NONE,
-    flatbuffers::Offset<void> builtin_options = 0,
-    flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom_options = 0,
+    ::flatbuffers::Offset<void> builtin_options = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> custom_options = 0,
     tflite::CustomOptionsFormat custom_options_format = tflite::CustomOptionsFormat_FLEXBUFFERS,
-    flatbuffers::Offset<flatbuffers::Vector<uint8_t>> mutating_variable_inputs = 0,
-    flatbuffers::Offset<flatbuffers::Vector<int32_t>> intermediates = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> mutating_variable_inputs = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> intermediates = 0,
     uint64_t large_custom_options_offset = 0,
-    uint64_t large_custom_options_size = 0) {
+    uint64_t large_custom_options_size = 0,
+    tflite::BuiltinOptions2 builtin_options_2_type = tflite::BuiltinOptions2_NONE,
+    ::flatbuffers::Offset<void> builtin_options_2 = 0) {
   OperatorBuilder builder_(_fbb);
   builder_.add_large_custom_options_size(large_custom_options_size);
   builder_.add_large_custom_options_offset(large_custom_options_offset);
+  builder_.add_builtin_options_2(builtin_options_2);
   builder_.add_intermediates(intermediates);
   builder_.add_mutating_variable_inputs(mutating_variable_inputs);
   builder_.add_custom_options(custom_options);
@@ -12906,24 +15952,27 @@
   builder_.add_outputs(outputs);
   builder_.add_inputs(inputs);
   builder_.add_opcode_index(opcode_index);
+  builder_.add_builtin_options_2_type(builtin_options_2_type);
   builder_.add_custom_options_format(custom_options_format);
   builder_.add_builtin_options_type(builtin_options_type);
   return builder_.Finish();
 }
 
-inline flatbuffers::Offset<Operator> CreateOperatorDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<Operator> CreateOperatorDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     uint32_t opcode_index = 0,
     const std::vector<int32_t> *inputs = nullptr,
     const std::vector<int32_t> *outputs = nullptr,
     tflite::BuiltinOptions builtin_options_type = tflite::BuiltinOptions_NONE,
-    flatbuffers::Offset<void> builtin_options = 0,
+    ::flatbuffers::Offset<void> builtin_options = 0,
     const std::vector<uint8_t> *custom_options = nullptr,
     tflite::CustomOptionsFormat custom_options_format = tflite::CustomOptionsFormat_FLEXBUFFERS,
     const std::vector<uint8_t> *mutating_variable_inputs = nullptr,
     const std::vector<int32_t> *intermediates = nullptr,
     uint64_t large_custom_options_offset = 0,
-    uint64_t large_custom_options_size = 0) {
+    uint64_t large_custom_options_size = 0,
+    tflite::BuiltinOptions2 builtin_options_2_type = tflite::BuiltinOptions2_NONE,
+    ::flatbuffers::Offset<void> builtin_options_2 = 0) {
   auto inputs__ = inputs ? _fbb.CreateVector<int32_t>(*inputs) : 0;
   auto outputs__ = outputs ? _fbb.CreateVector<int32_t>(*outputs) : 0;
   auto custom_options__ = custom_options ? _fbb.CreateVector<uint8_t>(*custom_options) : 0;
@@ -12941,12 +15990,14 @@
       mutating_variable_inputs__,
       intermediates__,
       large_custom_options_offset,
-      large_custom_options_size);
+      large_custom_options_size,
+      builtin_options_2_type,
+      builtin_options_2);
 }
 
-flatbuffers::Offset<Operator> CreateOperator(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<Operator> CreateOperator(::flatbuffers::FlatBufferBuilder &_fbb, const OperatorT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct SubGraphT : public flatbuffers::NativeTable {
+struct SubGraphT : public ::flatbuffers::NativeTable {
   typedef SubGraph TableType;
   std::vector<std::unique_ptr<tflite::TensorT>> tensors{};
   std::vector<int32_t> inputs{};
@@ -12959,7 +16010,7 @@
   SubGraphT &operator=(SubGraphT o) FLATBUFFERS_NOEXCEPT;
 };
 
-struct SubGraph FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct SubGraph FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef SubGraphT NativeTableType;
   typedef SubGraphBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -12969,22 +16020,22 @@
     VT_OPERATORS = 10,
     VT_NAME = 12
   };
-  const flatbuffers::Vector<flatbuffers::Offset<tflite::Tensor>> *tensors() const {
-    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tflite::Tensor>> *>(VT_TENSORS);
+  const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::Tensor>> *tensors() const {
+    return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::Tensor>> *>(VT_TENSORS);
   }
-  const flatbuffers::Vector<int32_t> *inputs() const {
-    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INPUTS);
+  const ::flatbuffers::Vector<int32_t> *inputs() const {
+    return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_INPUTS);
   }
-  const flatbuffers::Vector<int32_t> *outputs() const {
-    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPUTS);
+  const ::flatbuffers::Vector<int32_t> *outputs() const {
+    return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_OUTPUTS);
   }
-  const flatbuffers::Vector<flatbuffers::Offset<tflite::Operator>> *operators() const {
-    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tflite::Operator>> *>(VT_OPERATORS);
+  const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::Operator>> *operators() const {
+    return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::Operator>> *>(VT_OPERATORS);
   }
-  const flatbuffers::String *name() const {
-    return GetPointer<const flatbuffers::String *>(VT_NAME);
+  const ::flatbuffers::String *name() const {
+    return GetPointer<const ::flatbuffers::String *>(VT_NAME);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyOffset(verifier, VT_TENSORS) &&
            verifier.VerifyVector(tensors()) &&
@@ -13000,48 +16051,48 @@
            verifier.VerifyString(name()) &&
            verifier.EndTable();
   }
-  SubGraphT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(SubGraphT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<SubGraph> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  SubGraphT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(SubGraphT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<SubGraph> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct SubGraphBuilder {
   typedef SubGraph Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_tensors(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::Tensor>>> tensors) {
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_tensors(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::Tensor>>> tensors) {
     fbb_.AddOffset(SubGraph::VT_TENSORS, tensors);
   }
-  void add_inputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs) {
+  void add_inputs(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> inputs) {
     fbb_.AddOffset(SubGraph::VT_INPUTS, inputs);
   }
-  void add_outputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs) {
+  void add_outputs(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> outputs) {
     fbb_.AddOffset(SubGraph::VT_OUTPUTS, outputs);
   }
-  void add_operators(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::Operator>>> operators) {
+  void add_operators(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::Operator>>> operators) {
     fbb_.AddOffset(SubGraph::VT_OPERATORS, operators);
   }
-  void add_name(flatbuffers::Offset<flatbuffers::String> name) {
+  void add_name(::flatbuffers::Offset<::flatbuffers::String> name) {
     fbb_.AddOffset(SubGraph::VT_NAME, name);
   }
-  explicit SubGraphBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit SubGraphBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<SubGraph> Finish() {
+  ::flatbuffers::Offset<SubGraph> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<SubGraph>(end);
+    auto o = ::flatbuffers::Offset<SubGraph>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<SubGraph> CreateSubGraph(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::Tensor>>> tensors = 0,
-    flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs = 0,
-    flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs = 0,
-    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::Operator>>> operators = 0,
-    flatbuffers::Offset<flatbuffers::String> name = 0) {
+inline ::flatbuffers::Offset<SubGraph> CreateSubGraph(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::Tensor>>> tensors = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> inputs = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> outputs = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::Operator>>> operators = 0,
+    ::flatbuffers::Offset<::flatbuffers::String> name = 0) {
   SubGraphBuilder builder_(_fbb);
   builder_.add_name(name);
   builder_.add_operators(operators);
@@ -13051,17 +16102,17 @@
   return builder_.Finish();
 }
 
-inline flatbuffers::Offset<SubGraph> CreateSubGraphDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    const std::vector<flatbuffers::Offset<tflite::Tensor>> *tensors = nullptr,
+inline ::flatbuffers::Offset<SubGraph> CreateSubGraphDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    const std::vector<::flatbuffers::Offset<tflite::Tensor>> *tensors = nullptr,
     const std::vector<int32_t> *inputs = nullptr,
     const std::vector<int32_t> *outputs = nullptr,
-    const std::vector<flatbuffers::Offset<tflite::Operator>> *operators = nullptr,
+    const std::vector<::flatbuffers::Offset<tflite::Operator>> *operators = nullptr,
     const char *name = nullptr) {
-  auto tensors__ = tensors ? _fbb.CreateVector<flatbuffers::Offset<tflite::Tensor>>(*tensors) : 0;
+  auto tensors__ = tensors ? _fbb.CreateVector<::flatbuffers::Offset<tflite::Tensor>>(*tensors) : 0;
   auto inputs__ = inputs ? _fbb.CreateVector<int32_t>(*inputs) : 0;
   auto outputs__ = outputs ? _fbb.CreateVector<int32_t>(*outputs) : 0;
-  auto operators__ = operators ? _fbb.CreateVector<flatbuffers::Offset<tflite::Operator>>(*operators) : 0;
+  auto operators__ = operators ? _fbb.CreateVector<::flatbuffers::Offset<tflite::Operator>>(*operators) : 0;
   auto name__ = name ? _fbb.CreateString(name) : 0;
   return tflite::CreateSubGraph(
       _fbb,
@@ -13072,16 +16123,16 @@
       name__);
 }
 
-flatbuffers::Offset<SubGraph> CreateSubGraph(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<SubGraph> CreateSubGraph(::flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct BufferT : public flatbuffers::NativeTable {
+struct BufferT : public ::flatbuffers::NativeTable {
   typedef Buffer TableType;
   std::vector<uint8_t> data{};
   uint64_t offset = 0;
   uint64_t size = 0;
 };
 
-struct Buffer FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct Buffer FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef BufferT NativeTableType;
   typedef BufferBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -13089,8 +16140,8 @@
     VT_OFFSET = 6,
     VT_SIZE = 8
   };
-  const flatbuffers::Vector<uint8_t> *data() const {
-    return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_DATA);
+  const ::flatbuffers::Vector<uint8_t> *data() const {
+    return GetPointer<const ::flatbuffers::Vector<uint8_t> *>(VT_DATA);
   }
   uint64_t offset() const {
     return GetField<uint64_t>(VT_OFFSET, 0);
@@ -13098,7 +16149,7 @@
   uint64_t size() const {
     return GetField<uint64_t>(VT_SIZE, 0);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyOffset(verifier, VT_DATA) &&
            verifier.VerifyVector(data()) &&
@@ -13106,16 +16157,16 @@
            VerifyField<uint64_t>(verifier, VT_SIZE, 8) &&
            verifier.EndTable();
   }
-  BufferT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(BufferT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<Buffer> Pack(flatbuffers::FlatBufferBuilder &_fbb, const BufferT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  BufferT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(BufferT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<Buffer> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BufferT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct BufferBuilder {
   typedef Buffer Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_data(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data) {
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_data(::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> data) {
     fbb_.AddOffset(Buffer::VT_DATA, data);
   }
   void add_offset(uint64_t offset) {
@@ -13124,20 +16175,20 @@
   void add_size(uint64_t size) {
     fbb_.AddElement<uint64_t>(Buffer::VT_SIZE, size, 0);
   }
-  explicit BufferBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit BufferBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<Buffer> Finish() {
+  ::flatbuffers::Offset<Buffer> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<Buffer>(end);
+    auto o = ::flatbuffers::Offset<Buffer>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<Buffer> CreateBuffer(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data = 0,
+inline ::flatbuffers::Offset<Buffer> CreateBuffer(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    ::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> data = 0,
     uint64_t offset = 0,
     uint64_t size = 0) {
   BufferBuilder builder_(_fbb);
@@ -13147,8 +16198,8 @@
   return builder_.Finish();
 }
 
-inline flatbuffers::Offset<Buffer> CreateBufferDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<Buffer> CreateBufferDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     const std::vector<uint8_t> *data = nullptr,
     uint64_t offset = 0,
     uint64_t size = 0) {
@@ -13161,63 +16212,63 @@
       size);
 }
 
-flatbuffers::Offset<Buffer> CreateBuffer(flatbuffers::FlatBufferBuilder &_fbb, const BufferT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<Buffer> CreateBuffer(::flatbuffers::FlatBufferBuilder &_fbb, const BufferT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct MetadataT : public flatbuffers::NativeTable {
+struct MetadataT : public ::flatbuffers::NativeTable {
   typedef Metadata TableType;
   std::string name{};
   uint32_t buffer = 0;
 };
 
-struct Metadata FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct Metadata FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef MetadataT NativeTableType;
   typedef MetadataBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
     VT_NAME = 4,
     VT_BUFFER = 6
   };
-  const flatbuffers::String *name() const {
-    return GetPointer<const flatbuffers::String *>(VT_NAME);
+  const ::flatbuffers::String *name() const {
+    return GetPointer<const ::flatbuffers::String *>(VT_NAME);
   }
   uint32_t buffer() const {
     return GetField<uint32_t>(VT_BUFFER, 0);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyOffset(verifier, VT_NAME) &&
            verifier.VerifyString(name()) &&
            VerifyField<uint32_t>(verifier, VT_BUFFER, 4) &&
            verifier.EndTable();
   }
-  MetadataT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(MetadataT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<Metadata> Pack(flatbuffers::FlatBufferBuilder &_fbb, const MetadataT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  MetadataT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(MetadataT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<Metadata> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MetadataT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct MetadataBuilder {
   typedef Metadata Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_name(flatbuffers::Offset<flatbuffers::String> name) {
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_name(::flatbuffers::Offset<::flatbuffers::String> name) {
     fbb_.AddOffset(Metadata::VT_NAME, name);
   }
   void add_buffer(uint32_t buffer) {
     fbb_.AddElement<uint32_t>(Metadata::VT_BUFFER, buffer, 0);
   }
-  explicit MetadataBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit MetadataBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<Metadata> Finish() {
+  ::flatbuffers::Offset<Metadata> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<Metadata>(end);
+    auto o = ::flatbuffers::Offset<Metadata>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<Metadata> CreateMetadata(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::String> name = 0,
+inline ::flatbuffers::Offset<Metadata> CreateMetadata(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    ::flatbuffers::Offset<::flatbuffers::String> name = 0,
     uint32_t buffer = 0) {
   MetadataBuilder builder_(_fbb);
   builder_.add_buffer(buffer);
@@ -13225,8 +16276,8 @@
   return builder_.Finish();
 }
 
-inline flatbuffers::Offset<Metadata> CreateMetadataDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<Metadata> CreateMetadataDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     const char *name = nullptr,
     uint32_t buffer = 0) {
   auto name__ = name ? _fbb.CreateString(name) : 0;
@@ -13236,63 +16287,63 @@
       buffer);
 }
 
-flatbuffers::Offset<Metadata> CreateMetadata(flatbuffers::FlatBufferBuilder &_fbb, const MetadataT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<Metadata> CreateMetadata(::flatbuffers::FlatBufferBuilder &_fbb, const MetadataT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct TensorMapT : public flatbuffers::NativeTable {
+struct TensorMapT : public ::flatbuffers::NativeTable {
   typedef TensorMap TableType;
   std::string name{};
   uint32_t tensor_index = 0;
 };
 
-struct TensorMap FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct TensorMap FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef TensorMapT NativeTableType;
   typedef TensorMapBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
     VT_NAME = 4,
     VT_TENSOR_INDEX = 6
   };
-  const flatbuffers::String *name() const {
-    return GetPointer<const flatbuffers::String *>(VT_NAME);
+  const ::flatbuffers::String *name() const {
+    return GetPointer<const ::flatbuffers::String *>(VT_NAME);
   }
   uint32_t tensor_index() const {
     return GetField<uint32_t>(VT_TENSOR_INDEX, 0);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyOffset(verifier, VT_NAME) &&
            verifier.VerifyString(name()) &&
            VerifyField<uint32_t>(verifier, VT_TENSOR_INDEX, 4) &&
            verifier.EndTable();
   }
-  TensorMapT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(TensorMapT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<TensorMap> Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  TensorMapT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(TensorMapT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<TensorMap> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct TensorMapBuilder {
   typedef TensorMap Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_name(flatbuffers::Offset<flatbuffers::String> name) {
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_name(::flatbuffers::Offset<::flatbuffers::String> name) {
     fbb_.AddOffset(TensorMap::VT_NAME, name);
   }
   void add_tensor_index(uint32_t tensor_index) {
     fbb_.AddElement<uint32_t>(TensorMap::VT_TENSOR_INDEX, tensor_index, 0);
   }
-  explicit TensorMapBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit TensorMapBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<TensorMap> Finish() {
+  ::flatbuffers::Offset<TensorMap> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<TensorMap>(end);
+    auto o = ::flatbuffers::Offset<TensorMap>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<TensorMap> CreateTensorMap(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::String> name = 0,
+inline ::flatbuffers::Offset<TensorMap> CreateTensorMap(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    ::flatbuffers::Offset<::flatbuffers::String> name = 0,
     uint32_t tensor_index = 0) {
   TensorMapBuilder builder_(_fbb);
   builder_.add_tensor_index(tensor_index);
@@ -13300,8 +16351,8 @@
   return builder_.Finish();
 }
 
-inline flatbuffers::Offset<TensorMap> CreateTensorMapDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<TensorMap> CreateTensorMapDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     const char *name = nullptr,
     uint32_t tensor_index = 0) {
   auto name__ = name ? _fbb.CreateString(name) : 0;
@@ -13311,9 +16362,9 @@
       tensor_index);
 }
 
-flatbuffers::Offset<TensorMap> CreateTensorMap(flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<TensorMap> CreateTensorMap(::flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct SignatureDefT : public flatbuffers::NativeTable {
+struct SignatureDefT : public ::flatbuffers::NativeTable {
   typedef SignatureDef TableType;
   std::vector<std::unique_ptr<tflite::TensorMapT>> inputs{};
   std::vector<std::unique_ptr<tflite::TensorMapT>> outputs{};
@@ -13325,7 +16376,7 @@
   SignatureDefT &operator=(SignatureDefT o) FLATBUFFERS_NOEXCEPT;
 };
 
-struct SignatureDef FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct SignatureDef FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef SignatureDefT NativeTableType;
   typedef SignatureDefBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -13334,19 +16385,19 @@
     VT_SIGNATURE_KEY = 8,
     VT_SUBGRAPH_INDEX = 12
   };
-  const flatbuffers::Vector<flatbuffers::Offset<tflite::TensorMap>> *inputs() const {
-    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tflite::TensorMap>> *>(VT_INPUTS);
+  const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::TensorMap>> *inputs() const {
+    return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::TensorMap>> *>(VT_INPUTS);
   }
-  const flatbuffers::Vector<flatbuffers::Offset<tflite::TensorMap>> *outputs() const {
-    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tflite::TensorMap>> *>(VT_OUTPUTS);
+  const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::TensorMap>> *outputs() const {
+    return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::TensorMap>> *>(VT_OUTPUTS);
   }
-  const flatbuffers::String *signature_key() const {
-    return GetPointer<const flatbuffers::String *>(VT_SIGNATURE_KEY);
+  const ::flatbuffers::String *signature_key() const {
+    return GetPointer<const ::flatbuffers::String *>(VT_SIGNATURE_KEY);
   }
   uint32_t subgraph_index() const {
     return GetField<uint32_t>(VT_SUBGRAPH_INDEX, 0);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyOffset(verifier, VT_INPUTS) &&
            verifier.VerifyVector(inputs()) &&
@@ -13359,43 +16410,43 @@
            VerifyField<uint32_t>(verifier, VT_SUBGRAPH_INDEX, 4) &&
            verifier.EndTable();
   }
-  SignatureDefT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(SignatureDefT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<SignatureDef> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  SignatureDefT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(SignatureDefT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<SignatureDef> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct SignatureDefBuilder {
   typedef SignatureDef Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_inputs(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::TensorMap>>> inputs) {
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
+  void add_inputs(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::TensorMap>>> inputs) {
     fbb_.AddOffset(SignatureDef::VT_INPUTS, inputs);
   }
-  void add_outputs(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::TensorMap>>> outputs) {
+  void add_outputs(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::TensorMap>>> outputs) {
     fbb_.AddOffset(SignatureDef::VT_OUTPUTS, outputs);
   }
-  void add_signature_key(flatbuffers::Offset<flatbuffers::String> signature_key) {
+  void add_signature_key(::flatbuffers::Offset<::flatbuffers::String> signature_key) {
     fbb_.AddOffset(SignatureDef::VT_SIGNATURE_KEY, signature_key);
   }
   void add_subgraph_index(uint32_t subgraph_index) {
     fbb_.AddElement<uint32_t>(SignatureDef::VT_SUBGRAPH_INDEX, subgraph_index, 0);
   }
-  explicit SignatureDefBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit SignatureDefBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<SignatureDef> Finish() {
+  ::flatbuffers::Offset<SignatureDef> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<SignatureDef>(end);
+    auto o = ::flatbuffers::Offset<SignatureDef>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<SignatureDef> CreateSignatureDef(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::TensorMap>>> inputs = 0,
-    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::TensorMap>>> outputs = 0,
-    flatbuffers::Offset<flatbuffers::String> signature_key = 0,
+inline ::flatbuffers::Offset<SignatureDef> CreateSignatureDef(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::TensorMap>>> inputs = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::TensorMap>>> outputs = 0,
+    ::flatbuffers::Offset<::flatbuffers::String> signature_key = 0,
     uint32_t subgraph_index = 0) {
   SignatureDefBuilder builder_(_fbb);
   builder_.add_subgraph_index(subgraph_index);
@@ -13405,14 +16456,14 @@
   return builder_.Finish();
 }
 
-inline flatbuffers::Offset<SignatureDef> CreateSignatureDefDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    const std::vector<flatbuffers::Offset<tflite::TensorMap>> *inputs = nullptr,
-    const std::vector<flatbuffers::Offset<tflite::TensorMap>> *outputs = nullptr,
+inline ::flatbuffers::Offset<SignatureDef> CreateSignatureDefDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
+    const std::vector<::flatbuffers::Offset<tflite::TensorMap>> *inputs = nullptr,
+    const std::vector<::flatbuffers::Offset<tflite::TensorMap>> *outputs = nullptr,
     const char *signature_key = nullptr,
     uint32_t subgraph_index = 0) {
-  auto inputs__ = inputs ? _fbb.CreateVector<flatbuffers::Offset<tflite::TensorMap>>(*inputs) : 0;
-  auto outputs__ = outputs ? _fbb.CreateVector<flatbuffers::Offset<tflite::TensorMap>>(*outputs) : 0;
+  auto inputs__ = inputs ? _fbb.CreateVector<::flatbuffers::Offset<tflite::TensorMap>>(*inputs) : 0;
+  auto outputs__ = outputs ? _fbb.CreateVector<::flatbuffers::Offset<tflite::TensorMap>>(*outputs) : 0;
   auto signature_key__ = signature_key ? _fbb.CreateString(signature_key) : 0;
   return tflite::CreateSignatureDef(
       _fbb,
@@ -13422,9 +16473,9 @@
       subgraph_index);
 }
 
-flatbuffers::Offset<SignatureDef> CreateSignatureDef(flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<SignatureDef> CreateSignatureDef(::flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-struct ModelT : public flatbuffers::NativeTable {
+struct ModelT : public ::flatbuffers::NativeTable {
   typedef Model TableType;
   uint32_t version = 0;
   std::vector<std::unique_ptr<tflite::OperatorCodeT>> operator_codes{};
@@ -13440,7 +16491,7 @@
   ModelT &operator=(ModelT o) FLATBUFFERS_NOEXCEPT;
 };
 
-struct Model FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct Model FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
   typedef ModelT NativeTableType;
   typedef ModelBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
@@ -13456,28 +16507,28 @@
   uint32_t version() const {
     return GetField<uint32_t>(VT_VERSION, 0);
   }
-  const flatbuffers::Vector<flatbuffers::Offset<tflite::OperatorCode>> *operator_codes() const {
-    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tflite::OperatorCode>> *>(VT_OPERATOR_CODES);
+  const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::OperatorCode>> *operator_codes() const {
+    return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::OperatorCode>> *>(VT_OPERATOR_CODES);
   }
-  const flatbuffers::Vector<flatbuffers::Offset<tflite::SubGraph>> *subgraphs() const {
-    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tflite::SubGraph>> *>(VT_SUBGRAPHS);
+  const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::SubGraph>> *subgraphs() const {
+    return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::SubGraph>> *>(VT_SUBGRAPHS);
   }
-  const flatbuffers::String *description() const {
-    return GetPointer<const flatbuffers::String *>(VT_DESCRIPTION);
+  const ::flatbuffers::String *description() const {
+    return GetPointer<const ::flatbuffers::String *>(VT_DESCRIPTION);
   }
-  const flatbuffers::Vector<flatbuffers::Offset<tflite::Buffer>> *buffers() const {
-    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tflite::Buffer>> *>(VT_BUFFERS);
+  const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::Buffer>> *buffers() const {
+    return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::Buffer>> *>(VT_BUFFERS);
   }
-  const flatbuffers::Vector<int32_t> *metadata_buffer() const {
-    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_METADATA_BUFFER);
+  const ::flatbuffers::Vector<int32_t> *metadata_buffer() const {
+    return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_METADATA_BUFFER);
   }
-  const flatbuffers::Vector<flatbuffers::Offset<tflite::Metadata>> *metadata() const {
-    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tflite::Metadata>> *>(VT_METADATA);
+  const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::Metadata>> *metadata() const {
+    return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::Metadata>> *>(VT_METADATA);
   }
-  const flatbuffers::Vector<flatbuffers::Offset<tflite::SignatureDef>> *signature_defs() const {
-    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tflite::SignatureDef>> *>(VT_SIGNATURE_DEFS);
+  const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::SignatureDef>> *signature_defs() const {
+    return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::SignatureDef>> *>(VT_SIGNATURE_DEFS);
   }
-  bool Verify(flatbuffers::Verifier &verifier) const {
+  bool Verify(::flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<uint32_t>(verifier, VT_VERSION, 4) &&
            VerifyOffset(verifier, VT_OPERATOR_CODES) &&
@@ -13501,60 +16552,60 @@
            verifier.VerifyVectorOfTables(signature_defs()) &&
            verifier.EndTable();
   }
-  ModelT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(ModelT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<Model> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ModelT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+  ModelT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(ModelT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static ::flatbuffers::Offset<Model> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ModelT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 };
 
 struct ModelBuilder {
   typedef Model Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
+  ::flatbuffers::FlatBufferBuilder &fbb_;
+  ::flatbuffers::uoffset_t start_;
   void add_version(uint32_t version) {
     fbb_.AddElement<uint32_t>(Model::VT_VERSION, version, 0);
   }
-  void add_operator_codes(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::OperatorCode>>> operator_codes) {
+  void add_operator_codes(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::OperatorCode>>> operator_codes) {
     fbb_.AddOffset(Model::VT_OPERATOR_CODES, operator_codes);
   }
-  void add_subgraphs(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::SubGraph>>> subgraphs) {
+  void add_subgraphs(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::SubGraph>>> subgraphs) {
     fbb_.AddOffset(Model::VT_SUBGRAPHS, subgraphs);
   }
-  void add_description(flatbuffers::Offset<flatbuffers::String> description) {
+  void add_description(::flatbuffers::Offset<::flatbuffers::String> description) {
     fbb_.AddOffset(Model::VT_DESCRIPTION, description);
   }
-  void add_buffers(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::Buffer>>> buffers) {
+  void add_buffers(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::Buffer>>> buffers) {
     fbb_.AddOffset(Model::VT_BUFFERS, buffers);
   }
-  void add_metadata_buffer(flatbuffers::Offset<flatbuffers::Vector<int32_t>> metadata_buffer) {
+  void add_metadata_buffer(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> metadata_buffer) {
     fbb_.AddOffset(Model::VT_METADATA_BUFFER, metadata_buffer);
   }
-  void add_metadata(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::Metadata>>> metadata) {
+  void add_metadata(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::Metadata>>> metadata) {
     fbb_.AddOffset(Model::VT_METADATA, metadata);
   }
-  void add_signature_defs(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::SignatureDef>>> signature_defs) {
+  void add_signature_defs(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::SignatureDef>>> signature_defs) {
     fbb_.AddOffset(Model::VT_SIGNATURE_DEFS, signature_defs);
   }
-  explicit ModelBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+  explicit ModelBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
   }
-  flatbuffers::Offset<Model> Finish() {
+  ::flatbuffers::Offset<Model> Finish() {
     const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<Model>(end);
+    auto o = ::flatbuffers::Offset<Model>(end);
     return o;
   }
 };
 
-inline flatbuffers::Offset<Model> CreateModel(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<Model> CreateModel(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     uint32_t version = 0,
-    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::OperatorCode>>> operator_codes = 0,
-    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::SubGraph>>> subgraphs = 0,
-    flatbuffers::Offset<flatbuffers::String> description = 0,
-    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::Buffer>>> buffers = 0,
-    flatbuffers::Offset<flatbuffers::Vector<int32_t>> metadata_buffer = 0,
-    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::Metadata>>> metadata = 0,
-    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::SignatureDef>>> signature_defs = 0) {
+    ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::OperatorCode>>> operator_codes = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::SubGraph>>> subgraphs = 0,
+    ::flatbuffers::Offset<::flatbuffers::String> description = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::Buffer>>> buffers = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> metadata_buffer = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::Metadata>>> metadata = 0,
+    ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::SignatureDef>>> signature_defs = 0) {
   ModelBuilder builder_(_fbb);
   builder_.add_signature_defs(signature_defs);
   builder_.add_metadata(metadata);
@@ -13567,23 +16618,23 @@
   return builder_.Finish();
 }
 
-inline flatbuffers::Offset<Model> CreateModelDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<Model> CreateModelDirect(
+    ::flatbuffers::FlatBufferBuilder &_fbb,
     uint32_t version = 0,
-    const std::vector<flatbuffers::Offset<tflite::OperatorCode>> *operator_codes = nullptr,
-    const std::vector<flatbuffers::Offset<tflite::SubGraph>> *subgraphs = nullptr,
+    const std::vector<::flatbuffers::Offset<tflite::OperatorCode>> *operator_codes = nullptr,
+    const std::vector<::flatbuffers::Offset<tflite::SubGraph>> *subgraphs = nullptr,
     const char *description = nullptr,
-    const std::vector<flatbuffers::Offset<tflite::Buffer>> *buffers = nullptr,
+    const std::vector<::flatbuffers::Offset<tflite::Buffer>> *buffers = nullptr,
     const std::vector<int32_t> *metadata_buffer = nullptr,
-    const std::vector<flatbuffers::Offset<tflite::Metadata>> *metadata = nullptr,
-    const std::vector<flatbuffers::Offset<tflite::SignatureDef>> *signature_defs = nullptr) {
-  auto operator_codes__ = operator_codes ? _fbb.CreateVector<flatbuffers::Offset<tflite::OperatorCode>>(*operator_codes) : 0;
-  auto subgraphs__ = subgraphs ? _fbb.CreateVector<flatbuffers::Offset<tflite::SubGraph>>(*subgraphs) : 0;
+    const std::vector<::flatbuffers::Offset<tflite::Metadata>> *metadata = nullptr,
+    const std::vector<::flatbuffers::Offset<tflite::SignatureDef>> *signature_defs = nullptr) {
+  auto operator_codes__ = operator_codes ? _fbb.CreateVector<::flatbuffers::Offset<tflite::OperatorCode>>(*operator_codes) : 0;
+  auto subgraphs__ = subgraphs ? _fbb.CreateVector<::flatbuffers::Offset<tflite::SubGraph>>(*subgraphs) : 0;
   auto description__ = description ? _fbb.CreateString(description) : 0;
-  auto buffers__ = buffers ? _fbb.CreateVector<flatbuffers::Offset<tflite::Buffer>>(*buffers) : 0;
+  auto buffers__ = buffers ? _fbb.CreateVector<::flatbuffers::Offset<tflite::Buffer>>(*buffers) : 0;
   auto metadata_buffer__ = metadata_buffer ? _fbb.CreateVector<int32_t>(*metadata_buffer) : 0;
-  auto metadata__ = metadata ? _fbb.CreateVector<flatbuffers::Offset<tflite::Metadata>>(*metadata) : 0;
-  auto signature_defs__ = signature_defs ? _fbb.CreateVector<flatbuffers::Offset<tflite::SignatureDef>>(*signature_defs) : 0;
+  auto metadata__ = metadata ? _fbb.CreateVector<::flatbuffers::Offset<tflite::Metadata>>(*metadata) : 0;
+  auto signature_defs__ = signature_defs ? _fbb.CreateVector<::flatbuffers::Offset<tflite::SignatureDef>>(*signature_defs) : 0;
   return tflite::CreateModel(
       _fbb,
       version,
@@ -13596,28 +16647,28 @@
       signature_defs__);
 }
 
-flatbuffers::Offset<Model> CreateModel(flatbuffers::FlatBufferBuilder &_fbb, const ModelT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+::flatbuffers::Offset<Model> CreateModel(::flatbuffers::FlatBufferBuilder &_fbb, const ModelT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
 
-inline CustomQuantizationT *CustomQuantization::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline CustomQuantizationT *CustomQuantization::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<CustomQuantizationT>(new CustomQuantizationT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void CustomQuantization::UnPackTo(CustomQuantizationT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void CustomQuantization::UnPackTo(CustomQuantizationT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = custom(); if (_e) { _o->custom.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->custom.begin()); } }
 }
 
-inline flatbuffers::Offset<CustomQuantization> CustomQuantization::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<CustomQuantization> CustomQuantization::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateCustomQuantization(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<CustomQuantization> CreateCustomQuantization(flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<CustomQuantization> CreateCustomQuantization(::flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CustomQuantizationT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const CustomQuantizationT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   _fbb.ForceVectorAlignment(_o->custom.size(), sizeof(uint8_t), 16);
   auto _custom = _o->custom.size() ? _fbb.CreateVector(_o->custom) : 0;
   return tflite::CreateCustomQuantization(
@@ -13625,32 +16676,32 @@
       _custom);
 }
 
-inline QuantizationParametersT *QuantizationParameters::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline QuantizationParametersT *QuantizationParameters::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<QuantizationParametersT>(new QuantizationParametersT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void QuantizationParameters::UnPackTo(QuantizationParametersT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void QuantizationParameters::UnPackTo(QuantizationParametersT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
-  { auto _e = min(); if (_e) { _o->min.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->min[_i] = _e->Get(_i); } } }
-  { auto _e = max(); if (_e) { _o->max.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->max[_i] = _e->Get(_i); } } }
-  { auto _e = scale(); if (_e) { _o->scale.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->scale[_i] = _e->Get(_i); } } }
-  { auto _e = zero_point(); if (_e) { _o->zero_point.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->zero_point[_i] = _e->Get(_i); } } }
+  { auto _e = min(); if (_e) { _o->min.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->min[_i] = _e->Get(_i); } } else { _o->min.resize(0); } }
+  { auto _e = max(); if (_e) { _o->max.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->max[_i] = _e->Get(_i); } } else { _o->max.resize(0); } }
+  { auto _e = scale(); if (_e) { _o->scale.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->scale[_i] = _e->Get(_i); } } else { _o->scale.resize(0); } }
+  { auto _e = zero_point(); if (_e) { _o->zero_point.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->zero_point[_i] = _e->Get(_i); } } else { _o->zero_point.resize(0); } }
   { auto _e = details_type(); _o->details.type = _e; }
   { auto _e = details(); if (_e) _o->details.value = tflite::QuantizationDetailsUnion::UnPack(_e, details_type(), _resolver); }
   { auto _e = quantized_dimension(); _o->quantized_dimension = _e; }
 }
 
-inline flatbuffers::Offset<QuantizationParameters> QuantizationParameters::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<QuantizationParameters> QuantizationParameters::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateQuantizationParameters(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<QuantizationParameters> CreateQuantizationParameters(flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<QuantizationParameters> CreateQuantizationParameters(::flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizationParametersT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const QuantizationParametersT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _min = _o->min.size() ? _fbb.CreateVector(_o->min) : 0;
   auto _max = _o->max.size() ? _fbb.CreateVector(_o->max) : 0;
   auto _scale = _o->scale.size() ? _fbb.CreateVector(_o->scale) : 0;
@@ -13669,52 +16720,52 @@
       _quantized_dimension);
 }
 
-inline Int32VectorT *Int32Vector::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline Int32VectorT *Int32Vector::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<Int32VectorT>(new Int32VectorT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void Int32Vector::UnPackTo(Int32VectorT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void Int32Vector::UnPackTo(Int32VectorT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
-  { auto _e = values(); if (_e) { _o->values.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->values[_i] = _e->Get(_i); } } }
+  { auto _e = values(); if (_e) { _o->values.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->values[_i] = _e->Get(_i); } } else { _o->values.resize(0); } }
 }
 
-inline flatbuffers::Offset<Int32Vector> Int32Vector::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<Int32Vector> Int32Vector::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateInt32Vector(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<Int32Vector> CreateInt32Vector(flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<Int32Vector> CreateInt32Vector(::flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Int32VectorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const Int32VectorT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _values = _o->values.size() ? _fbb.CreateVector(_o->values) : 0;
   return tflite::CreateInt32Vector(
       _fbb,
       _values);
 }
 
-inline Uint16VectorT *Uint16Vector::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline Uint16VectorT *Uint16Vector::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<Uint16VectorT>(new Uint16VectorT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void Uint16Vector::UnPackTo(Uint16VectorT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void Uint16Vector::UnPackTo(Uint16VectorT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
-  { auto _e = values(); if (_e) { _o->values.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->values[_i] = _e->Get(_i); } } }
+  { auto _e = values(); if (_e) { _o->values.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->values[_i] = _e->Get(_i); } } else { _o->values.resize(0); } }
 }
 
-inline flatbuffers::Offset<Uint16Vector> Uint16Vector::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<Uint16Vector> Uint16Vector::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateUint16Vector(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<Uint16Vector> CreateUint16Vector(flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<Uint16Vector> CreateUint16Vector(::flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Uint16VectorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const Uint16VectorT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   _fbb.ForceVectorAlignment(_o->values.size(), sizeof(uint16_t), 4);
   auto _values = _o->values.size() ? _fbb.CreateVector(_o->values) : 0;
   return tflite::CreateUint16Vector(
@@ -13722,26 +16773,26 @@
       _values);
 }
 
-inline Uint8VectorT *Uint8Vector::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline Uint8VectorT *Uint8Vector::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<Uint8VectorT>(new Uint8VectorT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void Uint8Vector::UnPackTo(Uint8VectorT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void Uint8Vector::UnPackTo(Uint8VectorT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = values(); if (_e) { _o->values.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->values.begin()); } }
 }
 
-inline flatbuffers::Offset<Uint8Vector> Uint8Vector::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<Uint8Vector> Uint8Vector::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateUint8Vector(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<Uint8Vector> CreateUint8Vector(flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<Uint8Vector> CreateUint8Vector(::flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Uint8VectorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const Uint8VectorT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   _fbb.ForceVectorAlignment(_o->values.size(), sizeof(uint8_t), 4);
   auto _values = _o->values.size() ? _fbb.CreateVector(_o->values) : 0;
   return tflite::CreateUint8Vector(
@@ -13749,13 +16800,13 @@
       _values);
 }
 
-inline DimensionMetadataT *DimensionMetadata::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline DimensionMetadataT *DimensionMetadata::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<DimensionMetadataT>(new DimensionMetadataT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void DimensionMetadata::UnPackTo(DimensionMetadataT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void DimensionMetadata::UnPackTo(DimensionMetadataT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = format(); _o->format = _e; }
@@ -13766,14 +16817,14 @@
   { auto _e = array_indices(); if (_e) _o->array_indices.value = tflite::SparseIndexVectorUnion::UnPack(_e, array_indices_type(), _resolver); }
 }
 
-inline flatbuffers::Offset<DimensionMetadata> DimensionMetadata::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<DimensionMetadata> DimensionMetadata::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateDimensionMetadata(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<DimensionMetadata> CreateDimensionMetadata(flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<DimensionMetadata> CreateDimensionMetadata(::flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DimensionMetadataT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const DimensionMetadataT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _format = _o->format;
   auto _dense_size = _o->dense_size;
   auto _array_segments_type = _o->array_segments.type;
@@ -13804,31 +16855,31 @@
   return *this;
 }
 
-inline SparsityParametersT *SparsityParameters::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline SparsityParametersT *SparsityParameters::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<SparsityParametersT>(new SparsityParametersT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void SparsityParameters::UnPackTo(SparsityParametersT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void SparsityParameters::UnPackTo(SparsityParametersT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
-  { auto _e = traversal_order(); if (_e) { _o->traversal_order.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->traversal_order[_i] = _e->Get(_i); } } }
-  { auto _e = block_map(); if (_e) { _o->block_map.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->block_map[_i] = _e->Get(_i); } } }
-  { auto _e = dim_metadata(); if (_e) { _o->dim_metadata.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->dim_metadata[_i]) { _e->Get(_i)->UnPackTo(_o->dim_metadata[_i].get(), _resolver); } else { _o->dim_metadata[_i] = std::unique_ptr<tflite::DimensionMetadataT>(_e->Get(_i)->UnPack(_resolver)); }; } } }
+  { auto _e = traversal_order(); if (_e) { _o->traversal_order.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->traversal_order[_i] = _e->Get(_i); } } else { _o->traversal_order.resize(0); } }
+  { auto _e = block_map(); if (_e) { _o->block_map.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->block_map[_i] = _e->Get(_i); } } else { _o->block_map.resize(0); } }
+  { auto _e = dim_metadata(); if (_e) { _o->dim_metadata.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->dim_metadata[_i]) { _e->Get(_i)->UnPackTo(_o->dim_metadata[_i].get(), _resolver); } else { _o->dim_metadata[_i] = std::unique_ptr<tflite::DimensionMetadataT>(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->dim_metadata.resize(0); } }
 }
 
-inline flatbuffers::Offset<SparsityParameters> SparsityParameters::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SparsityParametersT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SparsityParameters> SparsityParameters::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SparsityParametersT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateSparsityParameters(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<SparsityParameters> CreateSparsityParameters(flatbuffers::FlatBufferBuilder &_fbb, const SparsityParametersT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SparsityParameters> CreateSparsityParameters(::flatbuffers::FlatBufferBuilder &_fbb, const SparsityParametersT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SparsityParametersT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SparsityParametersT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _traversal_order = _o->traversal_order.size() ? _fbb.CreateVector(_o->traversal_order) : 0;
   auto _block_map = _o->block_map.size() ? _fbb.CreateVector(_o->block_map) : 0;
-  auto _dim_metadata = _o->dim_metadata.size() ? _fbb.CreateVector<flatbuffers::Offset<tflite::DimensionMetadata>> (_o->dim_metadata.size(), [](size_t i, _VectorArgs *__va) { return CreateDimensionMetadata(*__va->__fbb, __va->__o->dim_metadata[i].get(), __va->__rehasher); }, &_va ) : 0;
+  auto _dim_metadata = _o->dim_metadata.size() ? _fbb.CreateVector<::flatbuffers::Offset<tflite::DimensionMetadata>> (_o->dim_metadata.size(), [](size_t i, _VectorArgs *__va) { return CreateDimensionMetadata(*__va->__fbb, __va->__o->dim_metadata[i].get(), __va->__rehasher); }, &_va ) : 0;
   return tflite::CreateSparsityParameters(
       _fbb,
       _traversal_order,
@@ -13836,28 +16887,28 @@
       _dim_metadata);
 }
 
-inline VariantSubTypeT *VariantSubType::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline VariantSubTypeT *VariantSubType::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<VariantSubTypeT>(new VariantSubTypeT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void VariantSubType::UnPackTo(VariantSubTypeT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void VariantSubType::UnPackTo(VariantSubTypeT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
-  { auto _e = shape(); if (_e) { _o->shape.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->shape[_i] = _e->Get(_i); } } }
+  { auto _e = shape(); if (_e) { _o->shape.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->shape[_i] = _e->Get(_i); } } else { _o->shape.resize(0); } }
   { auto _e = type(); _o->type = _e; }
   { auto _e = has_rank(); _o->has_rank = _e; }
 }
 
-inline flatbuffers::Offset<VariantSubType> VariantSubType::Pack(flatbuffers::FlatBufferBuilder &_fbb, const VariantSubTypeT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<VariantSubType> VariantSubType::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const VariantSubTypeT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateVariantSubType(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<VariantSubType> CreateVariantSubType(flatbuffers::FlatBufferBuilder &_fbb, const VariantSubTypeT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<VariantSubType> CreateVariantSubType(::flatbuffers::FlatBufferBuilder &_fbb, const VariantSubTypeT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const VariantSubTypeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const VariantSubTypeT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _shape = _o->shape.size() ? _fbb.CreateVector(_o->shape) : 0;
   auto _type = _o->type;
   auto _has_rank = _o->has_rank;
@@ -13896,35 +16947,35 @@
   return *this;
 }
 
-inline TensorT *Tensor::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline TensorT *Tensor::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<TensorT>(new TensorT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void Tensor::UnPackTo(TensorT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void Tensor::UnPackTo(TensorT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
-  { auto _e = shape(); if (_e) { _o->shape.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->shape[_i] = _e->Get(_i); } } }
+  { auto _e = shape(); if (_e) { _o->shape.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->shape[_i] = _e->Get(_i); } } else { _o->shape.resize(0); } }
   { auto _e = type(); _o->type = _e; }
   { auto _e = buffer(); _o->buffer = _e; }
   { auto _e = name(); if (_e) _o->name = _e->str(); }
-  { auto _e = quantization(); if (_e) { if(_o->quantization) { _e->UnPackTo(_o->quantization.get(), _resolver); } else { _o->quantization = std::unique_ptr<tflite::QuantizationParametersT>(_e->UnPack(_resolver)); } } }
+  { auto _e = quantization(); if (_e) { if(_o->quantization) { _e->UnPackTo(_o->quantization.get(), _resolver); } else { _o->quantization = std::unique_ptr<tflite::QuantizationParametersT>(_e->UnPack(_resolver)); } } else if (_o->quantization) { _o->quantization.reset(); } }
   { auto _e = is_variable(); _o->is_variable = _e; }
-  { auto _e = sparsity(); if (_e) { if(_o->sparsity) { _e->UnPackTo(_o->sparsity.get(), _resolver); } else { _o->sparsity = std::unique_ptr<tflite::SparsityParametersT>(_e->UnPack(_resolver)); } } }
-  { auto _e = shape_signature(); if (_e) { _o->shape_signature.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->shape_signature[_i] = _e->Get(_i); } } }
+  { auto _e = sparsity(); if (_e) { if(_o->sparsity) { _e->UnPackTo(_o->sparsity.get(), _resolver); } else { _o->sparsity = std::unique_ptr<tflite::SparsityParametersT>(_e->UnPack(_resolver)); } } else if (_o->sparsity) { _o->sparsity.reset(); } }
+  { auto _e = shape_signature(); if (_e) { _o->shape_signature.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->shape_signature[_i] = _e->Get(_i); } } else { _o->shape_signature.resize(0); } }
   { auto _e = has_rank(); _o->has_rank = _e; }
-  { auto _e = variant_tensors(); if (_e) { _o->variant_tensors.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->variant_tensors[_i]) { _e->Get(_i)->UnPackTo(_o->variant_tensors[_i].get(), _resolver); } else { _o->variant_tensors[_i] = std::unique_ptr<tflite::VariantSubTypeT>(_e->Get(_i)->UnPack(_resolver)); }; } } }
+  { auto _e = variant_tensors(); if (_e) { _o->variant_tensors.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->variant_tensors[_i]) { _e->Get(_i)->UnPackTo(_o->variant_tensors[_i].get(), _resolver); } else { _o->variant_tensors[_i] = std::unique_ptr<tflite::VariantSubTypeT>(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->variant_tensors.resize(0); } }
 }
 
-inline flatbuffers::Offset<Tensor> Tensor::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<Tensor> Tensor::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TensorT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateTensor(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<Tensor> CreateTensor(flatbuffers::FlatBufferBuilder &_fbb, const TensorT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<Tensor> CreateTensor(::flatbuffers::FlatBufferBuilder &_fbb, const TensorT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TensorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const TensorT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _shape = _o->shape.size() ? _fbb.CreateVector(_o->shape) : 0;
   auto _type = _o->type;
   auto _buffer = _o->buffer;
@@ -13934,7 +16985,7 @@
   auto _sparsity = _o->sparsity ? CreateSparsityParameters(_fbb, _o->sparsity.get(), _rehasher) : 0;
   auto _shape_signature = _o->shape_signature.size() ? _fbb.CreateVector(_o->shape_signature) : 0;
   auto _has_rank = _o->has_rank;
-  auto _variant_tensors = _o->variant_tensors.size() ? _fbb.CreateVector<flatbuffers::Offset<tflite::VariantSubType>> (_o->variant_tensors.size(), [](size_t i, _VectorArgs *__va) { return CreateVariantSubType(*__va->__fbb, __va->__o->variant_tensors[i].get(), __va->__rehasher); }, &_va ) : 0;
+  auto _variant_tensors = _o->variant_tensors.size() ? _fbb.CreateVector<::flatbuffers::Offset<tflite::VariantSubType>> (_o->variant_tensors.size(), [](size_t i, _VectorArgs *__va) { return CreateVariantSubType(*__va->__fbb, __va->__o->variant_tensors[i].get(), __va->__rehasher); }, &_va ) : 0;
   return tflite::CreateTensor(
       _fbb,
       _shape,
@@ -13949,13 +17000,631 @@
       _variant_tensors);
 }
 
-inline Conv2DOptionsT *Conv2DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline StablehloGatherOptionsT *StablehloGatherOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
+  auto _o = std::unique_ptr<StablehloGatherOptionsT>(new StablehloGatherOptionsT());
+  UnPackTo(_o.get(), _resolver);
+  return _o.release();
+}
+
+inline void StablehloGatherOptions::UnPackTo(StablehloGatherOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
+  (void)_o;
+  (void)_resolver;
+  { auto _e = offset_dims(); if (_e) { _o->offset_dims.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->offset_dims[_i] = _e->Get(_i); } } else { _o->offset_dims.resize(0); } }
+  { auto _e = collapsed_slice_dims(); if (_e) { _o->collapsed_slice_dims.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->collapsed_slice_dims[_i] = _e->Get(_i); } } else { _o->collapsed_slice_dims.resize(0); } }
+  { auto _e = start_index_map(); if (_e) { _o->start_index_map.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->start_index_map[_i] = _e->Get(_i); } } else { _o->start_index_map.resize(0); } }
+  { auto _e = index_vector_dim(); _o->index_vector_dim = _e; }
+  { auto _e = slice_sizes(); if (_e) { _o->slice_sizes.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->slice_sizes[_i] = _e->Get(_i); } } else { _o->slice_sizes.resize(0); } }
+  { auto _e = indices_are_sorted(); _o->indices_are_sorted = _e; }
+}
+
+inline ::flatbuffers::Offset<StablehloGatherOptions> StablehloGatherOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloGatherOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  return CreateStablehloGatherOptions(_fbb, _o, _rehasher);
+}
+
+inline ::flatbuffers::Offset<StablehloGatherOptions> CreateStablehloGatherOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloGatherOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  (void)_rehasher;
+  (void)_o;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloGatherOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  auto _offset_dims = _o->offset_dims.size() ? _fbb.CreateVector(_o->offset_dims) : 0;
+  auto _collapsed_slice_dims = _o->collapsed_slice_dims.size() ? _fbb.CreateVector(_o->collapsed_slice_dims) : 0;
+  auto _start_index_map = _o->start_index_map.size() ? _fbb.CreateVector(_o->start_index_map) : 0;
+  auto _index_vector_dim = _o->index_vector_dim;
+  auto _slice_sizes = _o->slice_sizes.size() ? _fbb.CreateVector(_o->slice_sizes) : 0;
+  auto _indices_are_sorted = _o->indices_are_sorted;
+  return tflite::CreateStablehloGatherOptions(
+      _fbb,
+      _offset_dims,
+      _collapsed_slice_dims,
+      _start_index_map,
+      _index_vector_dim,
+      _slice_sizes,
+      _indices_are_sorted);
+}
+
+inline StablehloTransposeOptionsT *StablehloTransposeOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
+  auto _o = std::unique_ptr<StablehloTransposeOptionsT>(new StablehloTransposeOptionsT());
+  UnPackTo(_o.get(), _resolver);
+  return _o.release();
+}
+
+inline void StablehloTransposeOptions::UnPackTo(StablehloTransposeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
+  (void)_o;
+  (void)_resolver;
+  { auto _e = permutation(); if (_e) { _o->permutation.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->permutation[_i] = _e->Get(_i); } } else { _o->permutation.resize(0); } }
+}
+
+inline ::flatbuffers::Offset<StablehloTransposeOptions> StablehloTransposeOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloTransposeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  return CreateStablehloTransposeOptions(_fbb, _o, _rehasher);
+}
+
+inline ::flatbuffers::Offset<StablehloTransposeOptions> CreateStablehloTransposeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloTransposeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  (void)_rehasher;
+  (void)_o;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloTransposeOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  auto _permutation = _o->permutation.size() ? _fbb.CreateVector(_o->permutation) : 0;
+  return tflite::CreateStablehloTransposeOptions(
+      _fbb,
+      _permutation);
+}
+
+inline StablehloDotGeneralOptionsT *StablehloDotGeneralOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
+  auto _o = std::unique_ptr<StablehloDotGeneralOptionsT>(new StablehloDotGeneralOptionsT());
+  UnPackTo(_o.get(), _resolver);
+  return _o.release();
+}
+
+inline void StablehloDotGeneralOptions::UnPackTo(StablehloDotGeneralOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
+  (void)_o;
+  (void)_resolver;
+  { auto _e = lhs_batching_dimensions(); if (_e) { _o->lhs_batching_dimensions.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->lhs_batching_dimensions[_i] = _e->Get(_i); } } else { _o->lhs_batching_dimensions.resize(0); } }
+  { auto _e = rhs_batching_dimensions(); if (_e) { _o->rhs_batching_dimensions.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->rhs_batching_dimensions[_i] = _e->Get(_i); } } else { _o->rhs_batching_dimensions.resize(0); } }
+  { auto _e = lhs_contracting_dimensions(); if (_e) { _o->lhs_contracting_dimensions.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->lhs_contracting_dimensions[_i] = _e->Get(_i); } } else { _o->lhs_contracting_dimensions.resize(0); } }
+  { auto _e = rhs_contracting_dimensions(); if (_e) { _o->rhs_contracting_dimensions.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->rhs_contracting_dimensions[_i] = _e->Get(_i); } } else { _o->rhs_contracting_dimensions.resize(0); } }
+  { auto _e = precision_config(); if (_e) { _o->precision_config.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->precision_config[_i] = static_cast<tflite::StablehloPrecisionConfig>(_e->Get(_i)); } } else { _o->precision_config.resize(0); } }
+}
+
+inline ::flatbuffers::Offset<StablehloDotGeneralOptions> StablehloDotGeneralOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloDotGeneralOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  return CreateStablehloDotGeneralOptions(_fbb, _o, _rehasher);
+}
+
+inline ::flatbuffers::Offset<StablehloDotGeneralOptions> CreateStablehloDotGeneralOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloDotGeneralOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  (void)_rehasher;
+  (void)_o;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloDotGeneralOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  auto _lhs_batching_dimensions = _o->lhs_batching_dimensions.size() ? _fbb.CreateVector(_o->lhs_batching_dimensions) : 0;
+  auto _rhs_batching_dimensions = _o->rhs_batching_dimensions.size() ? _fbb.CreateVector(_o->rhs_batching_dimensions) : 0;
+  auto _lhs_contracting_dimensions = _o->lhs_contracting_dimensions.size() ? _fbb.CreateVector(_o->lhs_contracting_dimensions) : 0;
+  auto _rhs_contracting_dimensions = _o->rhs_contracting_dimensions.size() ? _fbb.CreateVector(_o->rhs_contracting_dimensions) : 0;
+  auto _precision_config = _o->precision_config.size() ? _fbb.CreateVectorScalarCast<uint32_t>(::flatbuffers::data(_o->precision_config), _o->precision_config.size()) : 0;
+  return tflite::CreateStablehloDotGeneralOptions(
+      _fbb,
+      _lhs_batching_dimensions,
+      _rhs_batching_dimensions,
+      _lhs_contracting_dimensions,
+      _rhs_contracting_dimensions,
+      _precision_config);
+}
+
+inline StablehloReduceWindowOptionsT *StablehloReduceWindowOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
+  auto _o = std::unique_ptr<StablehloReduceWindowOptionsT>(new StablehloReduceWindowOptionsT());
+  UnPackTo(_o.get(), _resolver);
+  return _o.release();
+}
+
+inline void StablehloReduceWindowOptions::UnPackTo(StablehloReduceWindowOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
+  (void)_o;
+  (void)_resolver;
+  { auto _e = window_dimensions(); if (_e) { _o->window_dimensions.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->window_dimensions[_i] = _e->Get(_i); } } else { _o->window_dimensions.resize(0); } }
+  { auto _e = window_strides(); if (_e) { _o->window_strides.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->window_strides[_i] = _e->Get(_i); } } else { _o->window_strides.resize(0); } }
+  { auto _e = base_dilations(); if (_e) { _o->base_dilations.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->base_dilations[_i] = _e->Get(_i); } } else { _o->base_dilations.resize(0); } }
+  { auto _e = window_dilations(); if (_e) { _o->window_dilations.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->window_dilations[_i] = _e->Get(_i); } } else { _o->window_dilations.resize(0); } }
+  { auto _e = padding(); if (_e) { _o->padding.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->padding[_i] = _e->Get(_i); } } else { _o->padding.resize(0); } }
+  { auto _e = body_subgraph_index(); _o->body_subgraph_index = _e; }
+}
+
+inline ::flatbuffers::Offset<StablehloReduceWindowOptions> StablehloReduceWindowOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloReduceWindowOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  return CreateStablehloReduceWindowOptions(_fbb, _o, _rehasher);
+}
+
+inline ::flatbuffers::Offset<StablehloReduceWindowOptions> CreateStablehloReduceWindowOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloReduceWindowOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  (void)_rehasher;
+  (void)_o;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloReduceWindowOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  auto _window_dimensions = _o->window_dimensions.size() ? _fbb.CreateVector(_o->window_dimensions) : 0;
+  auto _window_strides = _o->window_strides.size() ? _fbb.CreateVector(_o->window_strides) : 0;
+  auto _base_dilations = _o->base_dilations.size() ? _fbb.CreateVector(_o->base_dilations) : 0;
+  auto _window_dilations = _o->window_dilations.size() ? _fbb.CreateVector(_o->window_dilations) : 0;
+  auto _padding = _o->padding.size() ? _fbb.CreateVector(_o->padding) : 0;
+  auto _body_subgraph_index = _o->body_subgraph_index;
+  return tflite::CreateStablehloReduceWindowOptions(
+      _fbb,
+      _window_dimensions,
+      _window_strides,
+      _base_dilations,
+      _window_dilations,
+      _padding,
+      _body_subgraph_index);
+}
+
+inline StablehloWhileOptionsT *StablehloWhileOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
+  auto _o = std::unique_ptr<StablehloWhileOptionsT>(new StablehloWhileOptionsT());
+  UnPackTo(_o.get(), _resolver);
+  return _o.release();
+}
+
+inline void StablehloWhileOptions::UnPackTo(StablehloWhileOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
+  (void)_o;
+  (void)_resolver;
+  { auto _e = cond_subgraph_index(); _o->cond_subgraph_index = _e; }
+  { auto _e = body_subgraph_index(); _o->body_subgraph_index = _e; }
+}
+
+inline ::flatbuffers::Offset<StablehloWhileOptions> StablehloWhileOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloWhileOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  return CreateStablehloWhileOptions(_fbb, _o, _rehasher);
+}
+
+inline ::flatbuffers::Offset<StablehloWhileOptions> CreateStablehloWhileOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloWhileOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  (void)_rehasher;
+  (void)_o;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloWhileOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  auto _cond_subgraph_index = _o->cond_subgraph_index;
+  auto _body_subgraph_index = _o->body_subgraph_index;
+  return tflite::CreateStablehloWhileOptions(
+      _fbb,
+      _cond_subgraph_index,
+      _body_subgraph_index);
+}
+
+inline StablehloSortOptionsT *StablehloSortOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
+  auto _o = std::unique_ptr<StablehloSortOptionsT>(new StablehloSortOptionsT());
+  UnPackTo(_o.get(), _resolver);
+  return _o.release();
+}
+
+inline void StablehloSortOptions::UnPackTo(StablehloSortOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
+  (void)_o;
+  (void)_resolver;
+  { auto _e = dimension(); _o->dimension = _e; }
+  { auto _e = is_stable(); _o->is_stable = _e; }
+  { auto _e = comparator_subgraph_index(); _o->comparator_subgraph_index = _e; }
+}
+
+inline ::flatbuffers::Offset<StablehloSortOptions> StablehloSortOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloSortOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  return CreateStablehloSortOptions(_fbb, _o, _rehasher);
+}
+
+inline ::flatbuffers::Offset<StablehloSortOptions> CreateStablehloSortOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloSortOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  (void)_rehasher;
+  (void)_o;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloSortOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  auto _dimension = _o->dimension;
+  auto _is_stable = _o->is_stable;
+  auto _comparator_subgraph_index = _o->comparator_subgraph_index;
+  return tflite::CreateStablehloSortOptions(
+      _fbb,
+      _dimension,
+      _is_stable,
+      _comparator_subgraph_index);
+}
+
+inline StablehloConcatenateOptionsT *StablehloConcatenateOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
+  auto _o = std::unique_ptr<StablehloConcatenateOptionsT>(new StablehloConcatenateOptionsT());
+  UnPackTo(_o.get(), _resolver);
+  return _o.release();
+}
+
+inline void StablehloConcatenateOptions::UnPackTo(StablehloConcatenateOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
+  (void)_o;
+  (void)_resolver;
+  { auto _e = dimension(); _o->dimension = _e; }
+}
+
+inline ::flatbuffers::Offset<StablehloConcatenateOptions> StablehloConcatenateOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloConcatenateOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  return CreateStablehloConcatenateOptions(_fbb, _o, _rehasher);
+}
+
+inline ::flatbuffers::Offset<StablehloConcatenateOptions> CreateStablehloConcatenateOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloConcatenateOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  (void)_rehasher;
+  (void)_o;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloConcatenateOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  auto _dimension = _o->dimension;
+  return tflite::CreateStablehloConcatenateOptions(
+      _fbb,
+      _dimension);
+}
+
+inline StablehloBroadcastInDimOptionsT *StablehloBroadcastInDimOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
+  auto _o = std::unique_ptr<StablehloBroadcastInDimOptionsT>(new StablehloBroadcastInDimOptionsT());
+  UnPackTo(_o.get(), _resolver);
+  return _o.release();
+}
+
+inline void StablehloBroadcastInDimOptions::UnPackTo(StablehloBroadcastInDimOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
+  (void)_o;
+  (void)_resolver;
+  { auto _e = broadcast_dimensions(); if (_e) { _o->broadcast_dimensions.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->broadcast_dimensions[_i] = _e->Get(_i); } } else { _o->broadcast_dimensions.resize(0); } }
+}
+
+inline ::flatbuffers::Offset<StablehloBroadcastInDimOptions> StablehloBroadcastInDimOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloBroadcastInDimOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  return CreateStablehloBroadcastInDimOptions(_fbb, _o, _rehasher);
+}
+
+inline ::flatbuffers::Offset<StablehloBroadcastInDimOptions> CreateStablehloBroadcastInDimOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloBroadcastInDimOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  (void)_rehasher;
+  (void)_o;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloBroadcastInDimOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  auto _broadcast_dimensions = _o->broadcast_dimensions.size() ? _fbb.CreateVector(_o->broadcast_dimensions) : 0;
+  return tflite::CreateStablehloBroadcastInDimOptions(
+      _fbb,
+      _broadcast_dimensions);
+}
+
+inline StablehloCompareOptionsT *StablehloCompareOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
+  auto _o = std::unique_ptr<StablehloCompareOptionsT>(new StablehloCompareOptionsT());
+  UnPackTo(_o.get(), _resolver);
+  return _o.release();
+}
+
+inline void StablehloCompareOptions::UnPackTo(StablehloCompareOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
+  (void)_o;
+  (void)_resolver;
+  { auto _e = comparison_direction(); _o->comparison_direction = _e; }
+  { auto _e = compare_type(); _o->compare_type = _e; }
+}
+
+inline ::flatbuffers::Offset<StablehloCompareOptions> StablehloCompareOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloCompareOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  return CreateStablehloCompareOptions(_fbb, _o, _rehasher);
+}
+
+inline ::flatbuffers::Offset<StablehloCompareOptions> CreateStablehloCompareOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloCompareOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  (void)_rehasher;
+  (void)_o;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloCompareOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  auto _comparison_direction = _o->comparison_direction;
+  auto _compare_type = _o->compare_type;
+  return tflite::CreateStablehloCompareOptions(
+      _fbb,
+      _comparison_direction,
+      _compare_type);
+}
+
+inline StablehloDynamicSliceOptionsT *StablehloDynamicSliceOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
+  auto _o = std::unique_ptr<StablehloDynamicSliceOptionsT>(new StablehloDynamicSliceOptionsT());
+  UnPackTo(_o.get(), _resolver);
+  return _o.release();
+}
+
+inline void StablehloDynamicSliceOptions::UnPackTo(StablehloDynamicSliceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
+  (void)_o;
+  (void)_resolver;
+  { auto _e = slice_sizes(); if (_e) { _o->slice_sizes.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->slice_sizes[_i] = _e->Get(_i); } } else { _o->slice_sizes.resize(0); } }
+}
+
+inline ::flatbuffers::Offset<StablehloDynamicSliceOptions> StablehloDynamicSliceOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloDynamicSliceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  return CreateStablehloDynamicSliceOptions(_fbb, _o, _rehasher);
+}
+
+inline ::flatbuffers::Offset<StablehloDynamicSliceOptions> CreateStablehloDynamicSliceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloDynamicSliceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  (void)_rehasher;
+  (void)_o;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloDynamicSliceOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  auto _slice_sizes = _o->slice_sizes.size() ? _fbb.CreateVector(_o->slice_sizes) : 0;
+  return tflite::CreateStablehloDynamicSliceOptions(
+      _fbb,
+      _slice_sizes);
+}
+
+inline StablehloPadOptionsT *StablehloPadOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
+  auto _o = std::unique_ptr<StablehloPadOptionsT>(new StablehloPadOptionsT());
+  UnPackTo(_o.get(), _resolver);
+  return _o.release();
+}
+
+inline void StablehloPadOptions::UnPackTo(StablehloPadOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
+  (void)_o;
+  (void)_resolver;
+  { auto _e = edge_padding_low(); if (_e) { _o->edge_padding_low.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->edge_padding_low[_i] = _e->Get(_i); } } else { _o->edge_padding_low.resize(0); } }
+  { auto _e = edge_padding_high(); if (_e) { _o->edge_padding_high.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->edge_padding_high[_i] = _e->Get(_i); } } else { _o->edge_padding_high.resize(0); } }
+  { auto _e = interior_padding(); if (_e) { _o->interior_padding.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->interior_padding[_i] = _e->Get(_i); } } else { _o->interior_padding.resize(0); } }
+}
+
+inline ::flatbuffers::Offset<StablehloPadOptions> StablehloPadOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloPadOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  return CreateStablehloPadOptions(_fbb, _o, _rehasher);
+}
+
+inline ::flatbuffers::Offset<StablehloPadOptions> CreateStablehloPadOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloPadOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  (void)_rehasher;
+  (void)_o;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloPadOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  auto _edge_padding_low = _o->edge_padding_low.size() ? _fbb.CreateVector(_o->edge_padding_low) : 0;
+  auto _edge_padding_high = _o->edge_padding_high.size() ? _fbb.CreateVector(_o->edge_padding_high) : 0;
+  auto _interior_padding = _o->interior_padding.size() ? _fbb.CreateVector(_o->interior_padding) : 0;
+  return tflite::CreateStablehloPadOptions(
+      _fbb,
+      _edge_padding_low,
+      _edge_padding_high,
+      _interior_padding);
+}
+
+inline StablehloIotaOptionsT *StablehloIotaOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
+  auto _o = std::unique_ptr<StablehloIotaOptionsT>(new StablehloIotaOptionsT());
+  UnPackTo(_o.get(), _resolver);
+  return _o.release();
+}
+
+inline void StablehloIotaOptions::UnPackTo(StablehloIotaOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
+  (void)_o;
+  (void)_resolver;
+  { auto _e = iota_dimension(); _o->iota_dimension = _e; }
+}
+
+inline ::flatbuffers::Offset<StablehloIotaOptions> StablehloIotaOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloIotaOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  return CreateStablehloIotaOptions(_fbb, _o, _rehasher);
+}
+
+inline ::flatbuffers::Offset<StablehloIotaOptions> CreateStablehloIotaOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloIotaOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  (void)_rehasher;
+  (void)_o;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloIotaOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  auto _iota_dimension = _o->iota_dimension;
+  return tflite::CreateStablehloIotaOptions(
+      _fbb,
+      _iota_dimension);
+}
+
+inline StablehloCustomCallOptionsT *StablehloCustomCallOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
+  auto _o = std::unique_ptr<StablehloCustomCallOptionsT>(new StablehloCustomCallOptionsT());
+  UnPackTo(_o.get(), _resolver);
+  return _o.release();
+}
+
+inline void StablehloCustomCallOptions::UnPackTo(StablehloCustomCallOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
+  (void)_o;
+  (void)_resolver;
+  { auto _e = call_target_name(); if (_e) _o->call_target_name = _e->str(); }
+  { auto _e = has_side_effect(); _o->has_side_effect = _e; }
+  { auto _e = backend_config(); if (_e) _o->backend_config = _e->str(); }
+  { auto _e = api_version(); _o->api_version = _e; }
+  { auto _e = called_computations(); if (_e) { _o->called_computations.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->called_computations[_i] = _e->Get(_i); } } else { _o->called_computations.resize(0); } }
+  { auto _e = custom_attributes(); if (_e) { _o->custom_attributes.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->custom_attributes.begin()); } }
+}
+
+inline ::flatbuffers::Offset<StablehloCustomCallOptions> StablehloCustomCallOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloCustomCallOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  return CreateStablehloCustomCallOptions(_fbb, _o, _rehasher);
+}
+
+inline ::flatbuffers::Offset<StablehloCustomCallOptions> CreateStablehloCustomCallOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloCustomCallOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  (void)_rehasher;
+  (void)_o;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloCustomCallOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  auto _call_target_name = _o->call_target_name.empty() ? 0 : _fbb.CreateString(_o->call_target_name);
+  auto _has_side_effect = _o->has_side_effect;
+  auto _backend_config = _o->backend_config.empty() ? 0 : _fbb.CreateString(_o->backend_config);
+  auto _api_version = _o->api_version;
+  auto _called_computations = _o->called_computations.size() ? _fbb.CreateVector(_o->called_computations) : 0;
+  auto _custom_attributes = _o->custom_attributes.size() ? _fbb.CreateVector(_o->custom_attributes) : 0;
+  return tflite::CreateStablehloCustomCallOptions(
+      _fbb,
+      _call_target_name,
+      _has_side_effect,
+      _backend_config,
+      _api_version,
+      _called_computations,
+      _custom_attributes);
+}
+
+inline StablehloReduceOptionsT *StablehloReduceOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
+  auto _o = std::unique_ptr<StablehloReduceOptionsT>(new StablehloReduceOptionsT());
+  UnPackTo(_o.get(), _resolver);
+  return _o.release();
+}
+
+inline void StablehloReduceOptions::UnPackTo(StablehloReduceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
+  (void)_o;
+  (void)_resolver;
+  { auto _e = dimensions(); if (_e) { _o->dimensions.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->dimensions[_i] = _e->Get(_i); } } else { _o->dimensions.resize(0); } }
+  { auto _e = body_subgraph_index(); _o->body_subgraph_index = _e; }
+}
+
+inline ::flatbuffers::Offset<StablehloReduceOptions> StablehloReduceOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloReduceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  return CreateStablehloReduceOptions(_fbb, _o, _rehasher);
+}
+
+inline ::flatbuffers::Offset<StablehloReduceOptions> CreateStablehloReduceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloReduceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  (void)_rehasher;
+  (void)_o;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloReduceOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  auto _dimensions = _o->dimensions.size() ? _fbb.CreateVector(_o->dimensions) : 0;
+  auto _body_subgraph_index = _o->body_subgraph_index;
+  return tflite::CreateStablehloReduceOptions(
+      _fbb,
+      _dimensions,
+      _body_subgraph_index);
+}
+
+inline StablehloSliceOptionsT *StablehloSliceOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
+  auto _o = std::unique_ptr<StablehloSliceOptionsT>(new StablehloSliceOptionsT());
+  UnPackTo(_o.get(), _resolver);
+  return _o.release();
+}
+
+inline void StablehloSliceOptions::UnPackTo(StablehloSliceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
+  (void)_o;
+  (void)_resolver;
+  { auto _e = start_indices(); if (_e) { _o->start_indices.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->start_indices[_i] = _e->Get(_i); } } else { _o->start_indices.resize(0); } }
+  { auto _e = limit_indices(); if (_e) { _o->limit_indices.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->limit_indices[_i] = _e->Get(_i); } } else { _o->limit_indices.resize(0); } }
+  { auto _e = strides(); if (_e) { _o->strides.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->strides[_i] = _e->Get(_i); } } else { _o->strides.resize(0); } }
+}
+
+inline ::flatbuffers::Offset<StablehloSliceOptions> StablehloSliceOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloSliceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  return CreateStablehloSliceOptions(_fbb, _o, _rehasher);
+}
+
+inline ::flatbuffers::Offset<StablehloSliceOptions> CreateStablehloSliceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloSliceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  (void)_rehasher;
+  (void)_o;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloSliceOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  auto _start_indices = _o->start_indices.size() ? _fbb.CreateVector(_o->start_indices) : 0;
+  auto _limit_indices = _o->limit_indices.size() ? _fbb.CreateVector(_o->limit_indices) : 0;
+  auto _strides = _o->strides.size() ? _fbb.CreateVector(_o->strides) : 0;
+  return tflite::CreateStablehloSliceOptions(
+      _fbb,
+      _start_indices,
+      _limit_indices,
+      _strides);
+}
+
+inline StablehloConvolutionOptionsT *StablehloConvolutionOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
+  auto _o = std::unique_ptr<StablehloConvolutionOptionsT>(new StablehloConvolutionOptionsT());
+  UnPackTo(_o.get(), _resolver);
+  return _o.release();
+}
+
+inline void StablehloConvolutionOptions::UnPackTo(StablehloConvolutionOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
+  (void)_o;
+  (void)_resolver;
+  { auto _e = window_strides(); if (_e) { _o->window_strides.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->window_strides[_i] = _e->Get(_i); } } else { _o->window_strides.resize(0); } }
+  { auto _e = padding(); if (_e) { _o->padding.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->padding[_i] = _e->Get(_i); } } else { _o->padding.resize(0); } }
+  { auto _e = lhs_dilation(); if (_e) { _o->lhs_dilation.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->lhs_dilation[_i] = _e->Get(_i); } } else { _o->lhs_dilation.resize(0); } }
+  { auto _e = rhs_dilation(); if (_e) { _o->rhs_dilation.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->rhs_dilation[_i] = _e->Get(_i); } } else { _o->rhs_dilation.resize(0); } }
+  { auto _e = window_reversal(); if (_e) { _o->window_reversal.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->window_reversal[_i] = _e->Get(_i) != 0; } } else { _o->window_reversal.resize(0); } }
+  { auto _e = input_batch_dimension(); _o->input_batch_dimension = _e; }
+  { auto _e = input_feature_dimension(); _o->input_feature_dimension = _e; }
+  { auto _e = input_spatial_dimensions(); if (_e) { _o->input_spatial_dimensions.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->input_spatial_dimensions[_i] = _e->Get(_i); } } else { _o->input_spatial_dimensions.resize(0); } }
+  { auto _e = kernel_input_feature_dimension(); _o->kernel_input_feature_dimension = _e; }
+  { auto _e = kernel_output_feature_dimension(); _o->kernel_output_feature_dimension = _e; }
+  { auto _e = kernel_spatial_dimensions(); if (_e) { _o->kernel_spatial_dimensions.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->kernel_spatial_dimensions[_i] = _e->Get(_i); } } else { _o->kernel_spatial_dimensions.resize(0); } }
+  { auto _e = output_batch_dimension(); _o->output_batch_dimension = _e; }
+  { auto _e = output_feature_dimension(); _o->output_feature_dimension = _e; }
+  { auto _e = output_spatial_dimensions(); if (_e) { _o->output_spatial_dimensions.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->output_spatial_dimensions[_i] = _e->Get(_i); } } else { _o->output_spatial_dimensions.resize(0); } }
+  { auto _e = feature_group_count(); _o->feature_group_count = _e; }
+  { auto _e = batch_group_count(); _o->batch_group_count = _e; }
+  { auto _e = precision_config(); if (_e) { _o->precision_config.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->precision_config[_i] = static_cast<tflite::StablehloPrecisionConfig>(_e->Get(_i)); } } else { _o->precision_config.resize(0); } }
+}
+
+inline ::flatbuffers::Offset<StablehloConvolutionOptions> StablehloConvolutionOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloConvolutionOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  return CreateStablehloConvolutionOptions(_fbb, _o, _rehasher);
+}
+
+inline ::flatbuffers::Offset<StablehloConvolutionOptions> CreateStablehloConvolutionOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloConvolutionOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  (void)_rehasher;
+  (void)_o;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloConvolutionOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  auto _window_strides = _o->window_strides.size() ? _fbb.CreateVector(_o->window_strides) : 0;
+  auto _padding = _o->padding.size() ? _fbb.CreateVector(_o->padding) : 0;
+  auto _lhs_dilation = _o->lhs_dilation.size() ? _fbb.CreateVector(_o->lhs_dilation) : 0;
+  auto _rhs_dilation = _o->rhs_dilation.size() ? _fbb.CreateVector(_o->rhs_dilation) : 0;
+  auto _window_reversal = _o->window_reversal.size() ? _fbb.CreateVector(_o->window_reversal) : 0;
+  auto _input_batch_dimension = _o->input_batch_dimension;
+  auto _input_feature_dimension = _o->input_feature_dimension;
+  auto _input_spatial_dimensions = _o->input_spatial_dimensions.size() ? _fbb.CreateVector(_o->input_spatial_dimensions) : 0;
+  auto _kernel_input_feature_dimension = _o->kernel_input_feature_dimension;
+  auto _kernel_output_feature_dimension = _o->kernel_output_feature_dimension;
+  auto _kernel_spatial_dimensions = _o->kernel_spatial_dimensions.size() ? _fbb.CreateVector(_o->kernel_spatial_dimensions) : 0;
+  auto _output_batch_dimension = _o->output_batch_dimension;
+  auto _output_feature_dimension = _o->output_feature_dimension;
+  auto _output_spatial_dimensions = _o->output_spatial_dimensions.size() ? _fbb.CreateVector(_o->output_spatial_dimensions) : 0;
+  auto _feature_group_count = _o->feature_group_count;
+  auto _batch_group_count = _o->batch_group_count;
+  auto _precision_config = _o->precision_config.size() ? _fbb.CreateVectorScalarCast<uint32_t>(::flatbuffers::data(_o->precision_config), _o->precision_config.size()) : 0;
+  return tflite::CreateStablehloConvolutionOptions(
+      _fbb,
+      _window_strides,
+      _padding,
+      _lhs_dilation,
+      _rhs_dilation,
+      _window_reversal,
+      _input_batch_dimension,
+      _input_feature_dimension,
+      _input_spatial_dimensions,
+      _kernel_input_feature_dimension,
+      _kernel_output_feature_dimension,
+      _kernel_spatial_dimensions,
+      _output_batch_dimension,
+      _output_feature_dimension,
+      _output_spatial_dimensions,
+      _feature_group_count,
+      _batch_group_count,
+      _precision_config);
+}
+
+inline StablehloScatterOptionsT *StablehloScatterOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
+  auto _o = std::unique_ptr<StablehloScatterOptionsT>(new StablehloScatterOptionsT());
+  UnPackTo(_o.get(), _resolver);
+  return _o.release();
+}
+
+inline void StablehloScatterOptions::UnPackTo(StablehloScatterOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
+  (void)_o;
+  (void)_resolver;
+  { auto _e = indices_are_sorted(); _o->indices_are_sorted = _e; }
+  { auto _e = update_window_dims(); if (_e) { _o->update_window_dims.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->update_window_dims[_i] = _e->Get(_i); } } else { _o->update_window_dims.resize(0); } }
+  { auto _e = inserted_window_dims(); if (_e) { _o->inserted_window_dims.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inserted_window_dims[_i] = _e->Get(_i); } } else { _o->inserted_window_dims.resize(0); } }
+  { auto _e = scatter_dims_to_operand_dims(); if (_e) { _o->scatter_dims_to_operand_dims.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->scatter_dims_to_operand_dims[_i] = _e->Get(_i); } } else { _o->scatter_dims_to_operand_dims.resize(0); } }
+  { auto _e = index_vector_dim(); _o->index_vector_dim = _e; }
+  { auto _e = unique_indices(); _o->unique_indices = _e; }
+  { auto _e = update_computation_subgraph_index(); _o->update_computation_subgraph_index = _e; }
+}
+
+inline ::flatbuffers::Offset<StablehloScatterOptions> StablehloScatterOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloScatterOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  return CreateStablehloScatterOptions(_fbb, _o, _rehasher);
+}
+
+inline ::flatbuffers::Offset<StablehloScatterOptions> CreateStablehloScatterOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloScatterOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  (void)_rehasher;
+  (void)_o;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloScatterOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  auto _indices_are_sorted = _o->indices_are_sorted;
+  auto _update_window_dims = _o->update_window_dims.size() ? _fbb.CreateVector(_o->update_window_dims) : 0;
+  auto _inserted_window_dims = _o->inserted_window_dims.size() ? _fbb.CreateVector(_o->inserted_window_dims) : 0;
+  auto _scatter_dims_to_operand_dims = _o->scatter_dims_to_operand_dims.size() ? _fbb.CreateVector(_o->scatter_dims_to_operand_dims) : 0;
+  auto _index_vector_dim = _o->index_vector_dim;
+  auto _unique_indices = _o->unique_indices;
+  auto _update_computation_subgraph_index = _o->update_computation_subgraph_index;
+  return tflite::CreateStablehloScatterOptions(
+      _fbb,
+      _indices_are_sorted,
+      _update_window_dims,
+      _inserted_window_dims,
+      _scatter_dims_to_operand_dims,
+      _index_vector_dim,
+      _unique_indices,
+      _update_computation_subgraph_index);
+}
+
+inline StablehloRngBitGeneratorOptionsT *StablehloRngBitGeneratorOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
+  auto _o = std::unique_ptr<StablehloRngBitGeneratorOptionsT>(new StablehloRngBitGeneratorOptionsT());
+  UnPackTo(_o.get(), _resolver);
+  return _o.release();
+}
+
+inline void StablehloRngBitGeneratorOptions::UnPackTo(StablehloRngBitGeneratorOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
+  (void)_o;
+  (void)_resolver;
+  { auto _e = algorithm(); _o->algorithm = _e; }
+}
+
+inline ::flatbuffers::Offset<StablehloRngBitGeneratorOptions> StablehloRngBitGeneratorOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloRngBitGeneratorOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  return CreateStablehloRngBitGeneratorOptions(_fbb, _o, _rehasher);
+}
+
+inline ::flatbuffers::Offset<StablehloRngBitGeneratorOptions> CreateStablehloRngBitGeneratorOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloRngBitGeneratorOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  (void)_rehasher;
+  (void)_o;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloRngBitGeneratorOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  auto _algorithm = _o->algorithm;
+  return tflite::CreateStablehloRngBitGeneratorOptions(
+      _fbb,
+      _algorithm);
+}
+
+inline Conv2DOptionsT *Conv2DOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<Conv2DOptionsT>(new Conv2DOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void Conv2DOptions::UnPackTo(Conv2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void Conv2DOptions::UnPackTo(Conv2DOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = padding(); _o->padding = _e; }
@@ -13964,22 +17633,24 @@
   { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
   { auto _e = dilation_w_factor(); _o->dilation_w_factor = _e; }
   { auto _e = dilation_h_factor(); _o->dilation_h_factor = _e; }
+  { auto _e = quantized_bias_type(); _o->quantized_bias_type = _e; }
 }
 
-inline flatbuffers::Offset<Conv2DOptions> Conv2DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<Conv2DOptions> Conv2DOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateConv2DOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<Conv2DOptions> CreateConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<Conv2DOptions> CreateConv2DOptions(::flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Conv2DOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const Conv2DOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _padding = _o->padding;
   auto _stride_w = _o->stride_w;
   auto _stride_h = _o->stride_h;
   auto _fused_activation_function = _o->fused_activation_function;
   auto _dilation_w_factor = _o->dilation_w_factor;
   auto _dilation_h_factor = _o->dilation_h_factor;
+  auto _quantized_bias_type = _o->quantized_bias_type;
   return tflite::CreateConv2DOptions(
       _fbb,
       _padding,
@@ -13987,16 +17658,17 @@
       _stride_h,
       _fused_activation_function,
       _dilation_w_factor,
-      _dilation_h_factor);
+      _dilation_h_factor,
+      _quantized_bias_type);
 }
 
-inline Conv3DOptionsT *Conv3DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline Conv3DOptionsT *Conv3DOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<Conv3DOptionsT>(new Conv3DOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void Conv3DOptions::UnPackTo(Conv3DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void Conv3DOptions::UnPackTo(Conv3DOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = padding(); _o->padding = _e; }
@@ -14009,14 +17681,14 @@
   { auto _e = dilation_h_factor(); _o->dilation_h_factor = _e; }
 }
 
-inline flatbuffers::Offset<Conv3DOptions> Conv3DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<Conv3DOptions> Conv3DOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateConv3DOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<Conv3DOptions> CreateConv3DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<Conv3DOptions> CreateConv3DOptions(::flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Conv3DOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const Conv3DOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _padding = _o->padding;
   auto _stride_d = _o->stride_d;
   auto _stride_w = _o->stride_w;
@@ -14037,13 +17709,13 @@
       _dilation_h_factor);
 }
 
-inline Pool2DOptionsT *Pool2DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline Pool2DOptionsT *Pool2DOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<Pool2DOptionsT>(new Pool2DOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void Pool2DOptions::UnPackTo(Pool2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void Pool2DOptions::UnPackTo(Pool2DOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = padding(); _o->padding = _e; }
@@ -14054,14 +17726,14 @@
   { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
 }
 
-inline flatbuffers::Offset<Pool2DOptions> Pool2DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<Pool2DOptions> Pool2DOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreatePool2DOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<Pool2DOptions> CreatePool2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<Pool2DOptions> CreatePool2DOptions(::flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Pool2DOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const Pool2DOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _padding = _o->padding;
   auto _stride_w = _o->stride_w;
   auto _stride_h = _o->stride_h;
@@ -14078,13 +17750,13 @@
       _fused_activation_function);
 }
 
-inline DepthwiseConv2DOptionsT *DepthwiseConv2DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline DepthwiseConv2DOptionsT *DepthwiseConv2DOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<DepthwiseConv2DOptionsT>(new DepthwiseConv2DOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void DepthwiseConv2DOptions::UnPackTo(DepthwiseConv2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void DepthwiseConv2DOptions::UnPackTo(DepthwiseConv2DOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = padding(); _o->padding = _e; }
@@ -14096,14 +17768,14 @@
   { auto _e = dilation_h_factor(); _o->dilation_h_factor = _e; }
 }
 
-inline flatbuffers::Offset<DepthwiseConv2DOptions> DepthwiseConv2DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<DepthwiseConv2DOptions> DepthwiseConv2DOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateDepthwiseConv2DOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<DepthwiseConv2DOptions> CreateDepthwiseConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<DepthwiseConv2DOptions> CreateDepthwiseConv2DOptions(::flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DepthwiseConv2DOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const DepthwiseConv2DOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _padding = _o->padding;
   auto _stride_w = _o->stride_w;
   auto _stride_h = _o->stride_h;
@@ -14122,28 +17794,28 @@
       _dilation_h_factor);
 }
 
-inline ConcatEmbeddingsOptionsT *ConcatEmbeddingsOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline ConcatEmbeddingsOptionsT *ConcatEmbeddingsOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<ConcatEmbeddingsOptionsT>(new ConcatEmbeddingsOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void ConcatEmbeddingsOptions::UnPackTo(ConcatEmbeddingsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void ConcatEmbeddingsOptions::UnPackTo(ConcatEmbeddingsOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = num_channels(); _o->num_channels = _e; }
-  { auto _e = num_columns_per_channel(); if (_e) { _o->num_columns_per_channel.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->num_columns_per_channel[_i] = _e->Get(_i); } } }
-  { auto _e = embedding_dim_per_channel(); if (_e) { _o->embedding_dim_per_channel.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->embedding_dim_per_channel[_i] = _e->Get(_i); } } }
+  { auto _e = num_columns_per_channel(); if (_e) { _o->num_columns_per_channel.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->num_columns_per_channel[_i] = _e->Get(_i); } } else { _o->num_columns_per_channel.resize(0); } }
+  { auto _e = embedding_dim_per_channel(); if (_e) { _o->embedding_dim_per_channel.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->embedding_dim_per_channel[_i] = _e->Get(_i); } } else { _o->embedding_dim_per_channel.resize(0); } }
 }
 
-inline flatbuffers::Offset<ConcatEmbeddingsOptions> ConcatEmbeddingsOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ConcatEmbeddingsOptions> ConcatEmbeddingsOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateConcatEmbeddingsOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<ConcatEmbeddingsOptions> CreateConcatEmbeddingsOptions(flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ConcatEmbeddingsOptions> CreateConcatEmbeddingsOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ConcatEmbeddingsOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ConcatEmbeddingsOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _num_channels = _o->num_channels;
   auto _num_columns_per_channel = _o->num_columns_per_channel.size() ? _fbb.CreateVector(_o->num_columns_per_channel) : 0;
   auto _embedding_dim_per_channel = _o->embedding_dim_per_channel.size() ? _fbb.CreateVector(_o->embedding_dim_per_channel) : 0;
@@ -14154,39 +17826,39 @@
       _embedding_dim_per_channel);
 }
 
-inline LSHProjectionOptionsT *LSHProjectionOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline LSHProjectionOptionsT *LSHProjectionOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<LSHProjectionOptionsT>(new LSHProjectionOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void LSHProjectionOptions::UnPackTo(LSHProjectionOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void LSHProjectionOptions::UnPackTo(LSHProjectionOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = type(); _o->type = _e; }
 }
 
-inline flatbuffers::Offset<LSHProjectionOptions> LSHProjectionOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<LSHProjectionOptions> LSHProjectionOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateLSHProjectionOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<LSHProjectionOptions> CreateLSHProjectionOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<LSHProjectionOptions> CreateLSHProjectionOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LSHProjectionOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const LSHProjectionOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _type = _o->type;
   return tflite::CreateLSHProjectionOptions(
       _fbb,
       _type);
 }
 
-inline SVDFOptionsT *SVDFOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline SVDFOptionsT *SVDFOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<SVDFOptionsT>(new SVDFOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void SVDFOptions::UnPackTo(SVDFOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void SVDFOptions::UnPackTo(SVDFOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = rank(); _o->rank = _e; }
@@ -14194,14 +17866,14 @@
   { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; }
 }
 
-inline flatbuffers::Offset<SVDFOptions> SVDFOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SVDFOptions> SVDFOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateSVDFOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<SVDFOptions> CreateSVDFOptions(flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SVDFOptions> CreateSVDFOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SVDFOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SVDFOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _rank = _o->rank;
   auto _fused_activation_function = _o->fused_activation_function;
   auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs;
@@ -14212,27 +17884,27 @@
       _asymmetric_quantize_inputs);
 }
 
-inline RNNOptionsT *RNNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline RNNOptionsT *RNNOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<RNNOptionsT>(new RNNOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void RNNOptions::UnPackTo(RNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void RNNOptions::UnPackTo(RNNOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
   { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; }
 }
 
-inline flatbuffers::Offset<RNNOptions> RNNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<RNNOptions> RNNOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateRNNOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<RNNOptions> CreateRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<RNNOptions> CreateRNNOptions(::flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RNNOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const RNNOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _fused_activation_function = _o->fused_activation_function;
   auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs;
   return tflite::CreateRNNOptions(
@@ -14241,13 +17913,13 @@
       _asymmetric_quantize_inputs);
 }
 
-inline SequenceRNNOptionsT *SequenceRNNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline SequenceRNNOptionsT *SequenceRNNOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<SequenceRNNOptionsT>(new SequenceRNNOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void SequenceRNNOptions::UnPackTo(SequenceRNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void SequenceRNNOptions::UnPackTo(SequenceRNNOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = time_major(); _o->time_major = _e; }
@@ -14255,14 +17927,14 @@
   { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; }
 }
 
-inline flatbuffers::Offset<SequenceRNNOptions> SequenceRNNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SequenceRNNOptions> SequenceRNNOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateSequenceRNNOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<SequenceRNNOptions> CreateSequenceRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SequenceRNNOptions> CreateSequenceRNNOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SequenceRNNOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SequenceRNNOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _time_major = _o->time_major;
   auto _fused_activation_function = _o->fused_activation_function;
   auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs;
@@ -14273,13 +17945,13 @@
       _asymmetric_quantize_inputs);
 }
 
-inline BidirectionalSequenceRNNOptionsT *BidirectionalSequenceRNNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline BidirectionalSequenceRNNOptionsT *BidirectionalSequenceRNNOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<BidirectionalSequenceRNNOptionsT>(new BidirectionalSequenceRNNOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void BidirectionalSequenceRNNOptions::UnPackTo(BidirectionalSequenceRNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void BidirectionalSequenceRNNOptions::UnPackTo(BidirectionalSequenceRNNOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = time_major(); _o->time_major = _e; }
@@ -14288,14 +17960,14 @@
   { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; }
 }
 
-inline flatbuffers::Offset<BidirectionalSequenceRNNOptions> BidirectionalSequenceRNNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<BidirectionalSequenceRNNOptions> BidirectionalSequenceRNNOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateBidirectionalSequenceRNNOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<BidirectionalSequenceRNNOptions> CreateBidirectionalSequenceRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<BidirectionalSequenceRNNOptions> CreateBidirectionalSequenceRNNOptions(::flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BidirectionalSequenceRNNOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const BidirectionalSequenceRNNOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _time_major = _o->time_major;
   auto _fused_activation_function = _o->fused_activation_function;
   auto _merge_outputs = _o->merge_outputs;
@@ -14308,88 +17980,91 @@
       _asymmetric_quantize_inputs);
 }
 
-inline FullyConnectedOptionsT *FullyConnectedOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline FullyConnectedOptionsT *FullyConnectedOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<FullyConnectedOptionsT>(new FullyConnectedOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void FullyConnectedOptions::UnPackTo(FullyConnectedOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void FullyConnectedOptions::UnPackTo(FullyConnectedOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
   { auto _e = weights_format(); _o->weights_format = _e; }
   { auto _e = keep_num_dims(); _o->keep_num_dims = _e; }
   { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; }
+  { auto _e = quantized_bias_type(); _o->quantized_bias_type = _e; }
 }
 
-inline flatbuffers::Offset<FullyConnectedOptions> FullyConnectedOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<FullyConnectedOptions> FullyConnectedOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateFullyConnectedOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<FullyConnectedOptions> CreateFullyConnectedOptions(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<FullyConnectedOptions> CreateFullyConnectedOptions(::flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FullyConnectedOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const FullyConnectedOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _fused_activation_function = _o->fused_activation_function;
   auto _weights_format = _o->weights_format;
   auto _keep_num_dims = _o->keep_num_dims;
   auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs;
+  auto _quantized_bias_type = _o->quantized_bias_type;
   return tflite::CreateFullyConnectedOptions(
       _fbb,
       _fused_activation_function,
       _weights_format,
       _keep_num_dims,
-      _asymmetric_quantize_inputs);
+      _asymmetric_quantize_inputs,
+      _quantized_bias_type);
 }
 
-inline SoftmaxOptionsT *SoftmaxOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline SoftmaxOptionsT *SoftmaxOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<SoftmaxOptionsT>(new SoftmaxOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void SoftmaxOptions::UnPackTo(SoftmaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void SoftmaxOptions::UnPackTo(SoftmaxOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = beta(); _o->beta = _e; }
 }
 
-inline flatbuffers::Offset<SoftmaxOptions> SoftmaxOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SoftmaxOptions> SoftmaxOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateSoftmaxOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<SoftmaxOptions> CreateSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SoftmaxOptions> CreateSoftmaxOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SoftmaxOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SoftmaxOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _beta = _o->beta;
   return tflite::CreateSoftmaxOptions(
       _fbb,
       _beta);
 }
 
-inline ConcatenationOptionsT *ConcatenationOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline ConcatenationOptionsT *ConcatenationOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<ConcatenationOptionsT>(new ConcatenationOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void ConcatenationOptions::UnPackTo(ConcatenationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void ConcatenationOptions::UnPackTo(ConcatenationOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = axis(); _o->axis = _e; }
   { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
 }
 
-inline flatbuffers::Offset<ConcatenationOptions> ConcatenationOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ConcatenationOptions> ConcatenationOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateConcatenationOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<ConcatenationOptions> CreateConcatenationOptions(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ConcatenationOptions> CreateConcatenationOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ConcatenationOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ConcatenationOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _axis = _o->axis;
   auto _fused_activation_function = _o->fused_activation_function;
   return tflite::CreateConcatenationOptions(
@@ -14398,27 +18073,27 @@
       _fused_activation_function);
 }
 
-inline AddOptionsT *AddOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline AddOptionsT *AddOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<AddOptionsT>(new AddOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void AddOptions::UnPackTo(AddOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void AddOptions::UnPackTo(AddOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
   { auto _e = pot_scale_int16(); _o->pot_scale_int16 = _e; }
 }
 
-inline flatbuffers::Offset<AddOptions> AddOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<AddOptions> AddOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateAddOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<AddOptions> CreateAddOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<AddOptions> CreateAddOptions(::flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AddOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const AddOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _fused_activation_function = _o->fused_activation_function;
   auto _pot_scale_int16 = _o->pot_scale_int16;
   return tflite::CreateAddOptions(
@@ -14427,65 +18102,65 @@
       _pot_scale_int16);
 }
 
-inline MulOptionsT *MulOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline MulOptionsT *MulOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<MulOptionsT>(new MulOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void MulOptions::UnPackTo(MulOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void MulOptions::UnPackTo(MulOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
 }
 
-inline flatbuffers::Offset<MulOptions> MulOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<MulOptions> MulOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateMulOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<MulOptions> CreateMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<MulOptions> CreateMulOptions(::flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MulOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const MulOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _fused_activation_function = _o->fused_activation_function;
   return tflite::CreateMulOptions(
       _fbb,
       _fused_activation_function);
 }
 
-inline L2NormOptionsT *L2NormOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline L2NormOptionsT *L2NormOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<L2NormOptionsT>(new L2NormOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void L2NormOptions::UnPackTo(L2NormOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void L2NormOptions::UnPackTo(L2NormOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
 }
 
-inline flatbuffers::Offset<L2NormOptions> L2NormOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<L2NormOptions> L2NormOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateL2NormOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<L2NormOptions> CreateL2NormOptions(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<L2NormOptions> CreateL2NormOptions(::flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const L2NormOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const L2NormOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _fused_activation_function = _o->fused_activation_function;
   return tflite::CreateL2NormOptions(
       _fbb,
       _fused_activation_function);
 }
 
-inline LocalResponseNormalizationOptionsT *LocalResponseNormalizationOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline LocalResponseNormalizationOptionsT *LocalResponseNormalizationOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<LocalResponseNormalizationOptionsT>(new LocalResponseNormalizationOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void LocalResponseNormalizationOptions::UnPackTo(LocalResponseNormalizationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void LocalResponseNormalizationOptions::UnPackTo(LocalResponseNormalizationOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = radius(); _o->radius = _e; }
@@ -14494,14 +18169,14 @@
   { auto _e = beta(); _o->beta = _e; }
 }
 
-inline flatbuffers::Offset<LocalResponseNormalizationOptions> LocalResponseNormalizationOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<LocalResponseNormalizationOptions> LocalResponseNormalizationOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateLocalResponseNormalizationOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<LocalResponseNormalizationOptions> CreateLocalResponseNormalizationOptions(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<LocalResponseNormalizationOptions> CreateLocalResponseNormalizationOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LocalResponseNormalizationOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const LocalResponseNormalizationOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _radius = _o->radius;
   auto _bias = _o->bias;
   auto _alpha = _o->alpha;
@@ -14514,13 +18189,13 @@
       _beta);
 }
 
-inline LSTMOptionsT *LSTMOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline LSTMOptionsT *LSTMOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<LSTMOptionsT>(new LSTMOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void LSTMOptions::UnPackTo(LSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void LSTMOptions::UnPackTo(LSTMOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
@@ -14530,14 +18205,14 @@
   { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; }
 }
 
-inline flatbuffers::Offset<LSTMOptions> LSTMOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<LSTMOptions> LSTMOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateLSTMOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<LSTMOptions> CreateLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<LSTMOptions> CreateLSTMOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LSTMOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const LSTMOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _fused_activation_function = _o->fused_activation_function;
   auto _cell_clip = _o->cell_clip;
   auto _proj_clip = _o->proj_clip;
@@ -14552,13 +18227,13 @@
       _asymmetric_quantize_inputs);
 }
 
-inline UnidirectionalSequenceLSTMOptionsT *UnidirectionalSequenceLSTMOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline UnidirectionalSequenceLSTMOptionsT *UnidirectionalSequenceLSTMOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<UnidirectionalSequenceLSTMOptionsT>(new UnidirectionalSequenceLSTMOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void UnidirectionalSequenceLSTMOptions::UnPackTo(UnidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void UnidirectionalSequenceLSTMOptions::UnPackTo(UnidirectionalSequenceLSTMOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
@@ -14569,14 +18244,14 @@
   { auto _e = diagonal_recurrent_tensors(); _o->diagonal_recurrent_tensors = _e; }
 }
 
-inline flatbuffers::Offset<UnidirectionalSequenceLSTMOptions> UnidirectionalSequenceLSTMOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<UnidirectionalSequenceLSTMOptions> UnidirectionalSequenceLSTMOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateUnidirectionalSequenceLSTMOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<UnidirectionalSequenceLSTMOptions> CreateUnidirectionalSequenceLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<UnidirectionalSequenceLSTMOptions> CreateUnidirectionalSequenceLSTMOptions(::flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UnidirectionalSequenceLSTMOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const UnidirectionalSequenceLSTMOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _fused_activation_function = _o->fused_activation_function;
   auto _cell_clip = _o->cell_clip;
   auto _proj_clip = _o->proj_clip;
@@ -14593,13 +18268,13 @@
       _diagonal_recurrent_tensors);
 }
 
-inline BidirectionalSequenceLSTMOptionsT *BidirectionalSequenceLSTMOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline BidirectionalSequenceLSTMOptionsT *BidirectionalSequenceLSTMOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<BidirectionalSequenceLSTMOptionsT>(new BidirectionalSequenceLSTMOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void BidirectionalSequenceLSTMOptions::UnPackTo(BidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void BidirectionalSequenceLSTMOptions::UnPackTo(BidirectionalSequenceLSTMOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
@@ -14610,14 +18285,14 @@
   { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; }
 }
 
-inline flatbuffers::Offset<BidirectionalSequenceLSTMOptions> BidirectionalSequenceLSTMOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<BidirectionalSequenceLSTMOptions> BidirectionalSequenceLSTMOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateBidirectionalSequenceLSTMOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<BidirectionalSequenceLSTMOptions> CreateBidirectionalSequenceLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<BidirectionalSequenceLSTMOptions> CreateBidirectionalSequenceLSTMOptions(::flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BidirectionalSequenceLSTMOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const BidirectionalSequenceLSTMOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _fused_activation_function = _o->fused_activation_function;
   auto _cell_clip = _o->cell_clip;
   auto _proj_clip = _o->proj_clip;
@@ -14634,27 +18309,27 @@
       _asymmetric_quantize_inputs);
 }
 
-inline ResizeBilinearOptionsT *ResizeBilinearOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline ResizeBilinearOptionsT *ResizeBilinearOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<ResizeBilinearOptionsT>(new ResizeBilinearOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void ResizeBilinearOptions::UnPackTo(ResizeBilinearOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void ResizeBilinearOptions::UnPackTo(ResizeBilinearOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = align_corners(); _o->align_corners = _e; }
   { auto _e = half_pixel_centers(); _o->half_pixel_centers = _e; }
 }
 
-inline flatbuffers::Offset<ResizeBilinearOptions> ResizeBilinearOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ResizeBilinearOptions> ResizeBilinearOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateResizeBilinearOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<ResizeBilinearOptions> CreateResizeBilinearOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ResizeBilinearOptions> CreateResizeBilinearOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ResizeBilinearOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ResizeBilinearOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _align_corners = _o->align_corners;
   auto _half_pixel_centers = _o->half_pixel_centers;
   return tflite::CreateResizeBilinearOptions(
@@ -14663,27 +18338,27 @@
       _half_pixel_centers);
 }
 
-inline ResizeNearestNeighborOptionsT *ResizeNearestNeighborOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline ResizeNearestNeighborOptionsT *ResizeNearestNeighborOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<ResizeNearestNeighborOptionsT>(new ResizeNearestNeighborOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void ResizeNearestNeighborOptions::UnPackTo(ResizeNearestNeighborOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void ResizeNearestNeighborOptions::UnPackTo(ResizeNearestNeighborOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = align_corners(); _o->align_corners = _e; }
   { auto _e = half_pixel_centers(); _o->half_pixel_centers = _e; }
 }
 
-inline flatbuffers::Offset<ResizeNearestNeighborOptions> ResizeNearestNeighborOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ResizeNearestNeighborOptions> ResizeNearestNeighborOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateResizeNearestNeighborOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<ResizeNearestNeighborOptions> CreateResizeNearestNeighborOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ResizeNearestNeighborOptions> CreateResizeNearestNeighborOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ResizeNearestNeighborOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ResizeNearestNeighborOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _align_corners = _o->align_corners;
   auto _half_pixel_centers = _o->half_pixel_centers;
   return tflite::CreateResizeNearestNeighborOptions(
@@ -14692,157 +18367,157 @@
       _half_pixel_centers);
 }
 
-inline CallOptionsT *CallOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline CallOptionsT *CallOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<CallOptionsT>(new CallOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void CallOptions::UnPackTo(CallOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void CallOptions::UnPackTo(CallOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = subgraph(); _o->subgraph = _e; }
 }
 
-inline flatbuffers::Offset<CallOptions> CallOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<CallOptions> CallOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateCallOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<CallOptions> CreateCallOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<CallOptions> CreateCallOptions(::flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CallOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const CallOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _subgraph = _o->subgraph;
   return tflite::CreateCallOptions(
       _fbb,
       _subgraph);
 }
 
-inline PadOptionsT *PadOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline PadOptionsT *PadOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<PadOptionsT>(new PadOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void PadOptions::UnPackTo(PadOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void PadOptions::UnPackTo(PadOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<PadOptions> PadOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<PadOptions> PadOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreatePadOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<PadOptions> CreatePadOptions(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<PadOptions> CreatePadOptions(::flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PadOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const PadOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreatePadOptions(
       _fbb);
 }
 
-inline PadV2OptionsT *PadV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline PadV2OptionsT *PadV2Options::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<PadV2OptionsT>(new PadV2OptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void PadV2Options::UnPackTo(PadV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void PadV2Options::UnPackTo(PadV2OptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<PadV2Options> PadV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<PadV2Options> PadV2Options::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreatePadV2Options(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<PadV2Options> CreatePadV2Options(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<PadV2Options> CreatePadV2Options(::flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PadV2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const PadV2OptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreatePadV2Options(
       _fbb);
 }
 
-inline ReshapeOptionsT *ReshapeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline ReshapeOptionsT *ReshapeOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<ReshapeOptionsT>(new ReshapeOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void ReshapeOptions::UnPackTo(ReshapeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void ReshapeOptions::UnPackTo(ReshapeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
-  { auto _e = new_shape(); if (_e) { _o->new_shape.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->new_shape[_i] = _e->Get(_i); } } }
+  { auto _e = new_shape(); if (_e) { _o->new_shape.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->new_shape[_i] = _e->Get(_i); } } else { _o->new_shape.resize(0); } }
 }
 
-inline flatbuffers::Offset<ReshapeOptions> ReshapeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ReshapeOptions> ReshapeOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateReshapeOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<ReshapeOptions> CreateReshapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ReshapeOptions> CreateReshapeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReshapeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ReshapeOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _new_shape = _o->new_shape.size() ? _fbb.CreateVector(_o->new_shape) : 0;
   return tflite::CreateReshapeOptions(
       _fbb,
       _new_shape);
 }
 
-inline SpaceToBatchNDOptionsT *SpaceToBatchNDOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline SpaceToBatchNDOptionsT *SpaceToBatchNDOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<SpaceToBatchNDOptionsT>(new SpaceToBatchNDOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void SpaceToBatchNDOptions::UnPackTo(SpaceToBatchNDOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void SpaceToBatchNDOptions::UnPackTo(SpaceToBatchNDOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<SpaceToBatchNDOptions> SpaceToBatchNDOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SpaceToBatchNDOptions> SpaceToBatchNDOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateSpaceToBatchNDOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<SpaceToBatchNDOptions> CreateSpaceToBatchNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SpaceToBatchNDOptions> CreateSpaceToBatchNDOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SpaceToBatchNDOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SpaceToBatchNDOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateSpaceToBatchNDOptions(
       _fbb);
 }
 
-inline BatchToSpaceNDOptionsT *BatchToSpaceNDOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline BatchToSpaceNDOptionsT *BatchToSpaceNDOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<BatchToSpaceNDOptionsT>(new BatchToSpaceNDOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void BatchToSpaceNDOptions::UnPackTo(BatchToSpaceNDOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void BatchToSpaceNDOptions::UnPackTo(BatchToSpaceNDOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<BatchToSpaceNDOptions> BatchToSpaceNDOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<BatchToSpaceNDOptions> BatchToSpaceNDOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateBatchToSpaceNDOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<BatchToSpaceNDOptions> CreateBatchToSpaceNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<BatchToSpaceNDOptions> CreateBatchToSpaceNDOptions(::flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BatchToSpaceNDOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const BatchToSpaceNDOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateBatchToSpaceNDOptions(
       _fbb);
 }
 
-inline SkipGramOptionsT *SkipGramOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline SkipGramOptionsT *SkipGramOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<SkipGramOptionsT>(new SkipGramOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void SkipGramOptions::UnPackTo(SkipGramOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void SkipGramOptions::UnPackTo(SkipGramOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = ngram_size(); _o->ngram_size = _e; }
@@ -14850,14 +18525,14 @@
   { auto _e = include_all_ngrams(); _o->include_all_ngrams = _e; }
 }
 
-inline flatbuffers::Offset<SkipGramOptions> SkipGramOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SkipGramOptions> SkipGramOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateSkipGramOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<SkipGramOptions> CreateSkipGramOptions(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SkipGramOptions> CreateSkipGramOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SkipGramOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SkipGramOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _ngram_size = _o->ngram_size;
   auto _max_skip_size = _o->max_skip_size;
   auto _include_all_ngrams = _o->include_all_ngrams;
@@ -14868,79 +18543,79 @@
       _include_all_ngrams);
 }
 
-inline SpaceToDepthOptionsT *SpaceToDepthOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline SpaceToDepthOptionsT *SpaceToDepthOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<SpaceToDepthOptionsT>(new SpaceToDepthOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void SpaceToDepthOptions::UnPackTo(SpaceToDepthOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void SpaceToDepthOptions::UnPackTo(SpaceToDepthOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = block_size(); _o->block_size = _e; }
 }
 
-inline flatbuffers::Offset<SpaceToDepthOptions> SpaceToDepthOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SpaceToDepthOptions> SpaceToDepthOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateSpaceToDepthOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<SpaceToDepthOptions> CreateSpaceToDepthOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SpaceToDepthOptions> CreateSpaceToDepthOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SpaceToDepthOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SpaceToDepthOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _block_size = _o->block_size;
   return tflite::CreateSpaceToDepthOptions(
       _fbb,
       _block_size);
 }
 
-inline DepthToSpaceOptionsT *DepthToSpaceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline DepthToSpaceOptionsT *DepthToSpaceOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<DepthToSpaceOptionsT>(new DepthToSpaceOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void DepthToSpaceOptions::UnPackTo(DepthToSpaceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void DepthToSpaceOptions::UnPackTo(DepthToSpaceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = block_size(); _o->block_size = _e; }
 }
 
-inline flatbuffers::Offset<DepthToSpaceOptions> DepthToSpaceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<DepthToSpaceOptions> DepthToSpaceOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateDepthToSpaceOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<DepthToSpaceOptions> CreateDepthToSpaceOptions(flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<DepthToSpaceOptions> CreateDepthToSpaceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DepthToSpaceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const DepthToSpaceOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _block_size = _o->block_size;
   return tflite::CreateDepthToSpaceOptions(
       _fbb,
       _block_size);
 }
 
-inline SubOptionsT *SubOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline SubOptionsT *SubOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<SubOptionsT>(new SubOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void SubOptions::UnPackTo(SubOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void SubOptions::UnPackTo(SubOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
   { auto _e = pot_scale_int16(); _o->pot_scale_int16 = _e; }
 }
 
-inline flatbuffers::Offset<SubOptions> SubOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SubOptions> SubOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateSubOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<SubOptions> CreateSubOptions(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SubOptions> CreateSubOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SubOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SubOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _fused_activation_function = _o->fused_activation_function;
   auto _pot_scale_int16 = _o->pot_scale_int16;
   return tflite::CreateSubOptions(
@@ -14949,102 +18624,102 @@
       _pot_scale_int16);
 }
 
-inline DivOptionsT *DivOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline DivOptionsT *DivOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<DivOptionsT>(new DivOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void DivOptions::UnPackTo(DivOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void DivOptions::UnPackTo(DivOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
 }
 
-inline flatbuffers::Offset<DivOptions> DivOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<DivOptions> DivOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateDivOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<DivOptions> CreateDivOptions(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<DivOptions> CreateDivOptions(::flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DivOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const DivOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _fused_activation_function = _o->fused_activation_function;
   return tflite::CreateDivOptions(
       _fbb,
       _fused_activation_function);
 }
 
-inline TopKV2OptionsT *TopKV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline TopKV2OptionsT *TopKV2Options::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<TopKV2OptionsT>(new TopKV2OptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void TopKV2Options::UnPackTo(TopKV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void TopKV2Options::UnPackTo(TopKV2OptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<TopKV2Options> TopKV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<TopKV2Options> TopKV2Options::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateTopKV2Options(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<TopKV2Options> CreateTopKV2Options(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<TopKV2Options> CreateTopKV2Options(::flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TopKV2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const TopKV2OptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateTopKV2Options(
       _fbb);
 }
 
-inline EmbeddingLookupSparseOptionsT *EmbeddingLookupSparseOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline EmbeddingLookupSparseOptionsT *EmbeddingLookupSparseOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<EmbeddingLookupSparseOptionsT>(new EmbeddingLookupSparseOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void EmbeddingLookupSparseOptions::UnPackTo(EmbeddingLookupSparseOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void EmbeddingLookupSparseOptions::UnPackTo(EmbeddingLookupSparseOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = combiner(); _o->combiner = _e; }
 }
 
-inline flatbuffers::Offset<EmbeddingLookupSparseOptions> EmbeddingLookupSparseOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<EmbeddingLookupSparseOptions> EmbeddingLookupSparseOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateEmbeddingLookupSparseOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<EmbeddingLookupSparseOptions> CreateEmbeddingLookupSparseOptions(flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<EmbeddingLookupSparseOptions> CreateEmbeddingLookupSparseOptions(::flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const EmbeddingLookupSparseOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const EmbeddingLookupSparseOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _combiner = _o->combiner;
   return tflite::CreateEmbeddingLookupSparseOptions(
       _fbb,
       _combiner);
 }
 
-inline GatherOptionsT *GatherOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline GatherOptionsT *GatherOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<GatherOptionsT>(new GatherOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void GatherOptions::UnPackTo(GatherOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void GatherOptions::UnPackTo(GatherOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = axis(); _o->axis = _e; }
   { auto _e = batch_dims(); _o->batch_dims = _e; }
 }
 
-inline flatbuffers::Offset<GatherOptions> GatherOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<GatherOptions> GatherOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateGatherOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<GatherOptions> CreateGatherOptions(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<GatherOptions> CreateGatherOptions(::flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GatherOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const GatherOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _axis = _o->axis;
   auto _batch_dims = _o->batch_dims;
   return tflite::CreateGatherOptions(
@@ -15053,186 +18728,186 @@
       _batch_dims);
 }
 
-inline TransposeOptionsT *TransposeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline TransposeOptionsT *TransposeOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<TransposeOptionsT>(new TransposeOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void TransposeOptions::UnPackTo(TransposeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void TransposeOptions::UnPackTo(TransposeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<TransposeOptions> TransposeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<TransposeOptions> TransposeOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateTransposeOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<TransposeOptions> CreateTransposeOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<TransposeOptions> CreateTransposeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TransposeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const TransposeOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateTransposeOptions(
       _fbb);
 }
 
-inline ExpOptionsT *ExpOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline ExpOptionsT *ExpOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<ExpOptionsT>(new ExpOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void ExpOptions::UnPackTo(ExpOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void ExpOptions::UnPackTo(ExpOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<ExpOptions> ExpOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ExpOptions> ExpOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateExpOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<ExpOptions> CreateExpOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ExpOptions> CreateExpOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ExpOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ExpOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateExpOptions(
       _fbb);
 }
 
-inline CosOptionsT *CosOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline CosOptionsT *CosOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<CosOptionsT>(new CosOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void CosOptions::UnPackTo(CosOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void CosOptions::UnPackTo(CosOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<CosOptions> CosOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<CosOptions> CosOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateCosOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<CosOptions> CreateCosOptions(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<CosOptions> CreateCosOptions(::flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CosOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const CosOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateCosOptions(
       _fbb);
 }
 
-inline ReducerOptionsT *ReducerOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline ReducerOptionsT *ReducerOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<ReducerOptionsT>(new ReducerOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void ReducerOptions::UnPackTo(ReducerOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void ReducerOptions::UnPackTo(ReducerOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = keep_dims(); _o->keep_dims = _e; }
 }
 
-inline flatbuffers::Offset<ReducerOptions> ReducerOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ReducerOptions> ReducerOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateReducerOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<ReducerOptions> CreateReducerOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ReducerOptions> CreateReducerOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReducerOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ReducerOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _keep_dims = _o->keep_dims;
   return tflite::CreateReducerOptions(
       _fbb,
       _keep_dims);
 }
 
-inline SqueezeOptionsT *SqueezeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline SqueezeOptionsT *SqueezeOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<SqueezeOptionsT>(new SqueezeOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void SqueezeOptions::UnPackTo(SqueezeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void SqueezeOptions::UnPackTo(SqueezeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
-  { auto _e = squeeze_dims(); if (_e) { _o->squeeze_dims.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->squeeze_dims[_i] = _e->Get(_i); } } }
+  { auto _e = squeeze_dims(); if (_e) { _o->squeeze_dims.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->squeeze_dims[_i] = _e->Get(_i); } } else { _o->squeeze_dims.resize(0); } }
 }
 
-inline flatbuffers::Offset<SqueezeOptions> SqueezeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SqueezeOptions> SqueezeOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateSqueezeOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<SqueezeOptions> CreateSqueezeOptions(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SqueezeOptions> CreateSqueezeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SqueezeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SqueezeOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _squeeze_dims = _o->squeeze_dims.size() ? _fbb.CreateVector(_o->squeeze_dims) : 0;
   return tflite::CreateSqueezeOptions(
       _fbb,
       _squeeze_dims);
 }
 
-inline SplitOptionsT *SplitOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline SplitOptionsT *SplitOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<SplitOptionsT>(new SplitOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void SplitOptions::UnPackTo(SplitOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void SplitOptions::UnPackTo(SplitOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = num_splits(); _o->num_splits = _e; }
 }
 
-inline flatbuffers::Offset<SplitOptions> SplitOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SplitOptions> SplitOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateSplitOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<SplitOptions> CreateSplitOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SplitOptions> CreateSplitOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SplitOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SplitOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _num_splits = _o->num_splits;
   return tflite::CreateSplitOptions(
       _fbb,
       _num_splits);
 }
 
-inline SplitVOptionsT *SplitVOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline SplitVOptionsT *SplitVOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<SplitVOptionsT>(new SplitVOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void SplitVOptions::UnPackTo(SplitVOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void SplitVOptions::UnPackTo(SplitVOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = num_splits(); _o->num_splits = _e; }
 }
 
-inline flatbuffers::Offset<SplitVOptions> SplitVOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SplitVOptions> SplitVOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateSplitVOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<SplitVOptions> CreateSplitVOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SplitVOptions> CreateSplitVOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SplitVOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SplitVOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _num_splits = _o->num_splits;
   return tflite::CreateSplitVOptions(
       _fbb,
       _num_splits);
 }
 
-inline StridedSliceOptionsT *StridedSliceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline StridedSliceOptionsT *StridedSliceOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<StridedSliceOptionsT>(new StridedSliceOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void StridedSliceOptions::UnPackTo(StridedSliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void StridedSliceOptions::UnPackTo(StridedSliceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = begin_mask(); _o->begin_mask = _e; }
@@ -15243,14 +18918,14 @@
   { auto _e = offset(); _o->offset = _e; }
 }
 
-inline flatbuffers::Offset<StridedSliceOptions> StridedSliceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<StridedSliceOptions> StridedSliceOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateStridedSliceOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<StridedSliceOptions> CreateStridedSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<StridedSliceOptions> CreateStridedSliceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const StridedSliceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StridedSliceOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _begin_mask = _o->begin_mask;
   auto _end_mask = _o->end_mask;
   auto _ellipsis_mask = _o->ellipsis_mask;
@@ -15267,50 +18942,50 @@
       _offset);
 }
 
-inline LogSoftmaxOptionsT *LogSoftmaxOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline LogSoftmaxOptionsT *LogSoftmaxOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<LogSoftmaxOptionsT>(new LogSoftmaxOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void LogSoftmaxOptions::UnPackTo(LogSoftmaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void LogSoftmaxOptions::UnPackTo(LogSoftmaxOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<LogSoftmaxOptions> LogSoftmaxOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<LogSoftmaxOptions> LogSoftmaxOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateLogSoftmaxOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<LogSoftmaxOptions> CreateLogSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<LogSoftmaxOptions> CreateLogSoftmaxOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogSoftmaxOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const LogSoftmaxOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateLogSoftmaxOptions(
       _fbb);
 }
 
-inline CastOptionsT *CastOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline CastOptionsT *CastOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<CastOptionsT>(new CastOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void CastOptions::UnPackTo(CastOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void CastOptions::UnPackTo(CastOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = in_data_type(); _o->in_data_type = _e; }
   { auto _e = out_data_type(); _o->out_data_type = _e; }
 }
 
-inline flatbuffers::Offset<CastOptions> CastOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<CastOptions> CastOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateCastOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<CastOptions> CreateCastOptions(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<CastOptions> CreateCastOptions(::flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CastOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const CastOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _in_data_type = _o->in_data_type;
   auto _out_data_type = _o->out_data_type;
   return tflite::CreateCastOptions(
@@ -15319,497 +18994,500 @@
       _out_data_type);
 }
 
-inline DequantizeOptionsT *DequantizeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline DequantizeOptionsT *DequantizeOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<DequantizeOptionsT>(new DequantizeOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void DequantizeOptions::UnPackTo(DequantizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void DequantizeOptions::UnPackTo(DequantizeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<DequantizeOptions> DequantizeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<DequantizeOptions> DequantizeOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateDequantizeOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<DequantizeOptions> CreateDequantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<DequantizeOptions> CreateDequantizeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DequantizeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const DequantizeOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateDequantizeOptions(
       _fbb);
 }
 
-inline MaximumMinimumOptionsT *MaximumMinimumOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline MaximumMinimumOptionsT *MaximumMinimumOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<MaximumMinimumOptionsT>(new MaximumMinimumOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void MaximumMinimumOptions::UnPackTo(MaximumMinimumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void MaximumMinimumOptions::UnPackTo(MaximumMinimumOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<MaximumMinimumOptions> MaximumMinimumOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<MaximumMinimumOptions> MaximumMinimumOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateMaximumMinimumOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<MaximumMinimumOptions> CreateMaximumMinimumOptions(flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<MaximumMinimumOptions> CreateMaximumMinimumOptions(::flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MaximumMinimumOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const MaximumMinimumOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateMaximumMinimumOptions(
       _fbb);
 }
 
-inline TileOptionsT *TileOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline TileOptionsT *TileOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<TileOptionsT>(new TileOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void TileOptions::UnPackTo(TileOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void TileOptions::UnPackTo(TileOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<TileOptions> TileOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<TileOptions> TileOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateTileOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<TileOptions> CreateTileOptions(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<TileOptions> CreateTileOptions(::flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TileOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const TileOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateTileOptions(
       _fbb);
 }
 
-inline ArgMaxOptionsT *ArgMaxOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline ArgMaxOptionsT *ArgMaxOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<ArgMaxOptionsT>(new ArgMaxOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void ArgMaxOptions::UnPackTo(ArgMaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void ArgMaxOptions::UnPackTo(ArgMaxOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = output_type(); _o->output_type = _e; }
 }
 
-inline flatbuffers::Offset<ArgMaxOptions> ArgMaxOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ArgMaxOptions> ArgMaxOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateArgMaxOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<ArgMaxOptions> CreateArgMaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ArgMaxOptions> CreateArgMaxOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ArgMaxOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ArgMaxOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _output_type = _o->output_type;
   return tflite::CreateArgMaxOptions(
       _fbb,
       _output_type);
 }
 
-inline ArgMinOptionsT *ArgMinOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline ArgMinOptionsT *ArgMinOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<ArgMinOptionsT>(new ArgMinOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void ArgMinOptions::UnPackTo(ArgMinOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void ArgMinOptions::UnPackTo(ArgMinOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = output_type(); _o->output_type = _e; }
 }
 
-inline flatbuffers::Offset<ArgMinOptions> ArgMinOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ArgMinOptions> ArgMinOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateArgMinOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<ArgMinOptions> CreateArgMinOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ArgMinOptions> CreateArgMinOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ArgMinOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ArgMinOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _output_type = _o->output_type;
   return tflite::CreateArgMinOptions(
       _fbb,
       _output_type);
 }
 
-inline GreaterOptionsT *GreaterOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline GreaterOptionsT *GreaterOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<GreaterOptionsT>(new GreaterOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void GreaterOptions::UnPackTo(GreaterOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void GreaterOptions::UnPackTo(GreaterOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<GreaterOptions> GreaterOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<GreaterOptions> GreaterOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateGreaterOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<GreaterOptions> CreateGreaterOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<GreaterOptions> CreateGreaterOptions(::flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GreaterOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const GreaterOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateGreaterOptions(
       _fbb);
 }
 
-inline GreaterEqualOptionsT *GreaterEqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline GreaterEqualOptionsT *GreaterEqualOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<GreaterEqualOptionsT>(new GreaterEqualOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void GreaterEqualOptions::UnPackTo(GreaterEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void GreaterEqualOptions::UnPackTo(GreaterEqualOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<GreaterEqualOptions> GreaterEqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<GreaterEqualOptions> GreaterEqualOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateGreaterEqualOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<GreaterEqualOptions> CreateGreaterEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<GreaterEqualOptions> CreateGreaterEqualOptions(::flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GreaterEqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const GreaterEqualOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateGreaterEqualOptions(
       _fbb);
 }
 
-inline LessOptionsT *LessOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline LessOptionsT *LessOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<LessOptionsT>(new LessOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void LessOptions::UnPackTo(LessOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void LessOptions::UnPackTo(LessOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<LessOptions> LessOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<LessOptions> LessOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateLessOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<LessOptions> CreateLessOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<LessOptions> CreateLessOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LessOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const LessOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateLessOptions(
       _fbb);
 }
 
-inline LessEqualOptionsT *LessEqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline LessEqualOptionsT *LessEqualOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<LessEqualOptionsT>(new LessEqualOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void LessEqualOptions::UnPackTo(LessEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void LessEqualOptions::UnPackTo(LessEqualOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<LessEqualOptions> LessEqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<LessEqualOptions> LessEqualOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateLessEqualOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<LessEqualOptions> CreateLessEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<LessEqualOptions> CreateLessEqualOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LessEqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const LessEqualOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateLessEqualOptions(
       _fbb);
 }
 
-inline NegOptionsT *NegOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline NegOptionsT *NegOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<NegOptionsT>(new NegOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void NegOptions::UnPackTo(NegOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void NegOptions::UnPackTo(NegOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<NegOptions> NegOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<NegOptions> NegOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateNegOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<NegOptions> CreateNegOptions(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<NegOptions> CreateNegOptions(::flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NegOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const NegOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateNegOptions(
       _fbb);
 }
 
-inline SelectOptionsT *SelectOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline SelectOptionsT *SelectOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<SelectOptionsT>(new SelectOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void SelectOptions::UnPackTo(SelectOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void SelectOptions::UnPackTo(SelectOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<SelectOptions> SelectOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SelectOptions> SelectOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateSelectOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<SelectOptions> CreateSelectOptions(flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SelectOptions> CreateSelectOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SelectOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SelectOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateSelectOptions(
       _fbb);
 }
 
-inline SliceOptionsT *SliceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline SliceOptionsT *SliceOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<SliceOptionsT>(new SliceOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void SliceOptions::UnPackTo(SliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void SliceOptions::UnPackTo(SliceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<SliceOptions> SliceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SliceOptions> SliceOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateSliceOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<SliceOptions> CreateSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SliceOptions> CreateSliceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SliceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SliceOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateSliceOptions(
       _fbb);
 }
 
-inline TransposeConvOptionsT *TransposeConvOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline TransposeConvOptionsT *TransposeConvOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<TransposeConvOptionsT>(new TransposeConvOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void TransposeConvOptions::UnPackTo(TransposeConvOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void TransposeConvOptions::UnPackTo(TransposeConvOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = padding(); _o->padding = _e; }
   { auto _e = stride_w(); _o->stride_w = _e; }
   { auto _e = stride_h(); _o->stride_h = _e; }
   { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
+  { auto _e = quantized_bias_type(); _o->quantized_bias_type = _e; }
 }
 
-inline flatbuffers::Offset<TransposeConvOptions> TransposeConvOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<TransposeConvOptions> TransposeConvOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateTransposeConvOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<TransposeConvOptions> CreateTransposeConvOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<TransposeConvOptions> CreateTransposeConvOptions(::flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TransposeConvOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const TransposeConvOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _padding = _o->padding;
   auto _stride_w = _o->stride_w;
   auto _stride_h = _o->stride_h;
   auto _fused_activation_function = _o->fused_activation_function;
+  auto _quantized_bias_type = _o->quantized_bias_type;
   return tflite::CreateTransposeConvOptions(
       _fbb,
       _padding,
       _stride_w,
       _stride_h,
-      _fused_activation_function);
+      _fused_activation_function,
+      _quantized_bias_type);
 }
 
-inline ExpandDimsOptionsT *ExpandDimsOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline ExpandDimsOptionsT *ExpandDimsOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<ExpandDimsOptionsT>(new ExpandDimsOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void ExpandDimsOptions::UnPackTo(ExpandDimsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void ExpandDimsOptions::UnPackTo(ExpandDimsOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<ExpandDimsOptions> ExpandDimsOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ExpandDimsOptions> ExpandDimsOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateExpandDimsOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<ExpandDimsOptions> CreateExpandDimsOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ExpandDimsOptions> CreateExpandDimsOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ExpandDimsOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ExpandDimsOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateExpandDimsOptions(
       _fbb);
 }
 
-inline SparseToDenseOptionsT *SparseToDenseOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline SparseToDenseOptionsT *SparseToDenseOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<SparseToDenseOptionsT>(new SparseToDenseOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void SparseToDenseOptions::UnPackTo(SparseToDenseOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void SparseToDenseOptions::UnPackTo(SparseToDenseOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = validate_indices(); _o->validate_indices = _e; }
 }
 
-inline flatbuffers::Offset<SparseToDenseOptions> SparseToDenseOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SparseToDenseOptions> SparseToDenseOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateSparseToDenseOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<SparseToDenseOptions> CreateSparseToDenseOptions(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SparseToDenseOptions> CreateSparseToDenseOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SparseToDenseOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SparseToDenseOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _validate_indices = _o->validate_indices;
   return tflite::CreateSparseToDenseOptions(
       _fbb,
       _validate_indices);
 }
 
-inline EqualOptionsT *EqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline EqualOptionsT *EqualOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<EqualOptionsT>(new EqualOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void EqualOptions::UnPackTo(EqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void EqualOptions::UnPackTo(EqualOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<EqualOptions> EqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<EqualOptions> EqualOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateEqualOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<EqualOptions> CreateEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<EqualOptions> CreateEqualOptions(::flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const EqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const EqualOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateEqualOptions(
       _fbb);
 }
 
-inline NotEqualOptionsT *NotEqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline NotEqualOptionsT *NotEqualOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<NotEqualOptionsT>(new NotEqualOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void NotEqualOptions::UnPackTo(NotEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void NotEqualOptions::UnPackTo(NotEqualOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<NotEqualOptions> NotEqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<NotEqualOptions> NotEqualOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateNotEqualOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<NotEqualOptions> CreateNotEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<NotEqualOptions> CreateNotEqualOptions(::flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NotEqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const NotEqualOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateNotEqualOptions(
       _fbb);
 }
 
-inline ShapeOptionsT *ShapeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline ShapeOptionsT *ShapeOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<ShapeOptionsT>(new ShapeOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void ShapeOptions::UnPackTo(ShapeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void ShapeOptions::UnPackTo(ShapeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = out_type(); _o->out_type = _e; }
 }
 
-inline flatbuffers::Offset<ShapeOptions> ShapeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ShapeOptions> ShapeOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateShapeOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<ShapeOptions> CreateShapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ShapeOptions> CreateShapeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ShapeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ShapeOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _out_type = _o->out_type;
   return tflite::CreateShapeOptions(
       _fbb,
       _out_type);
 }
 
-inline RankOptionsT *RankOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline RankOptionsT *RankOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<RankOptionsT>(new RankOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void RankOptions::UnPackTo(RankOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void RankOptions::UnPackTo(RankOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<RankOptions> RankOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<RankOptions> RankOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateRankOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<RankOptions> CreateRankOptions(flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<RankOptions> CreateRankOptions(::flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RankOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const RankOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateRankOptions(
       _fbb);
 }
 
-inline PowOptionsT *PowOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline PowOptionsT *PowOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<PowOptionsT>(new PowOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void PowOptions::UnPackTo(PowOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void PowOptions::UnPackTo(PowOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<PowOptions> PowOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<PowOptions> PowOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreatePowOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<PowOptions> CreatePowOptions(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<PowOptions> CreatePowOptions(::flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PowOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const PowOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreatePowOptions(
       _fbb);
 }
 
-inline FakeQuantOptionsT *FakeQuantOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline FakeQuantOptionsT *FakeQuantOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<FakeQuantOptionsT>(new FakeQuantOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void FakeQuantOptions::UnPackTo(FakeQuantOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void FakeQuantOptions::UnPackTo(FakeQuantOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = min(); _o->min = _e; }
@@ -15818,14 +19496,14 @@
   { auto _e = narrow_range(); _o->narrow_range = _e; }
 }
 
-inline flatbuffers::Offset<FakeQuantOptions> FakeQuantOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<FakeQuantOptions> FakeQuantOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateFakeQuantOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<FakeQuantOptions> CreateFakeQuantOptions(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<FakeQuantOptions> CreateFakeQuantOptions(::flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FakeQuantOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const FakeQuantOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _min = _o->min;
   auto _max = _o->max;
   auto _num_bits = _o->num_bits;
@@ -15838,27 +19516,27 @@
       _narrow_range);
 }
 
-inline PackOptionsT *PackOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline PackOptionsT *PackOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<PackOptionsT>(new PackOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void PackOptions::UnPackTo(PackOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void PackOptions::UnPackTo(PackOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = values_count(); _o->values_count = _e; }
   { auto _e = axis(); _o->axis = _e; }
 }
 
-inline flatbuffers::Offset<PackOptions> PackOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<PackOptions> PackOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreatePackOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<PackOptions> CreatePackOptions(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<PackOptions> CreatePackOptions(::flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PackOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const PackOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _values_count = _o->values_count;
   auto _axis = _o->axis;
   return tflite::CreatePackOptions(
@@ -15867,168 +19545,168 @@
       _axis);
 }
 
-inline LogicalOrOptionsT *LogicalOrOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline LogicalOrOptionsT *LogicalOrOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<LogicalOrOptionsT>(new LogicalOrOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void LogicalOrOptions::UnPackTo(LogicalOrOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void LogicalOrOptions::UnPackTo(LogicalOrOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<LogicalOrOptions> LogicalOrOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<LogicalOrOptions> LogicalOrOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateLogicalOrOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<LogicalOrOptions> CreateLogicalOrOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<LogicalOrOptions> CreateLogicalOrOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogicalOrOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const LogicalOrOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateLogicalOrOptions(
       _fbb);
 }
 
-inline OneHotOptionsT *OneHotOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline OneHotOptionsT *OneHotOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<OneHotOptionsT>(new OneHotOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void OneHotOptions::UnPackTo(OneHotOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void OneHotOptions::UnPackTo(OneHotOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = axis(); _o->axis = _e; }
 }
 
-inline flatbuffers::Offset<OneHotOptions> OneHotOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<OneHotOptions> OneHotOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateOneHotOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<OneHotOptions> CreateOneHotOptions(flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<OneHotOptions> CreateOneHotOptions(::flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const OneHotOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const OneHotOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _axis = _o->axis;
   return tflite::CreateOneHotOptions(
       _fbb,
       _axis);
 }
 
-inline AbsOptionsT *AbsOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline AbsOptionsT *AbsOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<AbsOptionsT>(new AbsOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void AbsOptions::UnPackTo(AbsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void AbsOptions::UnPackTo(AbsOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<AbsOptions> AbsOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<AbsOptions> AbsOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateAbsOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<AbsOptions> CreateAbsOptions(flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<AbsOptions> CreateAbsOptions(::flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AbsOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const AbsOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateAbsOptions(
       _fbb);
 }
 
-inline HardSwishOptionsT *HardSwishOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline HardSwishOptionsT *HardSwishOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<HardSwishOptionsT>(new HardSwishOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void HardSwishOptions::UnPackTo(HardSwishOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void HardSwishOptions::UnPackTo(HardSwishOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<HardSwishOptions> HardSwishOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<HardSwishOptions> HardSwishOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateHardSwishOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<HardSwishOptions> CreateHardSwishOptions(flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<HardSwishOptions> CreateHardSwishOptions(::flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const HardSwishOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const HardSwishOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateHardSwishOptions(
       _fbb);
 }
 
-inline LogicalAndOptionsT *LogicalAndOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline LogicalAndOptionsT *LogicalAndOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<LogicalAndOptionsT>(new LogicalAndOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void LogicalAndOptions::UnPackTo(LogicalAndOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void LogicalAndOptions::UnPackTo(LogicalAndOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<LogicalAndOptions> LogicalAndOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<LogicalAndOptions> LogicalAndOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateLogicalAndOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<LogicalAndOptions> CreateLogicalAndOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<LogicalAndOptions> CreateLogicalAndOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogicalAndOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const LogicalAndOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateLogicalAndOptions(
       _fbb);
 }
 
-inline LogicalNotOptionsT *LogicalNotOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline LogicalNotOptionsT *LogicalNotOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<LogicalNotOptionsT>(new LogicalNotOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void LogicalNotOptions::UnPackTo(LogicalNotOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void LogicalNotOptions::UnPackTo(LogicalNotOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<LogicalNotOptions> LogicalNotOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<LogicalNotOptions> LogicalNotOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateLogicalNotOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<LogicalNotOptions> CreateLogicalNotOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<LogicalNotOptions> CreateLogicalNotOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogicalNotOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const LogicalNotOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateLogicalNotOptions(
       _fbb);
 }
 
-inline UnpackOptionsT *UnpackOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline UnpackOptionsT *UnpackOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<UnpackOptionsT>(new UnpackOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void UnpackOptions::UnPackTo(UnpackOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void UnpackOptions::UnPackTo(UnpackOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = num(); _o->num = _e; }
   { auto _e = axis(); _o->axis = _e; }
 }
 
-inline flatbuffers::Offset<UnpackOptions> UnpackOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<UnpackOptions> UnpackOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateUnpackOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<UnpackOptions> CreateUnpackOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<UnpackOptions> CreateUnpackOptions(::flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UnpackOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const UnpackOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _num = _o->num;
   auto _axis = _o->axis;
   return tflite::CreateUnpackOptions(
@@ -16037,358 +19715,358 @@
       _axis);
 }
 
-inline FloorDivOptionsT *FloorDivOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline FloorDivOptionsT *FloorDivOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<FloorDivOptionsT>(new FloorDivOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void FloorDivOptions::UnPackTo(FloorDivOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void FloorDivOptions::UnPackTo(FloorDivOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<FloorDivOptions> FloorDivOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<FloorDivOptions> FloorDivOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateFloorDivOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<FloorDivOptions> CreateFloorDivOptions(flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<FloorDivOptions> CreateFloorDivOptions(::flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FloorDivOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const FloorDivOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateFloorDivOptions(
       _fbb);
 }
 
-inline SquareOptionsT *SquareOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline SquareOptionsT *SquareOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<SquareOptionsT>(new SquareOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void SquareOptions::UnPackTo(SquareOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void SquareOptions::UnPackTo(SquareOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<SquareOptions> SquareOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SquareOptions> SquareOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateSquareOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<SquareOptions> CreateSquareOptions(flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SquareOptions> CreateSquareOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SquareOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SquareOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateSquareOptions(
       _fbb);
 }
 
-inline ZerosLikeOptionsT *ZerosLikeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline ZerosLikeOptionsT *ZerosLikeOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<ZerosLikeOptionsT>(new ZerosLikeOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void ZerosLikeOptions::UnPackTo(ZerosLikeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void ZerosLikeOptions::UnPackTo(ZerosLikeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<ZerosLikeOptions> ZerosLikeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ZerosLikeOptions> ZerosLikeOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateZerosLikeOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<ZerosLikeOptions> CreateZerosLikeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ZerosLikeOptions> CreateZerosLikeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ZerosLikeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ZerosLikeOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateZerosLikeOptions(
       _fbb);
 }
 
-inline FillOptionsT *FillOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline FillOptionsT *FillOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<FillOptionsT>(new FillOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void FillOptions::UnPackTo(FillOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void FillOptions::UnPackTo(FillOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<FillOptions> FillOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<FillOptions> FillOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateFillOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<FillOptions> CreateFillOptions(flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<FillOptions> CreateFillOptions(::flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FillOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const FillOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateFillOptions(
       _fbb);
 }
 
-inline FloorModOptionsT *FloorModOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline FloorModOptionsT *FloorModOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<FloorModOptionsT>(new FloorModOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void FloorModOptions::UnPackTo(FloorModOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void FloorModOptions::UnPackTo(FloorModOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<FloorModOptions> FloorModOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<FloorModOptions> FloorModOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateFloorModOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<FloorModOptions> CreateFloorModOptions(flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<FloorModOptions> CreateFloorModOptions(::flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FloorModOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const FloorModOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateFloorModOptions(
       _fbb);
 }
 
-inline RangeOptionsT *RangeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline RangeOptionsT *RangeOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<RangeOptionsT>(new RangeOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void RangeOptions::UnPackTo(RangeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void RangeOptions::UnPackTo(RangeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<RangeOptions> RangeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<RangeOptions> RangeOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateRangeOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<RangeOptions> CreateRangeOptions(flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<RangeOptions> CreateRangeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RangeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const RangeOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateRangeOptions(
       _fbb);
 }
 
-inline LeakyReluOptionsT *LeakyReluOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline LeakyReluOptionsT *LeakyReluOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<LeakyReluOptionsT>(new LeakyReluOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void LeakyReluOptions::UnPackTo(LeakyReluOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void LeakyReluOptions::UnPackTo(LeakyReluOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = alpha(); _o->alpha = _e; }
 }
 
-inline flatbuffers::Offset<LeakyReluOptions> LeakyReluOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<LeakyReluOptions> LeakyReluOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateLeakyReluOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<LeakyReluOptions> CreateLeakyReluOptions(flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<LeakyReluOptions> CreateLeakyReluOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LeakyReluOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const LeakyReluOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _alpha = _o->alpha;
   return tflite::CreateLeakyReluOptions(
       _fbb,
       _alpha);
 }
 
-inline SquaredDifferenceOptionsT *SquaredDifferenceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline SquaredDifferenceOptionsT *SquaredDifferenceOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<SquaredDifferenceOptionsT>(new SquaredDifferenceOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void SquaredDifferenceOptions::UnPackTo(SquaredDifferenceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void SquaredDifferenceOptions::UnPackTo(SquaredDifferenceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<SquaredDifferenceOptions> SquaredDifferenceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SquaredDifferenceOptions> SquaredDifferenceOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateSquaredDifferenceOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<SquaredDifferenceOptions> CreateSquaredDifferenceOptions(flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SquaredDifferenceOptions> CreateSquaredDifferenceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SquaredDifferenceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SquaredDifferenceOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateSquaredDifferenceOptions(
       _fbb);
 }
 
-inline MirrorPadOptionsT *MirrorPadOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline MirrorPadOptionsT *MirrorPadOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<MirrorPadOptionsT>(new MirrorPadOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void MirrorPadOptions::UnPackTo(MirrorPadOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void MirrorPadOptions::UnPackTo(MirrorPadOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = mode(); _o->mode = _e; }
 }
 
-inline flatbuffers::Offset<MirrorPadOptions> MirrorPadOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<MirrorPadOptions> MirrorPadOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateMirrorPadOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<MirrorPadOptions> CreateMirrorPadOptions(flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<MirrorPadOptions> CreateMirrorPadOptions(::flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MirrorPadOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const MirrorPadOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _mode = _o->mode;
   return tflite::CreateMirrorPadOptions(
       _fbb,
       _mode);
 }
 
-inline UniqueOptionsT *UniqueOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline UniqueOptionsT *UniqueOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<UniqueOptionsT>(new UniqueOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void UniqueOptions::UnPackTo(UniqueOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void UniqueOptions::UnPackTo(UniqueOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = idx_out_type(); _o->idx_out_type = _e; }
 }
 
-inline flatbuffers::Offset<UniqueOptions> UniqueOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<UniqueOptions> UniqueOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateUniqueOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<UniqueOptions> CreateUniqueOptions(flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<UniqueOptions> CreateUniqueOptions(::flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UniqueOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const UniqueOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _idx_out_type = _o->idx_out_type;
   return tflite::CreateUniqueOptions(
       _fbb,
       _idx_out_type);
 }
 
-inline ReverseV2OptionsT *ReverseV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline ReverseV2OptionsT *ReverseV2Options::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<ReverseV2OptionsT>(new ReverseV2OptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void ReverseV2Options::UnPackTo(ReverseV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void ReverseV2Options::UnPackTo(ReverseV2OptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<ReverseV2Options> ReverseV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ReverseV2Options> ReverseV2Options::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateReverseV2Options(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<ReverseV2Options> CreateReverseV2Options(flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ReverseV2Options> CreateReverseV2Options(::flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReverseV2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ReverseV2OptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateReverseV2Options(
       _fbb);
 }
 
-inline AddNOptionsT *AddNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline AddNOptionsT *AddNOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<AddNOptionsT>(new AddNOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void AddNOptions::UnPackTo(AddNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void AddNOptions::UnPackTo(AddNOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<AddNOptions> AddNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<AddNOptions> AddNOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateAddNOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<AddNOptions> CreateAddNOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<AddNOptions> CreateAddNOptions(::flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AddNOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const AddNOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateAddNOptions(
       _fbb);
 }
 
-inline GatherNdOptionsT *GatherNdOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline GatherNdOptionsT *GatherNdOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<GatherNdOptionsT>(new GatherNdOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void GatherNdOptions::UnPackTo(GatherNdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void GatherNdOptions::UnPackTo(GatherNdOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<GatherNdOptions> GatherNdOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<GatherNdOptions> GatherNdOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateGatherNdOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<GatherNdOptions> CreateGatherNdOptions(flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<GatherNdOptions> CreateGatherNdOptions(::flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GatherNdOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const GatherNdOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateGatherNdOptions(
       _fbb);
 }
 
-inline WhereOptionsT *WhereOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline WhereOptionsT *WhereOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<WhereOptionsT>(new WhereOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void WhereOptions::UnPackTo(WhereOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void WhereOptions::UnPackTo(WhereOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<WhereOptions> WhereOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<WhereOptions> WhereOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateWhereOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<WhereOptions> CreateWhereOptions(flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<WhereOptions> CreateWhereOptions(::flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const WhereOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const WhereOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateWhereOptions(
       _fbb);
 }
 
-inline ReverseSequenceOptionsT *ReverseSequenceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline ReverseSequenceOptionsT *ReverseSequenceOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<ReverseSequenceOptionsT>(new ReverseSequenceOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void ReverseSequenceOptions::UnPackTo(ReverseSequenceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void ReverseSequenceOptions::UnPackTo(ReverseSequenceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = seq_dim(); _o->seq_dim = _e; }
   { auto _e = batch_dim(); _o->batch_dim = _e; }
 }
 
-inline flatbuffers::Offset<ReverseSequenceOptions> ReverseSequenceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ReverseSequenceOptions> ReverseSequenceOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateReverseSequenceOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<ReverseSequenceOptions> CreateReverseSequenceOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ReverseSequenceOptions> CreateReverseSequenceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReverseSequenceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ReverseSequenceOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _seq_dim = _o->seq_dim;
   auto _batch_dim = _o->batch_dim;
   return tflite::CreateReverseSequenceOptions(
@@ -16397,96 +20075,96 @@
       _batch_dim);
 }
 
-inline MatrixDiagOptionsT *MatrixDiagOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline MatrixDiagOptionsT *MatrixDiagOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<MatrixDiagOptionsT>(new MatrixDiagOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void MatrixDiagOptions::UnPackTo(MatrixDiagOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void MatrixDiagOptions::UnPackTo(MatrixDiagOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<MatrixDiagOptions> MatrixDiagOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<MatrixDiagOptions> MatrixDiagOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateMatrixDiagOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<MatrixDiagOptions> CreateMatrixDiagOptions(flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<MatrixDiagOptions> CreateMatrixDiagOptions(::flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MatrixDiagOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const MatrixDiagOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateMatrixDiagOptions(
       _fbb);
 }
 
-inline QuantizeOptionsT *QuantizeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline QuantizeOptionsT *QuantizeOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<QuantizeOptionsT>(new QuantizeOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void QuantizeOptions::UnPackTo(QuantizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void QuantizeOptions::UnPackTo(QuantizeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<QuantizeOptions> QuantizeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<QuantizeOptions> QuantizeOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateQuantizeOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<QuantizeOptions> CreateQuantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<QuantizeOptions> CreateQuantizeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const QuantizeOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateQuantizeOptions(
       _fbb);
 }
 
-inline MatrixSetDiagOptionsT *MatrixSetDiagOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline MatrixSetDiagOptionsT *MatrixSetDiagOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<MatrixSetDiagOptionsT>(new MatrixSetDiagOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void MatrixSetDiagOptions::UnPackTo(MatrixSetDiagOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void MatrixSetDiagOptions::UnPackTo(MatrixSetDiagOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<MatrixSetDiagOptions> MatrixSetDiagOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<MatrixSetDiagOptions> MatrixSetDiagOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateMatrixSetDiagOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<MatrixSetDiagOptions> CreateMatrixSetDiagOptions(flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<MatrixSetDiagOptions> CreateMatrixSetDiagOptions(::flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MatrixSetDiagOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const MatrixSetDiagOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateMatrixSetDiagOptions(
       _fbb);
 }
 
-inline IfOptionsT *IfOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline IfOptionsT *IfOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<IfOptionsT>(new IfOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void IfOptions::UnPackTo(IfOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void IfOptions::UnPackTo(IfOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = then_subgraph_index(); _o->then_subgraph_index = _e; }
   { auto _e = else_subgraph_index(); _o->else_subgraph_index = _e; }
 }
 
-inline flatbuffers::Offset<IfOptions> IfOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<IfOptions> IfOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateIfOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<IfOptions> CreateIfOptions(flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<IfOptions> CreateIfOptions(::flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const IfOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const IfOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _then_subgraph_index = _o->then_subgraph_index;
   auto _else_subgraph_index = _o->else_subgraph_index;
   return tflite::CreateIfOptions(
@@ -16495,53 +20173,53 @@
       _else_subgraph_index);
 }
 
-inline CallOnceOptionsT *CallOnceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline CallOnceOptionsT *CallOnceOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<CallOnceOptionsT>(new CallOnceOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void CallOnceOptions::UnPackTo(CallOnceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void CallOnceOptions::UnPackTo(CallOnceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = init_subgraph_index(); _o->init_subgraph_index = _e; }
 }
 
-inline flatbuffers::Offset<CallOnceOptions> CallOnceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<CallOnceOptions> CallOnceOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateCallOnceOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<CallOnceOptions> CreateCallOnceOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<CallOnceOptions> CreateCallOnceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CallOnceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const CallOnceOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _init_subgraph_index = _o->init_subgraph_index;
   return tflite::CreateCallOnceOptions(
       _fbb,
       _init_subgraph_index);
 }
 
-inline WhileOptionsT *WhileOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline WhileOptionsT *WhileOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<WhileOptionsT>(new WhileOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void WhileOptions::UnPackTo(WhileOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void WhileOptions::UnPackTo(WhileOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = cond_subgraph_index(); _o->cond_subgraph_index = _e; }
   { auto _e = body_subgraph_index(); _o->body_subgraph_index = _e; }
 }
 
-inline flatbuffers::Offset<WhileOptions> WhileOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<WhileOptions> WhileOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateWhileOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<WhileOptions> CreateWhileOptions(flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<WhileOptions> CreateWhileOptions(::flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const WhileOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const WhileOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _cond_subgraph_index = _o->cond_subgraph_index;
   auto _body_subgraph_index = _o->body_subgraph_index;
   return tflite::CreateWhileOptions(
@@ -16550,151 +20228,151 @@
       _body_subgraph_index);
 }
 
-inline NonMaxSuppressionV4OptionsT *NonMaxSuppressionV4Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline NonMaxSuppressionV4OptionsT *NonMaxSuppressionV4Options::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<NonMaxSuppressionV4OptionsT>(new NonMaxSuppressionV4OptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void NonMaxSuppressionV4Options::UnPackTo(NonMaxSuppressionV4OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void NonMaxSuppressionV4Options::UnPackTo(NonMaxSuppressionV4OptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<NonMaxSuppressionV4Options> NonMaxSuppressionV4Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<NonMaxSuppressionV4Options> NonMaxSuppressionV4Options::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateNonMaxSuppressionV4Options(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<NonMaxSuppressionV4Options> CreateNonMaxSuppressionV4Options(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<NonMaxSuppressionV4Options> CreateNonMaxSuppressionV4Options(::flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NonMaxSuppressionV4OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const NonMaxSuppressionV4OptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateNonMaxSuppressionV4Options(
       _fbb);
 }
 
-inline NonMaxSuppressionV5OptionsT *NonMaxSuppressionV5Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline NonMaxSuppressionV5OptionsT *NonMaxSuppressionV5Options::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<NonMaxSuppressionV5OptionsT>(new NonMaxSuppressionV5OptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void NonMaxSuppressionV5Options::UnPackTo(NonMaxSuppressionV5OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void NonMaxSuppressionV5Options::UnPackTo(NonMaxSuppressionV5OptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<NonMaxSuppressionV5Options> NonMaxSuppressionV5Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<NonMaxSuppressionV5Options> NonMaxSuppressionV5Options::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateNonMaxSuppressionV5Options(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<NonMaxSuppressionV5Options> CreateNonMaxSuppressionV5Options(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<NonMaxSuppressionV5Options> CreateNonMaxSuppressionV5Options(::flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NonMaxSuppressionV5OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const NonMaxSuppressionV5OptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateNonMaxSuppressionV5Options(
       _fbb);
 }
 
-inline ScatterNdOptionsT *ScatterNdOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline ScatterNdOptionsT *ScatterNdOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<ScatterNdOptionsT>(new ScatterNdOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void ScatterNdOptions::UnPackTo(ScatterNdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void ScatterNdOptions::UnPackTo(ScatterNdOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<ScatterNdOptions> ScatterNdOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ScatterNdOptions> ScatterNdOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateScatterNdOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<ScatterNdOptions> CreateScatterNdOptions(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ScatterNdOptions> CreateScatterNdOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ScatterNdOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ScatterNdOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateScatterNdOptions(
       _fbb);
 }
 
-inline SelectV2OptionsT *SelectV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline SelectV2OptionsT *SelectV2Options::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<SelectV2OptionsT>(new SelectV2OptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void SelectV2Options::UnPackTo(SelectV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void SelectV2Options::UnPackTo(SelectV2OptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<SelectV2Options> SelectV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SelectV2Options> SelectV2Options::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateSelectV2Options(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<SelectV2Options> CreateSelectV2Options(flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SelectV2Options> CreateSelectV2Options(::flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SelectV2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SelectV2OptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateSelectV2Options(
       _fbb);
 }
 
-inline DensifyOptionsT *DensifyOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline DensifyOptionsT *DensifyOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<DensifyOptionsT>(new DensifyOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void DensifyOptions::UnPackTo(DensifyOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void DensifyOptions::UnPackTo(DensifyOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<DensifyOptions> DensifyOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<DensifyOptions> DensifyOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateDensifyOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<DensifyOptions> CreateDensifyOptions(flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<DensifyOptions> CreateDensifyOptions(::flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DensifyOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const DensifyOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateDensifyOptions(
       _fbb);
 }
 
-inline SegmentSumOptionsT *SegmentSumOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline SegmentSumOptionsT *SegmentSumOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<SegmentSumOptionsT>(new SegmentSumOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void SegmentSumOptions::UnPackTo(SegmentSumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void SegmentSumOptions::UnPackTo(SegmentSumOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<SegmentSumOptions> SegmentSumOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SegmentSumOptions> SegmentSumOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateSegmentSumOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<SegmentSumOptions> CreateSegmentSumOptions(flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SegmentSumOptions> CreateSegmentSumOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SegmentSumOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SegmentSumOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateSegmentSumOptions(
       _fbb);
 }
 
-inline BatchMatMulOptionsT *BatchMatMulOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline BatchMatMulOptionsT *BatchMatMulOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<BatchMatMulOptionsT>(new BatchMatMulOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void BatchMatMulOptions::UnPackTo(BatchMatMulOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void BatchMatMulOptions::UnPackTo(BatchMatMulOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = adj_x(); _o->adj_x = _e; }
@@ -16702,14 +20380,14 @@
   { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; }
 }
 
-inline flatbuffers::Offset<BatchMatMulOptions> BatchMatMulOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<BatchMatMulOptions> BatchMatMulOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateBatchMatMulOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<BatchMatMulOptions> CreateBatchMatMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<BatchMatMulOptions> CreateBatchMatMulOptions(::flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BatchMatMulOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const BatchMatMulOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _adj_x = _o->adj_x;
   auto _adj_y = _o->adj_y;
   auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs;
@@ -16720,27 +20398,27 @@
       _asymmetric_quantize_inputs);
 }
 
-inline CumsumOptionsT *CumsumOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline CumsumOptionsT *CumsumOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<CumsumOptionsT>(new CumsumOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void CumsumOptions::UnPackTo(CumsumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void CumsumOptions::UnPackTo(CumsumOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = exclusive(); _o->exclusive = _e; }
   { auto _e = reverse(); _o->reverse = _e; }
 }
 
-inline flatbuffers::Offset<CumsumOptions> CumsumOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<CumsumOptions> CumsumOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateCumsumOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<CumsumOptions> CreateCumsumOptions(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<CumsumOptions> CreateCumsumOptions(::flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CumsumOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const CumsumOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _exclusive = _o->exclusive;
   auto _reverse = _o->reverse;
   return tflite::CreateCumsumOptions(
@@ -16749,59 +20427,59 @@
       _reverse);
 }
 
-inline BroadcastToOptionsT *BroadcastToOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline BroadcastToOptionsT *BroadcastToOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<BroadcastToOptionsT>(new BroadcastToOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void BroadcastToOptions::UnPackTo(BroadcastToOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void BroadcastToOptions::UnPackTo(BroadcastToOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<BroadcastToOptions> BroadcastToOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<BroadcastToOptions> BroadcastToOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateBroadcastToOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<BroadcastToOptions> CreateBroadcastToOptions(flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<BroadcastToOptions> CreateBroadcastToOptions(::flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BroadcastToOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const BroadcastToOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateBroadcastToOptions(
       _fbb);
 }
 
-inline Rfft2dOptionsT *Rfft2dOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline Rfft2dOptionsT *Rfft2dOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<Rfft2dOptionsT>(new Rfft2dOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void Rfft2dOptions::UnPackTo(Rfft2dOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void Rfft2dOptions::UnPackTo(Rfft2dOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<Rfft2dOptions> Rfft2dOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<Rfft2dOptions> Rfft2dOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateRfft2dOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<Rfft2dOptions> CreateRfft2dOptions(flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<Rfft2dOptions> CreateRfft2dOptions(::flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Rfft2dOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const Rfft2dOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateRfft2dOptions(
       _fbb);
 }
 
-inline HashtableOptionsT *HashtableOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline HashtableOptionsT *HashtableOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<HashtableOptionsT>(new HashtableOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void HashtableOptions::UnPackTo(HashtableOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void HashtableOptions::UnPackTo(HashtableOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = table_id(); _o->table_id = _e; }
@@ -16809,14 +20487,14 @@
   { auto _e = value_dtype(); _o->value_dtype = _e; }
 }
 
-inline flatbuffers::Offset<HashtableOptions> HashtableOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<HashtableOptions> HashtableOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateHashtableOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<HashtableOptions> CreateHashtableOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<HashtableOptions> CreateHashtableOptions(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const HashtableOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const HashtableOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _table_id = _o->table_id;
   auto _key_dtype = _o->key_dtype;
   auto _value_dtype = _o->value_dtype;
@@ -16827,96 +20505,96 @@
       _value_dtype);
 }
 
-inline HashtableFindOptionsT *HashtableFindOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline HashtableFindOptionsT *HashtableFindOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<HashtableFindOptionsT>(new HashtableFindOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void HashtableFindOptions::UnPackTo(HashtableFindOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void HashtableFindOptions::UnPackTo(HashtableFindOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<HashtableFindOptions> HashtableFindOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<HashtableFindOptions> HashtableFindOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateHashtableFindOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<HashtableFindOptions> CreateHashtableFindOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<HashtableFindOptions> CreateHashtableFindOptions(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const HashtableFindOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const HashtableFindOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateHashtableFindOptions(
       _fbb);
 }
 
-inline HashtableImportOptionsT *HashtableImportOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline HashtableImportOptionsT *HashtableImportOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<HashtableImportOptionsT>(new HashtableImportOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void HashtableImportOptions::UnPackTo(HashtableImportOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void HashtableImportOptions::UnPackTo(HashtableImportOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<HashtableImportOptions> HashtableImportOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableImportOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<HashtableImportOptions> HashtableImportOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableImportOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateHashtableImportOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<HashtableImportOptions> CreateHashtableImportOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableImportOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<HashtableImportOptions> CreateHashtableImportOptions(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableImportOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const HashtableImportOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const HashtableImportOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateHashtableImportOptions(
       _fbb);
 }
 
-inline HashtableSizeOptionsT *HashtableSizeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline HashtableSizeOptionsT *HashtableSizeOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<HashtableSizeOptionsT>(new HashtableSizeOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void HashtableSizeOptions::UnPackTo(HashtableSizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void HashtableSizeOptions::UnPackTo(HashtableSizeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<HashtableSizeOptions> HashtableSizeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<HashtableSizeOptions> HashtableSizeOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateHashtableSizeOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<HashtableSizeOptions> CreateHashtableSizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<HashtableSizeOptions> CreateHashtableSizeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const HashtableSizeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const HashtableSizeOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateHashtableSizeOptions(
       _fbb);
 }
 
-inline VarHandleOptionsT *VarHandleOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline VarHandleOptionsT *VarHandleOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<VarHandleOptionsT>(new VarHandleOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void VarHandleOptions::UnPackTo(VarHandleOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void VarHandleOptions::UnPackTo(VarHandleOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = container(); if (_e) _o->container = _e->str(); }
   { auto _e = shared_name(); if (_e) _o->shared_name = _e->str(); }
 }
 
-inline flatbuffers::Offset<VarHandleOptions> VarHandleOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const VarHandleOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<VarHandleOptions> VarHandleOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const VarHandleOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateVarHandleOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<VarHandleOptions> CreateVarHandleOptions(flatbuffers::FlatBufferBuilder &_fbb, const VarHandleOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<VarHandleOptions> CreateVarHandleOptions(::flatbuffers::FlatBufferBuilder &_fbb, const VarHandleOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const VarHandleOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const VarHandleOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _container = _o->container.empty() ? 0 : _fbb.CreateString(_o->container);
   auto _shared_name = _o->shared_name.empty() ? 0 : _fbb.CreateString(_o->shared_name);
   return tflite::CreateVarHandleOptions(
@@ -16925,73 +20603,73 @@
       _shared_name);
 }
 
-inline ReadVariableOptionsT *ReadVariableOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline ReadVariableOptionsT *ReadVariableOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<ReadVariableOptionsT>(new ReadVariableOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void ReadVariableOptions::UnPackTo(ReadVariableOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void ReadVariableOptions::UnPackTo(ReadVariableOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<ReadVariableOptions> ReadVariableOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReadVariableOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ReadVariableOptions> ReadVariableOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ReadVariableOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateReadVariableOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<ReadVariableOptions> CreateReadVariableOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReadVariableOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ReadVariableOptions> CreateReadVariableOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ReadVariableOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReadVariableOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ReadVariableOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateReadVariableOptions(
       _fbb);
 }
 
-inline AssignVariableOptionsT *AssignVariableOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline AssignVariableOptionsT *AssignVariableOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<AssignVariableOptionsT>(new AssignVariableOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void AssignVariableOptions::UnPackTo(AssignVariableOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void AssignVariableOptions::UnPackTo(AssignVariableOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<AssignVariableOptions> AssignVariableOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AssignVariableOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<AssignVariableOptions> AssignVariableOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const AssignVariableOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateAssignVariableOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<AssignVariableOptions> CreateAssignVariableOptions(flatbuffers::FlatBufferBuilder &_fbb, const AssignVariableOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<AssignVariableOptions> CreateAssignVariableOptions(::flatbuffers::FlatBufferBuilder &_fbb, const AssignVariableOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AssignVariableOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const AssignVariableOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateAssignVariableOptions(
       _fbb);
 }
 
-inline RandomOptionsT *RandomOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline RandomOptionsT *RandomOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<RandomOptionsT>(new RandomOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void RandomOptions::UnPackTo(RandomOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void RandomOptions::UnPackTo(RandomOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = seed(); _o->seed = _e; }
   { auto _e = seed2(); _o->seed2 = _e; }
 }
 
-inline flatbuffers::Offset<RandomOptions> RandomOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RandomOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<RandomOptions> RandomOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const RandomOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateRandomOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<RandomOptions> CreateRandomOptions(flatbuffers::FlatBufferBuilder &_fbb, const RandomOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<RandomOptions> CreateRandomOptions(::flatbuffers::FlatBufferBuilder &_fbb, const RandomOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RandomOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const RandomOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _seed = _o->seed;
   auto _seed2 = _o->seed2;
   return tflite::CreateRandomOptions(
@@ -17000,295 +20678,344 @@
       _seed2);
 }
 
-inline BucketizeOptionsT *BucketizeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline BucketizeOptionsT *BucketizeOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<BucketizeOptionsT>(new BucketizeOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void BucketizeOptions::UnPackTo(BucketizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void BucketizeOptions::UnPackTo(BucketizeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
-  { auto _e = boundaries(); if (_e) { _o->boundaries.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->boundaries[_i] = _e->Get(_i); } } }
+  { auto _e = boundaries(); if (_e) { _o->boundaries.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->boundaries[_i] = _e->Get(_i); } } else { _o->boundaries.resize(0); } }
 }
 
-inline flatbuffers::Offset<BucketizeOptions> BucketizeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BucketizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<BucketizeOptions> BucketizeOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BucketizeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateBucketizeOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<BucketizeOptions> CreateBucketizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const BucketizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<BucketizeOptions> CreateBucketizeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const BucketizeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BucketizeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const BucketizeOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _boundaries = _o->boundaries.size() ? _fbb.CreateVector(_o->boundaries) : 0;
   return tflite::CreateBucketizeOptions(
       _fbb,
       _boundaries);
 }
 
-inline GeluOptionsT *GeluOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline GeluOptionsT *GeluOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<GeluOptionsT>(new GeluOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void GeluOptions::UnPackTo(GeluOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void GeluOptions::UnPackTo(GeluOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = approximate(); _o->approximate = _e; }
 }
 
-inline flatbuffers::Offset<GeluOptions> GeluOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GeluOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<GeluOptions> GeluOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const GeluOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateGeluOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<GeluOptions> CreateGeluOptions(flatbuffers::FlatBufferBuilder &_fbb, const GeluOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<GeluOptions> CreateGeluOptions(::flatbuffers::FlatBufferBuilder &_fbb, const GeluOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GeluOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const GeluOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _approximate = _o->approximate;
   return tflite::CreateGeluOptions(
       _fbb,
       _approximate);
 }
 
-inline DynamicUpdateSliceOptionsT *DynamicUpdateSliceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline DynamicUpdateSliceOptionsT *DynamicUpdateSliceOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<DynamicUpdateSliceOptionsT>(new DynamicUpdateSliceOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void DynamicUpdateSliceOptions::UnPackTo(DynamicUpdateSliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void DynamicUpdateSliceOptions::UnPackTo(DynamicUpdateSliceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<DynamicUpdateSliceOptions> DynamicUpdateSliceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DynamicUpdateSliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<DynamicUpdateSliceOptions> DynamicUpdateSliceOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DynamicUpdateSliceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateDynamicUpdateSliceOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<DynamicUpdateSliceOptions> CreateDynamicUpdateSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const DynamicUpdateSliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<DynamicUpdateSliceOptions> CreateDynamicUpdateSliceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const DynamicUpdateSliceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DynamicUpdateSliceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const DynamicUpdateSliceOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateDynamicUpdateSliceOptions(
       _fbb);
 }
 
-inline UnsortedSegmentProdOptionsT *UnsortedSegmentProdOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline UnsortedSegmentProdOptionsT *UnsortedSegmentProdOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<UnsortedSegmentProdOptionsT>(new UnsortedSegmentProdOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void UnsortedSegmentProdOptions::UnPackTo(UnsortedSegmentProdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void UnsortedSegmentProdOptions::UnPackTo(UnsortedSegmentProdOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<UnsortedSegmentProdOptions> UnsortedSegmentProdOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentProdOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<UnsortedSegmentProdOptions> UnsortedSegmentProdOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentProdOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateUnsortedSegmentProdOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<UnsortedSegmentProdOptions> CreateUnsortedSegmentProdOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentProdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<UnsortedSegmentProdOptions> CreateUnsortedSegmentProdOptions(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentProdOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UnsortedSegmentProdOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const UnsortedSegmentProdOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateUnsortedSegmentProdOptions(
       _fbb);
 }
 
-inline UnsortedSegmentMaxOptionsT *UnsortedSegmentMaxOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline UnsortedSegmentMaxOptionsT *UnsortedSegmentMaxOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<UnsortedSegmentMaxOptionsT>(new UnsortedSegmentMaxOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void UnsortedSegmentMaxOptions::UnPackTo(UnsortedSegmentMaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void UnsortedSegmentMaxOptions::UnPackTo(UnsortedSegmentMaxOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<UnsortedSegmentMaxOptions> UnsortedSegmentMaxOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<UnsortedSegmentMaxOptions> UnsortedSegmentMaxOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMaxOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateUnsortedSegmentMaxOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<UnsortedSegmentMaxOptions> CreateUnsortedSegmentMaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<UnsortedSegmentMaxOptions> CreateUnsortedSegmentMaxOptions(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMaxOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UnsortedSegmentMaxOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const UnsortedSegmentMaxOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateUnsortedSegmentMaxOptions(
       _fbb);
 }
 
-inline UnsortedSegmentSumOptionsT *UnsortedSegmentSumOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline UnsortedSegmentSumOptionsT *UnsortedSegmentSumOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<UnsortedSegmentSumOptionsT>(new UnsortedSegmentSumOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void UnsortedSegmentSumOptions::UnPackTo(UnsortedSegmentSumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void UnsortedSegmentSumOptions::UnPackTo(UnsortedSegmentSumOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<UnsortedSegmentSumOptions> UnsortedSegmentSumOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentSumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<UnsortedSegmentSumOptions> UnsortedSegmentSumOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentSumOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateUnsortedSegmentSumOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<UnsortedSegmentSumOptions> CreateUnsortedSegmentSumOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentSumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<UnsortedSegmentSumOptions> CreateUnsortedSegmentSumOptions(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentSumOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UnsortedSegmentSumOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const UnsortedSegmentSumOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateUnsortedSegmentSumOptions(
       _fbb);
 }
 
-inline ATan2OptionsT *ATan2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline ATan2OptionsT *ATan2Options::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<ATan2OptionsT>(new ATan2OptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void ATan2Options::UnPackTo(ATan2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void ATan2Options::UnPackTo(ATan2OptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<ATan2Options> ATan2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ATan2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ATan2Options> ATan2Options::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ATan2OptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateATan2Options(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<ATan2Options> CreateATan2Options(flatbuffers::FlatBufferBuilder &_fbb, const ATan2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<ATan2Options> CreateATan2Options(::flatbuffers::FlatBufferBuilder &_fbb, const ATan2OptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ATan2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ATan2OptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateATan2Options(
       _fbb);
 }
 
-inline UnsortedSegmentMinOptionsT *UnsortedSegmentMinOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline UnsortedSegmentMinOptionsT *UnsortedSegmentMinOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<UnsortedSegmentMinOptionsT>(new UnsortedSegmentMinOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void UnsortedSegmentMinOptions::UnPackTo(UnsortedSegmentMinOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void UnsortedSegmentMinOptions::UnPackTo(UnsortedSegmentMinOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<UnsortedSegmentMinOptions> UnsortedSegmentMinOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMinOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<UnsortedSegmentMinOptions> UnsortedSegmentMinOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMinOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateUnsortedSegmentMinOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<UnsortedSegmentMinOptions> CreateUnsortedSegmentMinOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMinOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<UnsortedSegmentMinOptions> CreateUnsortedSegmentMinOptions(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMinOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UnsortedSegmentMinOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const UnsortedSegmentMinOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateUnsortedSegmentMinOptions(
       _fbb);
 }
 
-inline SignOptionsT *SignOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline SignOptionsT *SignOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<SignOptionsT>(new SignOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void SignOptions::UnPackTo(SignOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void SignOptions::UnPackTo(SignOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<SignOptions> SignOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SignOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SignOptions> SignOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SignOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateSignOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<SignOptions> CreateSignOptions(flatbuffers::FlatBufferBuilder &_fbb, const SignOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SignOptions> CreateSignOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SignOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SignOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SignOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateSignOptions(
       _fbb);
 }
 
-inline BitcastOptionsT *BitcastOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline BitcastOptionsT *BitcastOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<BitcastOptionsT>(new BitcastOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void BitcastOptions::UnPackTo(BitcastOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void BitcastOptions::UnPackTo(BitcastOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<BitcastOptions> BitcastOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BitcastOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<BitcastOptions> BitcastOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BitcastOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateBitcastOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<BitcastOptions> CreateBitcastOptions(flatbuffers::FlatBufferBuilder &_fbb, const BitcastOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<BitcastOptions> CreateBitcastOptions(::flatbuffers::FlatBufferBuilder &_fbb, const BitcastOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BitcastOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const BitcastOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateBitcastOptions(
       _fbb);
 }
 
-inline BitwiseXorOptionsT *BitwiseXorOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline BitwiseXorOptionsT *BitwiseXorOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<BitwiseXorOptionsT>(new BitwiseXorOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void BitwiseXorOptions::UnPackTo(BitwiseXorOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void BitwiseXorOptions::UnPackTo(BitwiseXorOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<BitwiseXorOptions> BitwiseXorOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BitwiseXorOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<BitwiseXorOptions> BitwiseXorOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BitwiseXorOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateBitwiseXorOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<BitwiseXorOptions> CreateBitwiseXorOptions(flatbuffers::FlatBufferBuilder &_fbb, const BitwiseXorOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<BitwiseXorOptions> CreateBitwiseXorOptions(::flatbuffers::FlatBufferBuilder &_fbb, const BitwiseXorOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BitwiseXorOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const BitwiseXorOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateBitwiseXorOptions(
       _fbb);
 }
 
-inline RightShiftOptionsT *RightShiftOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline RightShiftOptionsT *RightShiftOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<RightShiftOptionsT>(new RightShiftOptionsT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void RightShiftOptions::UnPackTo(RightShiftOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void RightShiftOptions::UnPackTo(RightShiftOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
 }
 
-inline flatbuffers::Offset<RightShiftOptions> RightShiftOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RightShiftOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<RightShiftOptions> RightShiftOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const RightShiftOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateRightShiftOptions(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<RightShiftOptions> CreateRightShiftOptions(flatbuffers::FlatBufferBuilder &_fbb, const RightShiftOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<RightShiftOptions> CreateRightShiftOptions(::flatbuffers::FlatBufferBuilder &_fbb, const RightShiftOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RightShiftOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const RightShiftOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   return tflite::CreateRightShiftOptions(
       _fbb);
 }
 
-inline OperatorCodeT *OperatorCode::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline DilateOptionsT *DilateOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
+  auto _o = std::unique_ptr<DilateOptionsT>(new DilateOptionsT());
+  UnPackTo(_o.get(), _resolver);
+  return _o.release();
+}
+
+inline void DilateOptions::UnPackTo(DilateOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
+  (void)_o;
+  (void)_resolver;
+}
+
+inline ::flatbuffers::Offset<DilateOptions> DilateOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DilateOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  return CreateDilateOptions(_fbb, _o, _rehasher);
+}
+
+inline ::flatbuffers::Offset<DilateOptions> CreateDilateOptions(::flatbuffers::FlatBufferBuilder &_fbb, const DilateOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  (void)_rehasher;
+  (void)_o;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const DilateOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  return tflite::CreateDilateOptions(
+      _fbb);
+}
+
+inline ReduceWindowOptionsT *ReduceWindowOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
+  auto _o = std::unique_ptr<ReduceWindowOptionsT>(new ReduceWindowOptionsT());
+  UnPackTo(_o.get(), _resolver);
+  return _o.release();
+}
+
+inline void ReduceWindowOptions::UnPackTo(ReduceWindowOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
+  (void)_o;
+  (void)_resolver;
+  { auto _e = reduce_function(); _o->reduce_function = _e; }
+}
+
+inline ::flatbuffers::Offset<ReduceWindowOptions> ReduceWindowOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ReduceWindowOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  return CreateReduceWindowOptions(_fbb, _o, _rehasher);
+}
+
+inline ::flatbuffers::Offset<ReduceWindowOptions> CreateReduceWindowOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ReduceWindowOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  (void)_rehasher;
+  (void)_o;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ReduceWindowOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  auto _reduce_function = _o->reduce_function;
+  return tflite::CreateReduceWindowOptions(
+      _fbb,
+      _reduce_function);
+}
+
+inline OperatorCodeT *OperatorCode::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<OperatorCodeT>(new OperatorCodeT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void OperatorCode::UnPackTo(OperatorCodeT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void OperatorCode::UnPackTo(OperatorCodeT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = deprecated_builtin_code(); _o->deprecated_builtin_code = _e; }
@@ -17297,14 +21024,14 @@
   { auto _e = builtin_code(); _o->builtin_code = _e; }
 }
 
-inline flatbuffers::Offset<OperatorCode> OperatorCode::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<OperatorCode> OperatorCode::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateOperatorCode(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<OperatorCode> CreateOperatorCode(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<OperatorCode> CreateOperatorCode(::flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const OperatorCodeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const OperatorCodeT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _deprecated_builtin_code = _o->deprecated_builtin_code;
   auto _custom_code = _o->custom_code.empty() ? 0 : _fbb.CreateString(_o->custom_code);
   auto _version = _o->version;
@@ -17317,36 +21044,76 @@
       _builtin_code);
 }
 
-inline OperatorT *Operator::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline StableHLOCompositeOptionsT *StableHLOCompositeOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
+  auto _o = std::unique_ptr<StableHLOCompositeOptionsT>(new StableHLOCompositeOptionsT());
+  UnPackTo(_o.get(), _resolver);
+  return _o.release();
+}
+
+inline void StableHLOCompositeOptions::UnPackTo(StableHLOCompositeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
+  (void)_o;
+  (void)_resolver;
+  { auto _e = name(); if (_e) _o->name = _e->str(); }
+  { auto _e = decomposition_subgraph_index(); _o->decomposition_subgraph_index = _e; }
+  { auto _e = composite_attributes(); if (_e) { _o->composite_attributes.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->composite_attributes.begin()); } }
+  { auto _e = composite_attributes_format(); _o->composite_attributes_format = _e; }
+  { auto _e = version(); _o->version = _e; }
+}
+
+inline ::flatbuffers::Offset<StableHLOCompositeOptions> StableHLOCompositeOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StableHLOCompositeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  return CreateStableHLOCompositeOptions(_fbb, _o, _rehasher);
+}
+
+inline ::flatbuffers::Offset<StableHLOCompositeOptions> CreateStableHLOCompositeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StableHLOCompositeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
+  (void)_rehasher;
+  (void)_o;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StableHLOCompositeOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name);
+  auto _decomposition_subgraph_index = _o->decomposition_subgraph_index;
+  auto _composite_attributes = _o->composite_attributes.size() ? _fbb.CreateVector(_o->composite_attributes) : 0;
+  auto _composite_attributes_format = _o->composite_attributes_format;
+  auto _version = _o->version;
+  return tflite::CreateStableHLOCompositeOptions(
+      _fbb,
+      _name,
+      _decomposition_subgraph_index,
+      _composite_attributes,
+      _composite_attributes_format,
+      _version);
+}
+
+inline OperatorT *Operator::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<OperatorT>(new OperatorT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void Operator::UnPackTo(OperatorT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void Operator::UnPackTo(OperatorT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = opcode_index(); _o->opcode_index = _e; }
-  { auto _e = inputs(); if (_e) { _o->inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputs[_i] = _e->Get(_i); } } }
-  { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputs[_i] = _e->Get(_i); } } }
+  { auto _e = inputs(); if (_e) { _o->inputs.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputs[_i] = _e->Get(_i); } } else { _o->inputs.resize(0); } }
+  { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputs[_i] = _e->Get(_i); } } else { _o->outputs.resize(0); } }
   { auto _e = builtin_options_type(); _o->builtin_options.type = _e; }
   { auto _e = builtin_options(); if (_e) _o->builtin_options.value = tflite::BuiltinOptionsUnion::UnPack(_e, builtin_options_type(), _resolver); }
   { auto _e = custom_options(); if (_e) { _o->custom_options.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->custom_options.begin()); } }
   { auto _e = custom_options_format(); _o->custom_options_format = _e; }
-  { auto _e = mutating_variable_inputs(); if (_e) { _o->mutating_variable_inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->mutating_variable_inputs[_i] = _e->Get(_i) != 0; } } }
-  { auto _e = intermediates(); if (_e) { _o->intermediates.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->intermediates[_i] = _e->Get(_i); } } }
+  { auto _e = mutating_variable_inputs(); if (_e) { _o->mutating_variable_inputs.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->mutating_variable_inputs[_i] = _e->Get(_i) != 0; } } else { _o->mutating_variable_inputs.resize(0); } }
+  { auto _e = intermediates(); if (_e) { _o->intermediates.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->intermediates[_i] = _e->Get(_i); } } else { _o->intermediates.resize(0); } }
   { auto _e = large_custom_options_offset(); _o->large_custom_options_offset = _e; }
   { auto _e = large_custom_options_size(); _o->large_custom_options_size = _e; }
+  { auto _e = builtin_options_2_type(); _o->builtin_options_2.type = _e; }
+  { auto _e = builtin_options_2(); if (_e) _o->builtin_options_2.value = tflite::BuiltinOptions2Union::UnPack(_e, builtin_options_2_type(), _resolver); }
 }
 
-inline flatbuffers::Offset<Operator> Operator::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<Operator> Operator::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const OperatorT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateOperator(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<Operator> CreateOperator(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<Operator> CreateOperator(::flatbuffers::FlatBufferBuilder &_fbb, const OperatorT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const OperatorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const OperatorT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _opcode_index = _o->opcode_index;
   auto _inputs = _o->inputs.size() ? _fbb.CreateVector(_o->inputs) : 0;
   auto _outputs = _o->outputs.size() ? _fbb.CreateVector(_o->outputs) : 0;
@@ -17358,6 +21125,8 @@
   auto _intermediates = _o->intermediates.size() ? _fbb.CreateVector(_o->intermediates) : 0;
   auto _large_custom_options_offset = _o->large_custom_options_offset;
   auto _large_custom_options_size = _o->large_custom_options_size;
+  auto _builtin_options_2_type = _o->builtin_options_2.type;
+  auto _builtin_options_2 = _o->builtin_options_2.Pack(_fbb);
   return tflite::CreateOperator(
       _fbb,
       _opcode_index,
@@ -17370,7 +21139,9 @@
       _mutating_variable_inputs,
       _intermediates,
       _large_custom_options_offset,
-      _large_custom_options_size);
+      _large_custom_options_size,
+      _builtin_options_2_type,
+      _builtin_options_2);
 }
 
 inline SubGraphT::SubGraphT(const SubGraphT &o)
@@ -17392,34 +21163,34 @@
   return *this;
 }
 
-inline SubGraphT *SubGraph::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline SubGraphT *SubGraph::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<SubGraphT>(new SubGraphT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void SubGraph::UnPackTo(SubGraphT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void SubGraph::UnPackTo(SubGraphT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
-  { auto _e = tensors(); if (_e) { _o->tensors.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->tensors[_i]) { _e->Get(_i)->UnPackTo(_o->tensors[_i].get(), _resolver); } else { _o->tensors[_i] = std::unique_ptr<tflite::TensorT>(_e->Get(_i)->UnPack(_resolver)); }; } } }
-  { auto _e = inputs(); if (_e) { _o->inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputs[_i] = _e->Get(_i); } } }
-  { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputs[_i] = _e->Get(_i); } } }
-  { auto _e = operators(); if (_e) { _o->operators.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->operators[_i]) { _e->Get(_i)->UnPackTo(_o->operators[_i].get(), _resolver); } else { _o->operators[_i] = std::unique_ptr<tflite::OperatorT>(_e->Get(_i)->UnPack(_resolver)); }; } } }
+  { auto _e = tensors(); if (_e) { _o->tensors.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->tensors[_i]) { _e->Get(_i)->UnPackTo(_o->tensors[_i].get(), _resolver); } else { _o->tensors[_i] = std::unique_ptr<tflite::TensorT>(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->tensors.resize(0); } }
+  { auto _e = inputs(); if (_e) { _o->inputs.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputs[_i] = _e->Get(_i); } } else { _o->inputs.resize(0); } }
+  { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputs[_i] = _e->Get(_i); } } else { _o->outputs.resize(0); } }
+  { auto _e = operators(); if (_e) { _o->operators.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->operators[_i]) { _e->Get(_i)->UnPackTo(_o->operators[_i].get(), _resolver); } else { _o->operators[_i] = std::unique_ptr<tflite::OperatorT>(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->operators.resize(0); } }
   { auto _e = name(); if (_e) _o->name = _e->str(); }
 }
 
-inline flatbuffers::Offset<SubGraph> SubGraph::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SubGraph> SubGraph::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateSubGraph(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<SubGraph> CreateSubGraph(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SubGraph> CreateSubGraph(::flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SubGraphT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _tensors = _o->tensors.size() ? _fbb.CreateVector<flatbuffers::Offset<tflite::Tensor>> (_o->tensors.size(), [](size_t i, _VectorArgs *__va) { return CreateTensor(*__va->__fbb, __va->__o->tensors[i].get(), __va->__rehasher); }, &_va ) : 0;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SubGraphT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  auto _tensors = _o->tensors.size() ? _fbb.CreateVector<::flatbuffers::Offset<tflite::Tensor>> (_o->tensors.size(), [](size_t i, _VectorArgs *__va) { return CreateTensor(*__va->__fbb, __va->__o->tensors[i].get(), __va->__rehasher); }, &_va ) : 0;
   auto _inputs = _o->inputs.size() ? _fbb.CreateVector(_o->inputs) : 0;
   auto _outputs = _o->outputs.size() ? _fbb.CreateVector(_o->outputs) : 0;
-  auto _operators = _o->operators.size() ? _fbb.CreateVector<flatbuffers::Offset<tflite::Operator>> (_o->operators.size(), [](size_t i, _VectorArgs *__va) { return CreateOperator(*__va->__fbb, __va->__o->operators[i].get(), __va->__rehasher); }, &_va ) : 0;
+  auto _operators = _o->operators.size() ? _fbb.CreateVector<::flatbuffers::Offset<tflite::Operator>> (_o->operators.size(), [](size_t i, _VectorArgs *__va) { return CreateOperator(*__va->__fbb, __va->__o->operators[i].get(), __va->__rehasher); }, &_va ) : 0;
   auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name);
   return tflite::CreateSubGraph(
       _fbb,
@@ -17430,13 +21201,13 @@
       _name);
 }
 
-inline BufferT *Buffer::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline BufferT *Buffer::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<BufferT>(new BufferT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void Buffer::UnPackTo(BufferT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void Buffer::UnPackTo(BufferT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = data(); if (_e) { _o->data.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->data.begin()); } }
@@ -17444,14 +21215,14 @@
   { auto _e = size(); _o->size = _e; }
 }
 
-inline flatbuffers::Offset<Buffer> Buffer::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BufferT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<Buffer> Buffer::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BufferT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateBuffer(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<Buffer> CreateBuffer(flatbuffers::FlatBufferBuilder &_fbb, const BufferT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<Buffer> CreateBuffer(::flatbuffers::FlatBufferBuilder &_fbb, const BufferT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BufferT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const BufferT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   _fbb.ForceVectorAlignment(_o->data.size(), sizeof(uint8_t), 16);
   auto _data = _o->data.size() ? _fbb.CreateVector(_o->data) : 0;
   auto _offset = _o->offset;
@@ -17463,27 +21234,27 @@
       _size);
 }
 
-inline MetadataT *Metadata::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline MetadataT *Metadata::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<MetadataT>(new MetadataT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void Metadata::UnPackTo(MetadataT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void Metadata::UnPackTo(MetadataT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = name(); if (_e) _o->name = _e->str(); }
   { auto _e = buffer(); _o->buffer = _e; }
 }
 
-inline flatbuffers::Offset<Metadata> Metadata::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MetadataT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<Metadata> Metadata::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MetadataT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateMetadata(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<Metadata> CreateMetadata(flatbuffers::FlatBufferBuilder &_fbb, const MetadataT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<Metadata> CreateMetadata(::flatbuffers::FlatBufferBuilder &_fbb, const MetadataT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MetadataT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const MetadataT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name);
   auto _buffer = _o->buffer;
   return tflite::CreateMetadata(
@@ -17492,27 +21263,27 @@
       _buffer);
 }
 
-inline TensorMapT *TensorMap::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline TensorMapT *TensorMap::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<TensorMapT>(new TensorMapT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void TensorMap::UnPackTo(TensorMapT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void TensorMap::UnPackTo(TensorMapT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = name(); if (_e) _o->name = _e->str(); }
   { auto _e = tensor_index(); _o->tensor_index = _e; }
 }
 
-inline flatbuffers::Offset<TensorMap> TensorMap::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<TensorMap> TensorMap::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateTensorMap(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<TensorMap> CreateTensorMap(flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<TensorMap> CreateTensorMap(::flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TensorMapT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const TensorMapT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name);
   auto _tensor_index = _o->tensor_index;
   return tflite::CreateTensorMap(
@@ -17538,31 +21309,31 @@
   return *this;
 }
 
-inline SignatureDefT *SignatureDef::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline SignatureDefT *SignatureDef::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<SignatureDefT>(new SignatureDefT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void SignatureDef::UnPackTo(SignatureDefT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void SignatureDef::UnPackTo(SignatureDefT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
-  { auto _e = inputs(); if (_e) { _o->inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->inputs[_i]) { _e->Get(_i)->UnPackTo(_o->inputs[_i].get(), _resolver); } else { _o->inputs[_i] = std::unique_ptr<tflite::TensorMapT>(_e->Get(_i)->UnPack(_resolver)); }; } } }
-  { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->outputs[_i]) { _e->Get(_i)->UnPackTo(_o->outputs[_i].get(), _resolver); } else { _o->outputs[_i] = std::unique_ptr<tflite::TensorMapT>(_e->Get(_i)->UnPack(_resolver)); }; } } }
+  { auto _e = inputs(); if (_e) { _o->inputs.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->inputs[_i]) { _e->Get(_i)->UnPackTo(_o->inputs[_i].get(), _resolver); } else { _o->inputs[_i] = std::unique_ptr<tflite::TensorMapT>(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->inputs.resize(0); } }
+  { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->outputs[_i]) { _e->Get(_i)->UnPackTo(_o->outputs[_i].get(), _resolver); } else { _o->outputs[_i] = std::unique_ptr<tflite::TensorMapT>(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->outputs.resize(0); } }
   { auto _e = signature_key(); if (_e) _o->signature_key = _e->str(); }
   { auto _e = subgraph_index(); _o->subgraph_index = _e; }
 }
 
-inline flatbuffers::Offset<SignatureDef> SignatureDef::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SignatureDef> SignatureDef::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateSignatureDef(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<SignatureDef> CreateSignatureDef(flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<SignatureDef> CreateSignatureDef(::flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SignatureDefT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _inputs = _o->inputs.size() ? _fbb.CreateVector<flatbuffers::Offset<tflite::TensorMap>> (_o->inputs.size(), [](size_t i, _VectorArgs *__va) { return CreateTensorMap(*__va->__fbb, __va->__o->inputs[i].get(), __va->__rehasher); }, &_va ) : 0;
-  auto _outputs = _o->outputs.size() ? _fbb.CreateVector<flatbuffers::Offset<tflite::TensorMap>> (_o->outputs.size(), [](size_t i, _VectorArgs *__va) { return CreateTensorMap(*__va->__fbb, __va->__o->outputs[i].get(), __va->__rehasher); }, &_va ) : 0;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SignatureDefT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  auto _inputs = _o->inputs.size() ? _fbb.CreateVector<::flatbuffers::Offset<tflite::TensorMap>> (_o->inputs.size(), [](size_t i, _VectorArgs *__va) { return CreateTensorMap(*__va->__fbb, __va->__o->inputs[i].get(), __va->__rehasher); }, &_va ) : 0;
+  auto _outputs = _o->outputs.size() ? _fbb.CreateVector<::flatbuffers::Offset<tflite::TensorMap>> (_o->outputs.size(), [](size_t i, _VectorArgs *__va) { return CreateTensorMap(*__va->__fbb, __va->__o->outputs[i].get(), __va->__rehasher); }, &_va ) : 0;
   auto _signature_key = _o->signature_key.empty() ? 0 : _fbb.CreateString(_o->signature_key);
   auto _subgraph_index = _o->subgraph_index;
   return tflite::CreateSignatureDef(
@@ -17601,41 +21372,41 @@
   return *this;
 }
 
-inline ModelT *Model::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+inline ModelT *Model::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
   auto _o = std::unique_ptr<ModelT>(new ModelT());
   UnPackTo(_o.get(), _resolver);
   return _o.release();
 }
 
-inline void Model::UnPackTo(ModelT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+inline void Model::UnPackTo(ModelT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
   (void)_o;
   (void)_resolver;
   { auto _e = version(); _o->version = _e; }
-  { auto _e = operator_codes(); if (_e) { _o->operator_codes.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->operator_codes[_i]) { _e->Get(_i)->UnPackTo(_o->operator_codes[_i].get(), _resolver); } else { _o->operator_codes[_i] = std::unique_ptr<tflite::OperatorCodeT>(_e->Get(_i)->UnPack(_resolver)); }; } } }
-  { auto _e = subgraphs(); if (_e) { _o->subgraphs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->subgraphs[_i]) { _e->Get(_i)->UnPackTo(_o->subgraphs[_i].get(), _resolver); } else { _o->subgraphs[_i] = std::unique_ptr<tflite::SubGraphT>(_e->Get(_i)->UnPack(_resolver)); }; } } }
+  { auto _e = operator_codes(); if (_e) { _o->operator_codes.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->operator_codes[_i]) { _e->Get(_i)->UnPackTo(_o->operator_codes[_i].get(), _resolver); } else { _o->operator_codes[_i] = std::unique_ptr<tflite::OperatorCodeT>(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->operator_codes.resize(0); } }
+  { auto _e = subgraphs(); if (_e) { _o->subgraphs.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->subgraphs[_i]) { _e->Get(_i)->UnPackTo(_o->subgraphs[_i].get(), _resolver); } else { _o->subgraphs[_i] = std::unique_ptr<tflite::SubGraphT>(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->subgraphs.resize(0); } }
   { auto _e = description(); if (_e) _o->description = _e->str(); }
-  { auto _e = buffers(); if (_e) { _o->buffers.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->buffers[_i]) { _e->Get(_i)->UnPackTo(_o->buffers[_i].get(), _resolver); } else { _o->buffers[_i] = std::unique_ptr<tflite::BufferT>(_e->Get(_i)->UnPack(_resolver)); }; } } }
-  { auto _e = metadata_buffer(); if (_e) { _o->metadata_buffer.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->metadata_buffer[_i] = _e->Get(_i); } } }
-  { auto _e = metadata(); if (_e) { _o->metadata.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->metadata[_i]) { _e->Get(_i)->UnPackTo(_o->metadata[_i].get(), _resolver); } else { _o->metadata[_i] = std::unique_ptr<tflite::MetadataT>(_e->Get(_i)->UnPack(_resolver)); }; } } }
-  { auto _e = signature_defs(); if (_e) { _o->signature_defs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->signature_defs[_i]) { _e->Get(_i)->UnPackTo(_o->signature_defs[_i].get(), _resolver); } else { _o->signature_defs[_i] = std::unique_ptr<tflite::SignatureDefT>(_e->Get(_i)->UnPack(_resolver)); }; } } }
+  { auto _e = buffers(); if (_e) { _o->buffers.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->buffers[_i]) { _e->Get(_i)->UnPackTo(_o->buffers[_i].get(), _resolver); } else { _o->buffers[_i] = std::unique_ptr<tflite::BufferT>(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->buffers.resize(0); } }
+  { auto _e = metadata_buffer(); if (_e) { _o->metadata_buffer.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->metadata_buffer[_i] = _e->Get(_i); } } else { _o->metadata_buffer.resize(0); } }
+  { auto _e = metadata(); if (_e) { _o->metadata.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->metadata[_i]) { _e->Get(_i)->UnPackTo(_o->metadata[_i].get(), _resolver); } else { _o->metadata[_i] = std::unique_ptr<tflite::MetadataT>(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->metadata.resize(0); } }
+  { auto _e = signature_defs(); if (_e) { _o->signature_defs.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->signature_defs[_i]) { _e->Get(_i)->UnPackTo(_o->signature_defs[_i].get(), _resolver); } else { _o->signature_defs[_i] = std::unique_ptr<tflite::SignatureDefT>(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->signature_defs.resize(0); } }
 }
 
-inline flatbuffers::Offset<Model> Model::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ModelT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<Model> Model::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ModelT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   return CreateModel(_fbb, _o, _rehasher);
 }
 
-inline flatbuffers::Offset<Model> CreateModel(flatbuffers::FlatBufferBuilder &_fbb, const ModelT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+inline ::flatbuffers::Offset<Model> CreateModel(::flatbuffers::FlatBufferBuilder &_fbb, const ModelT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
   (void)_rehasher;
   (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ModelT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ModelT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
   auto _version = _o->version;
-  auto _operator_codes = _o->operator_codes.size() ? _fbb.CreateVector<flatbuffers::Offset<tflite::OperatorCode>> (_o->operator_codes.size(), [](size_t i, _VectorArgs *__va) { return CreateOperatorCode(*__va->__fbb, __va->__o->operator_codes[i].get(), __va->__rehasher); }, &_va ) : 0;
-  auto _subgraphs = _o->subgraphs.size() ? _fbb.CreateVector<flatbuffers::Offset<tflite::SubGraph>> (_o->subgraphs.size(), [](size_t i, _VectorArgs *__va) { return CreateSubGraph(*__va->__fbb, __va->__o->subgraphs[i].get(), __va->__rehasher); }, &_va ) : 0;
+  auto _operator_codes = _o->operator_codes.size() ? _fbb.CreateVector<::flatbuffers::Offset<tflite::OperatorCode>> (_o->operator_codes.size(), [](size_t i, _VectorArgs *__va) { return CreateOperatorCode(*__va->__fbb, __va->__o->operator_codes[i].get(), __va->__rehasher); }, &_va ) : 0;
+  auto _subgraphs = _o->subgraphs.size() ? _fbb.CreateVector<::flatbuffers::Offset<tflite::SubGraph>> (_o->subgraphs.size(), [](size_t i, _VectorArgs *__va) { return CreateSubGraph(*__va->__fbb, __va->__o->subgraphs[i].get(), __va->__rehasher); }, &_va ) : 0;
   auto _description = _o->description.empty() ? 0 : _fbb.CreateString(_o->description);
-  auto _buffers = _o->buffers.size() ? _fbb.CreateVector<flatbuffers::Offset<tflite::Buffer>> (_o->buffers.size(), [](size_t i, _VectorArgs *__va) { return CreateBuffer(*__va->__fbb, __va->__o->buffers[i].get(), __va->__rehasher); }, &_va ) : 0;
+  auto _buffers = _o->buffers.size() ? _fbb.CreateVector<::flatbuffers::Offset<tflite::Buffer>> (_o->buffers.size(), [](size_t i, _VectorArgs *__va) { return CreateBuffer(*__va->__fbb, __va->__o->buffers[i].get(), __va->__rehasher); }, &_va ) : 0;
   auto _metadata_buffer = _o->metadata_buffer.size() ? _fbb.CreateVector(_o->metadata_buffer) : 0;
-  auto _metadata = _o->metadata.size() ? _fbb.CreateVector<flatbuffers::Offset<tflite::Metadata>> (_o->metadata.size(), [](size_t i, _VectorArgs *__va) { return CreateMetadata(*__va->__fbb, __va->__o->metadata[i].get(), __va->__rehasher); }, &_va ) : 0;
-  auto _signature_defs = _o->signature_defs.size() ? _fbb.CreateVector<flatbuffers::Offset<tflite::SignatureDef>> (_o->signature_defs.size(), [](size_t i, _VectorArgs *__va) { return CreateSignatureDef(*__va->__fbb, __va->__o->signature_defs[i].get(), __va->__rehasher); }, &_va ) : 0;
+  auto _metadata = _o->metadata.size() ? _fbb.CreateVector<::flatbuffers::Offset<tflite::Metadata>> (_o->metadata.size(), [](size_t i, _VectorArgs *__va) { return CreateMetadata(*__va->__fbb, __va->__o->metadata[i].get(), __va->__rehasher); }, &_va ) : 0;
+  auto _signature_defs = _o->signature_defs.size() ? _fbb.CreateVector<::flatbuffers::Offset<tflite::SignatureDef>> (_o->signature_defs.size(), [](size_t i, _VectorArgs *__va) { return CreateSignatureDef(*__va->__fbb, __va->__o->signature_defs[i].get(), __va->__rehasher); }, &_va ) : 0;
   return tflite::CreateModel(
       _fbb,
       _version,
@@ -17648,7 +21419,7 @@
       _signature_defs);
 }
 
-inline bool VerifyQuantizationDetails(flatbuffers::Verifier &verifier, const void *obj, QuantizationDetails type) {
+inline bool VerifyQuantizationDetails(::flatbuffers::Verifier &verifier, const void *obj, QuantizationDetails type) {
   switch (type) {
     case QuantizationDetails_NONE: {
       return true;
@@ -17661,10 +21432,10 @@
   }
 }
 
-inline bool VerifyQuantizationDetailsVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types) {
+inline bool VerifyQuantizationDetailsVector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset<void>> *values, const ::flatbuffers::Vector<uint8_t> *types) {
   if (!values || !types) return !values && !types;
   if (values->size() != types->size()) return false;
-  for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) {
+  for (::flatbuffers::uoffset_t i = 0; i < values->size(); ++i) {
     if (!VerifyQuantizationDetails(
         verifier,  values->Get(i), types->GetEnum<QuantizationDetails>(i))) {
       return false;
@@ -17673,7 +21444,7 @@
   return true;
 }
 
-inline void *QuantizationDetailsUnion::UnPack(const void *obj, QuantizationDetails type, const flatbuffers::resolver_function_t *resolver) {
+inline void *QuantizationDetailsUnion::UnPack(const void *obj, QuantizationDetails type, const ::flatbuffers::resolver_function_t *resolver) {
   (void)resolver;
   switch (type) {
     case QuantizationDetails_CustomQuantization: {
@@ -17684,7 +21455,7 @@
   }
 }
 
-inline flatbuffers::Offset<void> QuantizationDetailsUnion::Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher) const {
+inline ::flatbuffers::Offset<void> QuantizationDetailsUnion::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ::flatbuffers::rehasher_function_t *_rehasher) const {
   (void)_rehasher;
   switch (type) {
     case QuantizationDetails_CustomQuantization: {
@@ -17719,7 +21490,7 @@
   type = QuantizationDetails_NONE;
 }
 
-inline bool VerifySparseIndexVector(flatbuffers::Verifier &verifier, const void *obj, SparseIndexVector type) {
+inline bool VerifySparseIndexVector(::flatbuffers::Verifier &verifier, const void *obj, SparseIndexVector type) {
   switch (type) {
     case SparseIndexVector_NONE: {
       return true;
@@ -17740,10 +21511,10 @@
   }
 }
 
-inline bool VerifySparseIndexVectorVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types) {
+inline bool VerifySparseIndexVectorVector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset<void>> *values, const ::flatbuffers::Vector<uint8_t> *types) {
   if (!values || !types) return !values && !types;
   if (values->size() != types->size()) return false;
-  for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) {
+  for (::flatbuffers::uoffset_t i = 0; i < values->size(); ++i) {
     if (!VerifySparseIndexVector(
         verifier,  values->Get(i), types->GetEnum<SparseIndexVector>(i))) {
       return false;
@@ -17752,7 +21523,7 @@
   return true;
 }
 
-inline void *SparseIndexVectorUnion::UnPack(const void *obj, SparseIndexVector type, const flatbuffers::resolver_function_t *resolver) {
+inline void *SparseIndexVectorUnion::UnPack(const void *obj, SparseIndexVector type, const ::flatbuffers::resolver_function_t *resolver) {
   (void)resolver;
   switch (type) {
     case SparseIndexVector_Int32Vector: {
@@ -17771,7 +21542,7 @@
   }
 }
 
-inline flatbuffers::Offset<void> SparseIndexVectorUnion::Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher) const {
+inline ::flatbuffers::Offset<void> SparseIndexVectorUnion::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ::flatbuffers::rehasher_function_t *_rehasher) const {
   (void)_rehasher;
   switch (type) {
     case SparseIndexVector_Int32Vector: {
@@ -17832,7 +21603,7 @@
   type = SparseIndexVector_NONE;
 }
 
-inline bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type) {
+inline bool VerifyBuiltinOptions(::flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type) {
   switch (type) {
     case BuiltinOptions_NONE: {
       return true;
@@ -18345,10 +22116,10 @@
   }
 }
 
-inline bool VerifyBuiltinOptionsVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types) {
+inline bool VerifyBuiltinOptionsVector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset<void>> *values, const ::flatbuffers::Vector<uint8_t> *types) {
   if (!values || !types) return !values && !types;
   if (values->size() != types->size()) return false;
-  for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) {
+  for (::flatbuffers::uoffset_t i = 0; i < values->size(); ++i) {
     if (!VerifyBuiltinOptions(
         verifier,  values->Get(i), types->GetEnum<BuiltinOptions>(i))) {
       return false;
@@ -18357,7 +22128,7 @@
   return true;
 }
 
-inline void *BuiltinOptionsUnion::UnPack(const void *obj, BuiltinOptions type, const flatbuffers::resolver_function_t *resolver) {
+inline void *BuiltinOptionsUnion::UnPack(const void *obj, BuiltinOptions type, const ::flatbuffers::resolver_function_t *resolver) {
   (void)resolver;
   switch (type) {
     case BuiltinOptions_Conv2DOptions: {
@@ -18868,7 +22639,7 @@
   }
 }
 
-inline flatbuffers::Offset<void> BuiltinOptionsUnion::Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher) const {
+inline ::flatbuffers::Offset<void> BuiltinOptionsUnion::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ::flatbuffers::rehasher_function_t *_rehasher) const {
   (void)_rehasher;
   switch (type) {
     case BuiltinOptions_Conv2DOptions: {
@@ -20528,12 +24299,503 @@
   type = BuiltinOptions_NONE;
 }
 
+inline bool VerifyBuiltinOptions2(::flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions2 type) {
+  switch (type) {
+    case BuiltinOptions2_NONE: {
+      return true;
+    }
+    case BuiltinOptions2_StablehloConcatenateOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloConcatenateOptions *>(obj);
+      return verifier.VerifyTable(ptr);
+    }
+    case BuiltinOptions2_StablehloBroadcastInDimOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloBroadcastInDimOptions *>(obj);
+      return verifier.VerifyTable(ptr);
+    }
+    case BuiltinOptions2_StablehloSliceOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloSliceOptions *>(obj);
+      return verifier.VerifyTable(ptr);
+    }
+    case BuiltinOptions2_StablehloConvolutionOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloConvolutionOptions *>(obj);
+      return verifier.VerifyTable(ptr);
+    }
+    case BuiltinOptions2_StablehloCustomCallOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloCustomCallOptions *>(obj);
+      return verifier.VerifyTable(ptr);
+    }
+    case BuiltinOptions2_StablehloReduceOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloReduceOptions *>(obj);
+      return verifier.VerifyTable(ptr);
+    }
+    case BuiltinOptions2_StablehloScatterOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloScatterOptions *>(obj);
+      return verifier.VerifyTable(ptr);
+    }
+    case BuiltinOptions2_StablehloCompareOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloCompareOptions *>(obj);
+      return verifier.VerifyTable(ptr);
+    }
+    case BuiltinOptions2_StablehloDynamicSliceOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloDynamicSliceOptions *>(obj);
+      return verifier.VerifyTable(ptr);
+    }
+    case BuiltinOptions2_StablehloPadOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloPadOptions *>(obj);
+      return verifier.VerifyTable(ptr);
+    }
+    case BuiltinOptions2_StablehloIotaOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloIotaOptions *>(obj);
+      return verifier.VerifyTable(ptr);
+    }
+    case BuiltinOptions2_StablehloDotGeneralOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloDotGeneralOptions *>(obj);
+      return verifier.VerifyTable(ptr);
+    }
+    case BuiltinOptions2_StablehloReduceWindowOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloReduceWindowOptions *>(obj);
+      return verifier.VerifyTable(ptr);
+    }
+    case BuiltinOptions2_StablehloSortOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloSortOptions *>(obj);
+      return verifier.VerifyTable(ptr);
+    }
+    case BuiltinOptions2_StablehloWhileOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloWhileOptions *>(obj);
+      return verifier.VerifyTable(ptr);
+    }
+    case BuiltinOptions2_StablehloGatherOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloGatherOptions *>(obj);
+      return verifier.VerifyTable(ptr);
+    }
+    case BuiltinOptions2_StablehloTransposeOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloTransposeOptions *>(obj);
+      return verifier.VerifyTable(ptr);
+    }
+    case BuiltinOptions2_DilateOptions: {
+      auto ptr = reinterpret_cast<const tflite::DilateOptions *>(obj);
+      return verifier.VerifyTable(ptr);
+    }
+    case BuiltinOptions2_StablehloRngBitGeneratorOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloRngBitGeneratorOptions *>(obj);
+      return verifier.VerifyTable(ptr);
+    }
+    case BuiltinOptions2_ReduceWindowOptions: {
+      auto ptr = reinterpret_cast<const tflite::ReduceWindowOptions *>(obj);
+      return verifier.VerifyTable(ptr);
+    }
+    case BuiltinOptions2_StableHLOCompositeOptions: {
+      auto ptr = reinterpret_cast<const tflite::StableHLOCompositeOptions *>(obj);
+      return verifier.VerifyTable(ptr);
+    }
+    default: return true;
+  }
+}
+
+inline bool VerifyBuiltinOptions2Vector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset<void>> *values, const ::flatbuffers::Vector<uint8_t> *types) {
+  if (!values || !types) return !values && !types;
+  if (values->size() != types->size()) return false;
+  for (::flatbuffers::uoffset_t i = 0; i < values->size(); ++i) {
+    if (!VerifyBuiltinOptions2(
+        verifier,  values->Get(i), types->GetEnum<BuiltinOptions2>(i))) {
+      return false;
+    }
+  }
+  return true;
+}
+
+inline void *BuiltinOptions2Union::UnPack(const void *obj, BuiltinOptions2 type, const ::flatbuffers::resolver_function_t *resolver) {
+  (void)resolver;
+  switch (type) {
+    case BuiltinOptions2_StablehloConcatenateOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloConcatenateOptions *>(obj);
+      return ptr->UnPack(resolver);
+    }
+    case BuiltinOptions2_StablehloBroadcastInDimOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloBroadcastInDimOptions *>(obj);
+      return ptr->UnPack(resolver);
+    }
+    case BuiltinOptions2_StablehloSliceOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloSliceOptions *>(obj);
+      return ptr->UnPack(resolver);
+    }
+    case BuiltinOptions2_StablehloConvolutionOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloConvolutionOptions *>(obj);
+      return ptr->UnPack(resolver);
+    }
+    case BuiltinOptions2_StablehloCustomCallOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloCustomCallOptions *>(obj);
+      return ptr->UnPack(resolver);
+    }
+    case BuiltinOptions2_StablehloReduceOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloReduceOptions *>(obj);
+      return ptr->UnPack(resolver);
+    }
+    case BuiltinOptions2_StablehloScatterOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloScatterOptions *>(obj);
+      return ptr->UnPack(resolver);
+    }
+    case BuiltinOptions2_StablehloCompareOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloCompareOptions *>(obj);
+      return ptr->UnPack(resolver);
+    }
+    case BuiltinOptions2_StablehloDynamicSliceOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloDynamicSliceOptions *>(obj);
+      return ptr->UnPack(resolver);
+    }
+    case BuiltinOptions2_StablehloPadOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloPadOptions *>(obj);
+      return ptr->UnPack(resolver);
+    }
+    case BuiltinOptions2_StablehloIotaOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloIotaOptions *>(obj);
+      return ptr->UnPack(resolver);
+    }
+    case BuiltinOptions2_StablehloDotGeneralOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloDotGeneralOptions *>(obj);
+      return ptr->UnPack(resolver);
+    }
+    case BuiltinOptions2_StablehloReduceWindowOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloReduceWindowOptions *>(obj);
+      return ptr->UnPack(resolver);
+    }
+    case BuiltinOptions2_StablehloSortOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloSortOptions *>(obj);
+      return ptr->UnPack(resolver);
+    }
+    case BuiltinOptions2_StablehloWhileOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloWhileOptions *>(obj);
+      return ptr->UnPack(resolver);
+    }
+    case BuiltinOptions2_StablehloGatherOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloGatherOptions *>(obj);
+      return ptr->UnPack(resolver);
+    }
+    case BuiltinOptions2_StablehloTransposeOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloTransposeOptions *>(obj);
+      return ptr->UnPack(resolver);
+    }
+    case BuiltinOptions2_DilateOptions: {
+      auto ptr = reinterpret_cast<const tflite::DilateOptions *>(obj);
+      return ptr->UnPack(resolver);
+    }
+    case BuiltinOptions2_StablehloRngBitGeneratorOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloRngBitGeneratorOptions *>(obj);
+      return ptr->UnPack(resolver);
+    }
+    case BuiltinOptions2_ReduceWindowOptions: {
+      auto ptr = reinterpret_cast<const tflite::ReduceWindowOptions *>(obj);
+      return ptr->UnPack(resolver);
+    }
+    case BuiltinOptions2_StableHLOCompositeOptions: {
+      auto ptr = reinterpret_cast<const tflite::StableHLOCompositeOptions *>(obj);
+      return ptr->UnPack(resolver);
+    }
+    default: return nullptr;
+  }
+}
+
+inline ::flatbuffers::Offset<void> BuiltinOptions2Union::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ::flatbuffers::rehasher_function_t *_rehasher) const {
+  (void)_rehasher;
+  switch (type) {
+    case BuiltinOptions2_StablehloConcatenateOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloConcatenateOptionsT *>(value);
+      return CreateStablehloConcatenateOptions(_fbb, ptr, _rehasher).Union();
+    }
+    case BuiltinOptions2_StablehloBroadcastInDimOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloBroadcastInDimOptionsT *>(value);
+      return CreateStablehloBroadcastInDimOptions(_fbb, ptr, _rehasher).Union();
+    }
+    case BuiltinOptions2_StablehloSliceOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloSliceOptionsT *>(value);
+      return CreateStablehloSliceOptions(_fbb, ptr, _rehasher).Union();
+    }
+    case BuiltinOptions2_StablehloConvolutionOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloConvolutionOptionsT *>(value);
+      return CreateStablehloConvolutionOptions(_fbb, ptr, _rehasher).Union();
+    }
+    case BuiltinOptions2_StablehloCustomCallOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloCustomCallOptionsT *>(value);
+      return CreateStablehloCustomCallOptions(_fbb, ptr, _rehasher).Union();
+    }
+    case BuiltinOptions2_StablehloReduceOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloReduceOptionsT *>(value);
+      return CreateStablehloReduceOptions(_fbb, ptr, _rehasher).Union();
+    }
+    case BuiltinOptions2_StablehloScatterOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloScatterOptionsT *>(value);
+      return CreateStablehloScatterOptions(_fbb, ptr, _rehasher).Union();
+    }
+    case BuiltinOptions2_StablehloCompareOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloCompareOptionsT *>(value);
+      return CreateStablehloCompareOptions(_fbb, ptr, _rehasher).Union();
+    }
+    case BuiltinOptions2_StablehloDynamicSliceOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloDynamicSliceOptionsT *>(value);
+      return CreateStablehloDynamicSliceOptions(_fbb, ptr, _rehasher).Union();
+    }
+    case BuiltinOptions2_StablehloPadOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloPadOptionsT *>(value);
+      return CreateStablehloPadOptions(_fbb, ptr, _rehasher).Union();
+    }
+    case BuiltinOptions2_StablehloIotaOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloIotaOptionsT *>(value);
+      return CreateStablehloIotaOptions(_fbb, ptr, _rehasher).Union();
+    }
+    case BuiltinOptions2_StablehloDotGeneralOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloDotGeneralOptionsT *>(value);
+      return CreateStablehloDotGeneralOptions(_fbb, ptr, _rehasher).Union();
+    }
+    case BuiltinOptions2_StablehloReduceWindowOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloReduceWindowOptionsT *>(value);
+      return CreateStablehloReduceWindowOptions(_fbb, ptr, _rehasher).Union();
+    }
+    case BuiltinOptions2_StablehloSortOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloSortOptionsT *>(value);
+      return CreateStablehloSortOptions(_fbb, ptr, _rehasher).Union();
+    }
+    case BuiltinOptions2_StablehloWhileOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloWhileOptionsT *>(value);
+      return CreateStablehloWhileOptions(_fbb, ptr, _rehasher).Union();
+    }
+    case BuiltinOptions2_StablehloGatherOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloGatherOptionsT *>(value);
+      return CreateStablehloGatherOptions(_fbb, ptr, _rehasher).Union();
+    }
+    case BuiltinOptions2_StablehloTransposeOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloTransposeOptionsT *>(value);
+      return CreateStablehloTransposeOptions(_fbb, ptr, _rehasher).Union();
+    }
+    case BuiltinOptions2_DilateOptions: {
+      auto ptr = reinterpret_cast<const tflite::DilateOptionsT *>(value);
+      return CreateDilateOptions(_fbb, ptr, _rehasher).Union();
+    }
+    case BuiltinOptions2_StablehloRngBitGeneratorOptions: {
+      auto ptr = reinterpret_cast<const tflite::StablehloRngBitGeneratorOptionsT *>(value);
+      return CreateStablehloRngBitGeneratorOptions(_fbb, ptr, _rehasher).Union();
+    }
+    case BuiltinOptions2_ReduceWindowOptions: {
+      auto ptr = reinterpret_cast<const tflite::ReduceWindowOptionsT *>(value);
+      return CreateReduceWindowOptions(_fbb, ptr, _rehasher).Union();
+    }
+    case BuiltinOptions2_StableHLOCompositeOptions: {
+      auto ptr = reinterpret_cast<const tflite::StableHLOCompositeOptionsT *>(value);
+      return CreateStableHLOCompositeOptions(_fbb, ptr, _rehasher).Union();
+    }
+    default: return 0;
+  }
+}
+
+inline BuiltinOptions2Union::BuiltinOptions2Union(const BuiltinOptions2Union &u) : type(u.type), value(nullptr) {
+  switch (type) {
+    case BuiltinOptions2_StablehloConcatenateOptions: {
+      value = new tflite::StablehloConcatenateOptionsT(*reinterpret_cast<tflite::StablehloConcatenateOptionsT *>(u.value));
+      break;
+    }
+    case BuiltinOptions2_StablehloBroadcastInDimOptions: {
+      value = new tflite::StablehloBroadcastInDimOptionsT(*reinterpret_cast<tflite::StablehloBroadcastInDimOptionsT *>(u.value));
+      break;
+    }
+    case BuiltinOptions2_StablehloSliceOptions: {
+      value = new tflite::StablehloSliceOptionsT(*reinterpret_cast<tflite::StablehloSliceOptionsT *>(u.value));
+      break;
+    }
+    case BuiltinOptions2_StablehloConvolutionOptions: {
+      value = new tflite::StablehloConvolutionOptionsT(*reinterpret_cast<tflite::StablehloConvolutionOptionsT *>(u.value));
+      break;
+    }
+    case BuiltinOptions2_StablehloCustomCallOptions: {
+      value = new tflite::StablehloCustomCallOptionsT(*reinterpret_cast<tflite::StablehloCustomCallOptionsT *>(u.value));
+      break;
+    }
+    case BuiltinOptions2_StablehloReduceOptions: {
+      value = new tflite::StablehloReduceOptionsT(*reinterpret_cast<tflite::StablehloReduceOptionsT *>(u.value));
+      break;
+    }
+    case BuiltinOptions2_StablehloScatterOptions: {
+      value = new tflite::StablehloScatterOptionsT(*reinterpret_cast<tflite::StablehloScatterOptionsT *>(u.value));
+      break;
+    }
+    case BuiltinOptions2_StablehloCompareOptions: {
+      value = new tflite::StablehloCompareOptionsT(*reinterpret_cast<tflite::StablehloCompareOptionsT *>(u.value));
+      break;
+    }
+    case BuiltinOptions2_StablehloDynamicSliceOptions: {
+      value = new tflite::StablehloDynamicSliceOptionsT(*reinterpret_cast<tflite::StablehloDynamicSliceOptionsT *>(u.value));
+      break;
+    }
+    case BuiltinOptions2_StablehloPadOptions: {
+      value = new tflite::StablehloPadOptionsT(*reinterpret_cast<tflite::StablehloPadOptionsT *>(u.value));
+      break;
+    }
+    case BuiltinOptions2_StablehloIotaOptions: {
+      value = new tflite::StablehloIotaOptionsT(*reinterpret_cast<tflite::StablehloIotaOptionsT *>(u.value));
+      break;
+    }
+    case BuiltinOptions2_StablehloDotGeneralOptions: {
+      value = new tflite::StablehloDotGeneralOptionsT(*reinterpret_cast<tflite::StablehloDotGeneralOptionsT *>(u.value));
+      break;
+    }
+    case BuiltinOptions2_StablehloReduceWindowOptions: {
+      value = new tflite::StablehloReduceWindowOptionsT(*reinterpret_cast<tflite::StablehloReduceWindowOptionsT *>(u.value));
+      break;
+    }
+    case BuiltinOptions2_StablehloSortOptions: {
+      value = new tflite::StablehloSortOptionsT(*reinterpret_cast<tflite::StablehloSortOptionsT *>(u.value));
+      break;
+    }
+    case BuiltinOptions2_StablehloWhileOptions: {
+      value = new tflite::StablehloWhileOptionsT(*reinterpret_cast<tflite::StablehloWhileOptionsT *>(u.value));
+      break;
+    }
+    case BuiltinOptions2_StablehloGatherOptions: {
+      value = new tflite::StablehloGatherOptionsT(*reinterpret_cast<tflite::StablehloGatherOptionsT *>(u.value));
+      break;
+    }
+    case BuiltinOptions2_StablehloTransposeOptions: {
+      value = new tflite::StablehloTransposeOptionsT(*reinterpret_cast<tflite::StablehloTransposeOptionsT *>(u.value));
+      break;
+    }
+    case BuiltinOptions2_DilateOptions: {
+      value = new tflite::DilateOptionsT(*reinterpret_cast<tflite::DilateOptionsT *>(u.value));
+      break;
+    }
+    case BuiltinOptions2_StablehloRngBitGeneratorOptions: {
+      value = new tflite::StablehloRngBitGeneratorOptionsT(*reinterpret_cast<tflite::StablehloRngBitGeneratorOptionsT *>(u.value));
+      break;
+    }
+    case BuiltinOptions2_ReduceWindowOptions: {
+      value = new tflite::ReduceWindowOptionsT(*reinterpret_cast<tflite::ReduceWindowOptionsT *>(u.value));
+      break;
+    }
+    case BuiltinOptions2_StableHLOCompositeOptions: {
+      value = new tflite::StableHLOCompositeOptionsT(*reinterpret_cast<tflite::StableHLOCompositeOptionsT *>(u.value));
+      break;
+    }
+    default:
+      break;
+  }
+}
+
+inline void BuiltinOptions2Union::Reset() {
+  switch (type) {
+    case BuiltinOptions2_StablehloConcatenateOptions: {
+      auto ptr = reinterpret_cast<tflite::StablehloConcatenateOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions2_StablehloBroadcastInDimOptions: {
+      auto ptr = reinterpret_cast<tflite::StablehloBroadcastInDimOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions2_StablehloSliceOptions: {
+      auto ptr = reinterpret_cast<tflite::StablehloSliceOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions2_StablehloConvolutionOptions: {
+      auto ptr = reinterpret_cast<tflite::StablehloConvolutionOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions2_StablehloCustomCallOptions: {
+      auto ptr = reinterpret_cast<tflite::StablehloCustomCallOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions2_StablehloReduceOptions: {
+      auto ptr = reinterpret_cast<tflite::StablehloReduceOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions2_StablehloScatterOptions: {
+      auto ptr = reinterpret_cast<tflite::StablehloScatterOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions2_StablehloCompareOptions: {
+      auto ptr = reinterpret_cast<tflite::StablehloCompareOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions2_StablehloDynamicSliceOptions: {
+      auto ptr = reinterpret_cast<tflite::StablehloDynamicSliceOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions2_StablehloPadOptions: {
+      auto ptr = reinterpret_cast<tflite::StablehloPadOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions2_StablehloIotaOptions: {
+      auto ptr = reinterpret_cast<tflite::StablehloIotaOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions2_StablehloDotGeneralOptions: {
+      auto ptr = reinterpret_cast<tflite::StablehloDotGeneralOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions2_StablehloReduceWindowOptions: {
+      auto ptr = reinterpret_cast<tflite::StablehloReduceWindowOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions2_StablehloSortOptions: {
+      auto ptr = reinterpret_cast<tflite::StablehloSortOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions2_StablehloWhileOptions: {
+      auto ptr = reinterpret_cast<tflite::StablehloWhileOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions2_StablehloGatherOptions: {
+      auto ptr = reinterpret_cast<tflite::StablehloGatherOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions2_StablehloTransposeOptions: {
+      auto ptr = reinterpret_cast<tflite::StablehloTransposeOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions2_DilateOptions: {
+      auto ptr = reinterpret_cast<tflite::DilateOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions2_StablehloRngBitGeneratorOptions: {
+      auto ptr = reinterpret_cast<tflite::StablehloRngBitGeneratorOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions2_ReduceWindowOptions: {
+      auto ptr = reinterpret_cast<tflite::ReduceWindowOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions2_StableHLOCompositeOptions: {
+      auto ptr = reinterpret_cast<tflite::StableHLOCompositeOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    default: break;
+  }
+  value = nullptr;
+  type = BuiltinOptions2_NONE;
+}
+
 inline const tflite::Model *GetModel(const void *buf) {
-  return flatbuffers::GetRoot<tflite::Model>(buf);
+  return ::flatbuffers::GetRoot<tflite::Model>(buf);
 }
 
 inline const tflite::Model *GetSizePrefixedModel(const void *buf) {
-  return flatbuffers::GetSizePrefixedRoot<tflite::Model>(buf);
+  return ::flatbuffers::GetSizePrefixedRoot<tflite::Model>(buf);
 }
 
 inline const char *ModelIdentifier() {
@@ -20541,22 +24803,22 @@
 }
 
 inline bool ModelBufferHasIdentifier(const void *buf) {
-  return flatbuffers::BufferHasIdentifier(
+  return ::flatbuffers::BufferHasIdentifier(
       buf, ModelIdentifier());
 }
 
 inline bool SizePrefixedModelBufferHasIdentifier(const void *buf) {
-  return flatbuffers::BufferHasIdentifier(
+  return ::flatbuffers::BufferHasIdentifier(
       buf, ModelIdentifier(), true);
 }
 
 inline bool VerifyModelBuffer(
-    flatbuffers::Verifier &verifier) {
+    ::flatbuffers::Verifier &verifier) {
   return verifier.VerifyBuffer<tflite::Model>(ModelIdentifier());
 }
 
 inline bool VerifySizePrefixedModelBuffer(
-    flatbuffers::Verifier &verifier) {
+    ::flatbuffers::Verifier &verifier) {
   return verifier.VerifySizePrefixedBuffer<tflite::Model>(ModelIdentifier());
 }
 
@@ -20565,26 +24827,26 @@
 }
 
 inline void FinishModelBuffer(
-    flatbuffers::FlatBufferBuilder &fbb,
-    flatbuffers::Offset<tflite::Model> root) {
+    ::flatbuffers::FlatBufferBuilder &fbb,
+    ::flatbuffers::Offset<tflite::Model> root) {
   fbb.Finish(root, ModelIdentifier());
 }
 
 inline void FinishSizePrefixedModelBuffer(
-    flatbuffers::FlatBufferBuilder &fbb,
-    flatbuffers::Offset<tflite::Model> root) {
+    ::flatbuffers::FlatBufferBuilder &fbb,
+    ::flatbuffers::Offset<tflite::Model> root) {
   fbb.FinishSizePrefixed(root, ModelIdentifier());
 }
 
 inline std::unique_ptr<tflite::ModelT> UnPackModel(
     const void *buf,
-    const flatbuffers::resolver_function_t *res = nullptr) {
+    const ::flatbuffers::resolver_function_t *res = nullptr) {
   return std::unique_ptr<tflite::ModelT>(GetModel(buf)->UnPack(res));
 }
 
 inline std::unique_ptr<tflite::ModelT> UnPackSizePrefixedModel(
     const void *buf,
-    const flatbuffers::resolver_function_t *res = nullptr) {
+    const ::flatbuffers::resolver_function_t *res = nullptr) {
   return std::unique_ptr<tflite::ModelT>(GetSizePrefixedModel(buf)->UnPack(res));
 }
 
diff --git a/tensorflow/lite/schema/schema_utils.h b/tensorflow/lite/schema/schema_utils.h
index 9cca36c..ff04cf1 100644
--- a/tensorflow/lite/schema/schema_utils.h
+++ b/tensorflow/lite/schema/schema_utils.h
@@ -15,19 +15,6 @@
 #ifndef TENSORFLOW_LITE_SCHEMA_SCHEMA_UTILS_H_
 #define TENSORFLOW_LITE_SCHEMA_SCHEMA_UTILS_H_
 
-#include "flatbuffers/flatbuffers.h"
-#include "tensorflow/lite/schema/schema_generated.h"
-
-namespace tflite {
-
-// The following methods are introduced to resolve op builtin code shortage
-// problem. The new builtin operator will be assigned to the extended builtin
-// code field in the flatbuffer schema. Those methods helps to hide builtin code
-// details.
-BuiltinOperator GetBuiltinCode(const OperatorCode *op_code);
-
-BuiltinOperator GetBuiltinCode(const OperatorCodeT *op_code);
-
-}  // namespace tflite
+#include "tensorflow/compiler/mlir/lite/schema/schema_utils.h"  // IWYU pragma: keep
 
 #endif  // TENSORFLOW_LITE_SCHEMA_SCHEMA_UTILS_H_
diff --git a/tensorflow/lite/tools/BUILD b/tensorflow/lite/tools/BUILD
index b5073c9..e7d5114 100644
--- a/tensorflow/lite/tools/BUILD
+++ b/tensorflow/lite/tools/BUILD
@@ -7,7 +7,7 @@
     visibility = ["//:__subpackages__"],
     deps = [
         "@flatbuffers//:runtime_py",
-        requirement("tensorflow-cpu"),
+        requirement("tensorflow"),
         "//tensorflow/lite/python:schema_py",
         "//tensorflow/lite/python:schema_util",
     ],
@@ -19,7 +19,7 @@
     srcs_version = "PY3",
     deps = [
         "@flatbuffers//:runtime_py",
-        requirement("tensorflow-cpu"),
+        requirement("tensorflow"),
         "//tensorflow/lite/python:schema_py",
     ],
 )
@@ -56,7 +56,7 @@
     deps = [
         ":flatbuffer_utils",
         ":test_utils",
-        requirement("tensorflow-cpu"),
+        requirement("tensorflow"),
     ],
 )
 
@@ -68,6 +68,6 @@
     deps = [
         ":test_utils",
         ":visualize",
-        requirement("tensorflow-cpu"),
+        requirement("tensorflow"),
     ],
 )
diff --git a/tensorflow/lite/tools/flatbuffer_utils.py b/tensorflow/lite/tools/flatbuffer_utils.py
index f448bfa..a7d1dd1 100644
--- a/tensorflow/lite/tools/flatbuffer_utils.py
+++ b/tensorflow/lite/tools/flatbuffer_utils.py
@@ -58,9 +58,38 @@
     raise RuntimeError('Input file not found at %r\n' % input_tflite_file)
   with gfile.GFile(input_tflite_file, 'rb') as input_file_handle:
     model_bytearray = bytearray(input_file_handle.read())
+  return read_model_from_bytearray(model_bytearray)
+
+
+def read_model_from_bytearray(model_bytearray):
+  """Reads a tflite model as a python object.
+
+  Args:
+    model_bytearray: TFLite model in bytearray format.
+
+  Returns:
+    A python object corresponding to the input tflite file.
+  """
   model = convert_bytearray_to_object(model_bytearray)
   if sys.byteorder == 'big':
     byte_swap_tflite_model_obj(model, 'little', 'big')
+
+  # Offset handling for models > 2GB
+  for buffer in model.buffers:
+    if buffer.offset:
+      buffer.data = model_bytearray[buffer.offset : buffer.offset + buffer.size]
+      buffer.offset = 0
+      buffer.size = 0
+  for subgraph in model.subgraphs:
+    for op in subgraph.operators:
+      if op.largeCustomOptionsOffset:
+        op.customOptions = model_bytearray[
+            op.largeCustomOptionsOffset : op.largeCustomOptionsOffset
+            + op.largeCustomOptionsSize
+        ]
+        op.largeCustomOptionsOffset = 0
+        op.largeCustomOptionsSize = 0
+
   return model
 
 
@@ -294,14 +323,29 @@
       buffer.data[i : i + chunksize]
       for i in range(0, len(buffer.data), chunksize)
   ]
-  buffer.data = b''.join(
-      [
-          int.from_bytes(byteswap, from_endiness).to_bytes(
-              chunksize, to_endiness
-          )
-          for byteswap in to_swap
-      ]
-  )
+  buffer.data = b''.join([
+      int.from_bytes(byteswap, from_endiness).to_bytes(chunksize, to_endiness)
+      for byteswap in to_swap
+  ])
+
+
+def byte_swap_string_content(buffer, from_endiness, to_endiness):
+  """Helper function for byte-swapping the string buffer.
+
+  Args:
+    buffer: TFLite string buffer of from_endiness format.
+    from_endiness: The original endianness format of the string buffer.
+    to_endiness: The destined endianness format of the string buffer.
+  """
+  num_of_strings = int.from_bytes(buffer.data[0:4], from_endiness)
+  string_content = bytearray(buffer.data[4 * (num_of_strings + 2) :])
+  prefix_data = b''.join([
+      int.from_bytes(buffer.data[i : i + 4], from_endiness).to_bytes(
+          4, to_endiness
+      )
+      for i in range(0, (num_of_strings + 1) * 4 + 1, 4)
+  ])
+  buffer.data = prefix_data + string_content
 
 
 def byte_swap_tflite_model_obj(model, from_endiness, to_endiness):
@@ -341,7 +385,11 @@
           and tensor.buffer not in buffer_swapped
           and model.buffers[tensor.buffer].data is not None
       ):
-        if tensor.type in types_of_16_bits:
+        if tensor.type == schema_fb.TensorType.STRING:
+          byte_swap_string_content(
+              model.buffers[tensor.buffer], from_endiness, to_endiness
+          )
+        elif tensor.type in types_of_16_bits:
           byte_swap_buffer_content(
               model.buffers[tensor.buffer], 2, from_endiness, to_endiness
           )
diff --git a/tensorflow/lite/tools/randomize_weights.py b/tensorflow/lite/tools/randomize_weights.py
index 2b36fb1..4baf50d 100644
--- a/tensorflow/lite/tools/randomize_weights.py
+++ b/tensorflow/lite/tools/randomize_weights.py
@@ -30,6 +30,15 @@
     'i.e., to be left unmodified.')
 flags.DEFINE_multi_string(
     'ops_to_skip', [], 'Ops in the TFLite model to be skipped / unmodified.')
+flags.DEFINE_multi_string(
+    'ops_operands_to_skip',
+    [],
+    'Op operand indices in the TFLite model to be skipped / unmodified. It'
+    ' should be specified in the format'
+    ' <op_name>:<operand_index>[,<operand_index>]. For example,'
+    ' TRANSPOSE_CONV:0,2 stands for skipping the TRANSPOSE_CONV operands'
+    ' indexed 0 and 2',
+)
 flags.DEFINE_integer('random_seed', 0, 'Input to the random number generator.')
 
 flags.mark_flag_as_required('input_tflite_file')
@@ -39,15 +48,31 @@
 def main(_):
   buffers_to_skip = FLAGS.buffers_to_skip
   ops_to_skip = [op.upper() for op in FLAGS.ops_to_skip]
+  ops_operands_to_skip = {}
+  for op_operands_to_skip in FLAGS.ops_operands_to_skip:
+    op_name, indices = op_operands_to_skip.split(':')
+    op_name_upper = op_name.upper()
+    if op_name_upper in ops_operands_to_skip:
+      raise ValueError(
+          'Indices for the same op must be specified only once multiple'
+          f' specification for op {op_name}.'
+      )
+    ops_operands_to_skip[op_name_upper] = list(map(int, indices.split(',')))
+
   model = flatbuffer_utils.read_model(FLAGS.input_tflite_file)
 
-  # Add in buffers for ops in ops_to_skip to the list of skipped buffers.
+  # Add in buffers for ops in ops_to_skip or ops_operands_to_skip to the list of
+  # skipped buffers.
   for graph in model.subgraphs:
     for op in graph.operators:
       op_name = flatbuffer_utils.opcode_to_name(model, op.opcodeIndex)
-      if op_name.upper() in ops_to_skip:
+      op_name_upper = op_name.upper()
+      if op_name_upper in ops_to_skip:
         for input_idx in op.inputs:
           buffers_to_skip.append(graph.tensors[input_idx].buffer)
+      if op_name_upper in ops_operands_to_skip:
+        for operand_idx in ops_operands_to_skip[op_name_upper]:
+          buffers_to_skip.append(graph.tensors[op.inputs[operand_idx]].buffer)
 
   flatbuffer_utils.randomize_weights(model, FLAGS.random_seed,
                                      FLAGS.buffers_to_skip)
diff --git a/third_party/flatbuffers/BUILD.external b/third_party/flatbuffers/BUILD.oss
similarity index 86%
rename from third_party/flatbuffers/BUILD.external
rename to third_party/flatbuffers/BUILD.oss
index dfd3a16..8f9d4eb 100644
--- a/third_party/flatbuffers/BUILD.external
+++ b/third_party/flatbuffers/BUILD.oss
@@ -2,30 +2,23 @@
 
 package(default_visibility = ["//visibility:public"])
 
-licenses(["notice"])
+licenses(["notice"])  # Apache 2.0
 
-exports_files(["LICENSE.txt"])
-
-licenses(["notice"])
+exports_files(["LICENSE"])
 
 config_setting(
-    name = "freebsd",
+    name = "platform_freebsd",
     values = {"cpu": "freebsd"},
 )
 
 config_setting(
-    name = "windows",
-    values = {"cpu": "x64_windows"},
-)
-
-config_setting(
     name = "platform_openbsd",
     values = {"cpu": "openbsd"},
 )
 
 config_setting(
-    name = "platform_freebsd",
-    values = {"cpu": "freebsd"},
+    name = "windows",
+    values = {"cpu": "x64_windows"},
 )
 
 load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library")
@@ -47,12 +40,13 @@
         "include/flatbuffers/allocator.h",
         "include/flatbuffers/array.h",
         "include/flatbuffers/base.h",
-        "include/flatbuffers/bfbs_generator.h",
         "include/flatbuffers/buffer.h",
         "include/flatbuffers/buffer_ref.h",
+        "include/flatbuffers/code_generator.h",
         "include/flatbuffers/code_generators.h",
         "include/flatbuffers/default_allocator.h",
         "include/flatbuffers/detached_buffer.h",
+        "include/flatbuffers/file_manager.h",
         "include/flatbuffers/flatbuffer_builder.h",
         "include/flatbuffers/flatbuffers.h",
         "include/flatbuffers/flex_flat_util.h",
@@ -73,7 +67,7 @@
         "include/flatbuffers/vector_downward.h",
         "include/flatbuffers/verifier.h",
     ],
-    visibility = ["//:__subpackages__"],
+    visibility = ["//visibility:public"],
 )
 
 # Public flatc compiler library.
@@ -90,9 +84,11 @@
 cc_binary(
     name = "flatc",
     linkopts = select({
-        ":freebsd": [
+        ":platform_freebsd": [
             "-lm",
         ],
+        # If Visual Studio 2022 developers facing linking errors,
+        # change the line below as ":windows": ["/DEFAULTLIB:msvcrt.lib"],
         ":windows": [],
         "//conditions:default": [
             "-lm",
@@ -110,7 +106,7 @@
     srcs = [
         "include/flatbuffers/flatc.h",
     ],
-    visibility = ["//:__subpackages__"],
+    visibility = ["//visibility:public"],
 )
 
 # Library used by flatbuffer_cc_library rules.
@@ -120,22 +116,13 @@
         "include/flatbuffers/allocator.h",
         "include/flatbuffers/array.h",
         "include/flatbuffers/base.h",
-        "include/flatbuffers/bfbs_generator.h",
         "include/flatbuffers/buffer.h",
         "include/flatbuffers/buffer_ref.h",
-        "include/flatbuffers/code_generators.h",
         "include/flatbuffers/default_allocator.h",
         "include/flatbuffers/detached_buffer.h",
         "include/flatbuffers/flatbuffer_builder.h",
         "include/flatbuffers/flatbuffers.h",
         "include/flatbuffers/flexbuffers.h",
-        "include/flatbuffers/grpc.h",
-        "include/flatbuffers/hash.h",
-        "include/flatbuffers/idl.h",
-        "include/flatbuffers/minireflect.h",
-        "include/flatbuffers/reflection.h",
-        "include/flatbuffers/reflection_generated.h",
-        "include/flatbuffers/registry.h",
         "include/flatbuffers/stl_emulation.h",
         "include/flatbuffers/string.h",
         "include/flatbuffers/struct.h",
@@ -187,4 +174,4 @@
     name = "runtime_py",
     srcs = [":runtime_py_srcs"],
     visibility = ["//visibility:public"],
-)
\ No newline at end of file
+)
diff --git a/third_party/flatbuffers/build_defs.bzl b/third_party/flatbuffers/build_defs.bzl
index 1a84928..92c9e2e 100644
--- a/third_party/flatbuffers/build_defs.bzl
+++ b/third_party/flatbuffers/build_defs.bzl
@@ -194,7 +194,7 @@
         reflection binaries for the schemas.
     '''
     output_headers = [
-        (out_prefix + "%s_generated.h") % (s.replace(".fbs", "").split("/")[-1])
+        (out_prefix + "%s_generated.h") % (s.replace(".fbs", "").split("/")[-1].split(":")[-1])
         for s in srcs
     ]
     reflection_name = "%s_reflection" % name if gen_reflections else ""
@@ -279,6 +279,11 @@
     else:
         no_includes_statement = []
 
+    if ctx.attr.language_flag == "--python":
+        onefile_statement = ["--gen-onefile"]
+    else:
+        onefile_statement = []
+
     # Need to generate all files in a directory.
     if not outputs:
         outputs = [ctx.actions.declare_directory("{}_all".format(ctx.attr.name))]
@@ -314,6 +319,7 @@
                             "-I",
                             ctx.bin_dir.path,
                         ] + no_includes_statement +
+                        onefile_statement +
                         include_paths_cmd_line + [
                 "--no-union-value-namespacing",
                 "--gen-object-api",
@@ -359,7 +365,6 @@
             cfg = "exec",
         ),
     },
-    output_to_genfiles = True,
 )
 
 def flatbuffer_py_strip_prefix_srcs(name, srcs = [], strip_prefix = ""):
@@ -394,6 +399,7 @@
             ctx.attr.deps[0].files.to_list()[0].path,
             ctx.outputs.out.path,
         ),
+        use_default_shell_env = True,
     )
 
 _concat_flatbuffer_py_srcs = rule(
@@ -401,7 +407,6 @@
     attrs = {
         "deps": attr.label_list(mandatory = True),
     },
-    output_to_genfiles = True,
     outputs = {"out": "%{name}.py"},
 )
 
@@ -432,6 +437,8 @@
         deps = deps,
         include_paths = include_paths,
     )
+
+    # TODO(b/235550563): Remove the concatnation rule with 2.0.6 update.
     all_srcs_no_include = "{}_srcs_no_include".format(name)
     _gen_flatbuffer_srcs(
         name = all_srcs_no_include,
diff --git a/third_party/flatbuffers/workspace.bzl b/third_party/flatbuffers/workspace.bzl
index e799a70..812ade7 100644
--- a/third_party/flatbuffers/workspace.bzl
+++ b/third_party/flatbuffers/workspace.bzl
@@ -5,12 +5,12 @@
 def repo():
     tf_http_archive(
         name = "flatbuffers",
-        strip_prefix = "flatbuffers-a66de58af9565586832c276fbb4251fc416bf07f",
-        sha256 = "da06ac2fc6fed8e38b6392f5a20fa24a4290cecaadd87aef16b6b84960408680",
+        strip_prefix = "flatbuffers-23.5.26",
+        sha256 = "1cce06b17cddd896b6d73cc047e36a254fb8df4d7ea18a46acf16c4c0cd3f3f3",
         urls = [
-            "https://github.com/google/flatbuffers/archive/a66de58af9565586832c276fbb4251fc416bf07f.tar.gz",
+            "https://github.com/google/flatbuffers/archive/v23.5.26.tar.gz",
         ],
-        build_file = "//third_party/flatbuffers:BUILD.external",
+        build_file = "//third_party/flatbuffers:BUILD.oss",
         system_build_file = "//third_party/flatbuffers:BUILD.system",
         link_files = {
             "//third_party/flatbuffers:build_defs.bzl": "build_defs.bzl",
diff --git a/third_party/hexagon/fully_connected.cc b/third_party/hexagon/fully_connected.cc
index c27c238..99ee1f3 100644
--- a/third_party/hexagon/fully_connected.cc
+++ b/third_party/hexagon/fully_connected.cc
@@ -129,4 +129,8 @@
                                    HexagonFullyConnectedEval);
 }
 
+TFLMInferenceRegistration RegisterInference_FULLY_CONNECTED() {
+  return tflite::micro::RegisterOp(HexagonFullyConnectedEval);
+}
+
 }  // namespace tflite
diff --git a/third_party/python_requirements.in b/third_party/python_requirements.in
index 3614311..29c081e 100644
--- a/third_party/python_requirements.in
+++ b/third_party/python_requirements.in
@@ -26,7 +26,9 @@
 # is sensitive to the Python environment (interpreter version, etc.) in which
 # it is run.
 
-tensorflow-cpu
+hexdump
+tensorflow
+twine
 numpy
 mako
 pillow
diff --git a/third_party/python_requirements.txt b/third_party/python_requirements.txt
index d0021b0..9246029 100644
--- a/third_party/python_requirements.txt
+++ b/third_party/python_requirements.txt
@@ -1,342 +1,520 @@
 #
-# This file is autogenerated by pip-compile with python 3.10
-# To update, run:
+# This file is autogenerated by pip-compile with Python 3.11
+# by the following command:
 #
 #    bazel run //third_party:python_requirements.update
 #
-absl-py==1.4.0 \
-    --hash=sha256:0d3fe606adfa4f7db64792dd4c7aee4ee0c38ab75dfd353b7a83ed3e957fcb47 \
-    --hash=sha256:d2c244d01048ba476e7c080bd2c6df5e141d211de80223460d5b3b8a2a58433d
+absl-py==2.0.0 \
+    --hash=sha256:9a28abb62774ae4e8edbe2dd4c49ffcd45a6a848952a5eccc6a49f3f0fc1e2f3 \
+    --hash=sha256:d9690211c5fcfefcdd1a45470ac2b5c5acd45241c3af71eed96bc5441746c0d5
     # via
     #   tensorboard
-    #   tensorflow-cpu
+    #   tensorflow
 astunparse==1.6.3 \
     --hash=sha256:5ad93a8456f0d084c3456d059fd9a92cce667963232cbf763eac3bc5b7940872 \
     --hash=sha256:c2652417f2c8b5bb325c885ae329bdf3f86424075c4fd1a128674bc6fba4b8e8
-    # via tensorflow-cpu
-cachetools==5.3.0 \
-    --hash=sha256:13dfddc7b8df938c21a940dfa6557ce6e94a2f1cdfa58eb90c805721d58f2c14 \
-    --hash=sha256:429e1a1e845c008ea6c85aa35d4b98b65d6a9763eeef3e37e92728a12d1de9d4
+    # via tensorflow
+cachetools==5.3.2 \
+    --hash=sha256:086ee420196f7b2ab9ca2db2520aca326318b68fe5ba8bc4d49cca91add450f2 \
+    --hash=sha256:861f35a13a451f94e301ce2bec7cac63e881232ccce7ed67fab9b5df4d3beaa1
     # via google-auth
-certifi==2023.5.7 \
-    --hash=sha256:0f0d56dc5a6ad56fd4ba36484d6cc34451e1c6548c61daad8c320169f91eddc7 \
-    --hash=sha256:c6c2e98f5c7869efca1f8916fed228dd91539f9f1b444c314c06eef02980c716
+certifi==2023.11.17 \
+    --hash=sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1 \
+    --hash=sha256:e036ab49d5b79556f99cfc2d9320b34cfbe5be05c5871b51de9329f0603b0474
     # via requests
-charset-normalizer==3.1.0 \
-    --hash=sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6 \
-    --hash=sha256:04eefcee095f58eaabe6dc3cc2262f3bcd776d2c67005880894f447b3f2cb9c1 \
-    --hash=sha256:0be65ccf618c1e7ac9b849c315cc2e8a8751d9cfdaa43027d4f6624bd587ab7e \
-    --hash=sha256:0c95f12b74681e9ae127728f7e5409cbbef9cd914d5896ef238cc779b8152373 \
-    --hash=sha256:0ca564606d2caafb0abe6d1b5311c2649e8071eb241b2d64e75a0d0065107e62 \
-    --hash=sha256:10c93628d7497c81686e8e5e557aafa78f230cd9e77dd0c40032ef90c18f2230 \
-    --hash=sha256:11d117e6c63e8f495412d37e7dc2e2fff09c34b2d09dbe2bee3c6229577818be \
-    --hash=sha256:11d3bcb7be35e7b1bba2c23beedac81ee893ac9871d0ba79effc7fc01167db6c \
-    --hash=sha256:12a2b561af122e3d94cdb97fe6fb2bb2b82cef0cdca131646fdb940a1eda04f0 \
-    --hash=sha256:12d1a39aa6b8c6f6248bb54550efcc1c38ce0d8096a146638fd4738e42284448 \
-    --hash=sha256:1435ae15108b1cb6fffbcea2af3d468683b7afed0169ad718451f8db5d1aff6f \
-    --hash=sha256:1c60b9c202d00052183c9be85e5eaf18a4ada0a47d188a83c8f5c5b23252f649 \
-    --hash=sha256:1e8fcdd8f672a1c4fc8d0bd3a2b576b152d2a349782d1eb0f6b8e52e9954731d \
-    --hash=sha256:20064ead0717cf9a73a6d1e779b23d149b53daf971169289ed2ed43a71e8d3b0 \
-    --hash=sha256:21fa558996782fc226b529fdd2ed7866c2c6ec91cee82735c98a197fae39f706 \
-    --hash=sha256:22908891a380d50738e1f978667536f6c6b526a2064156203d418f4856d6e86a \
-    --hash=sha256:3160a0fd9754aab7d47f95a6b63ab355388d890163eb03b2d2b87ab0a30cfa59 \
-    --hash=sha256:322102cdf1ab682ecc7d9b1c5eed4ec59657a65e1c146a0da342b78f4112db23 \
-    --hash=sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5 \
-    --hash=sha256:3573d376454d956553c356df45bb824262c397c6e26ce43e8203c4c540ee0acb \
-    --hash=sha256:3747443b6a904001473370d7810aa19c3a180ccd52a7157aacc264a5ac79265e \
-    --hash=sha256:38e812a197bf8e71a59fe55b757a84c1f946d0ac114acafaafaf21667a7e169e \
-    --hash=sha256:3a06f32c9634a8705f4ca9946d667609f52cf130d5548881401f1eb2c39b1e2c \
-    --hash=sha256:3a5fc78f9e3f501a1614a98f7c54d3969f3ad9bba8ba3d9b438c3bc5d047dd28 \
-    --hash=sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d \
-    --hash=sha256:3dc5b6a8ecfdc5748a7e429782598e4f17ef378e3e272eeb1340ea57c9109f41 \
-    --hash=sha256:4155b51ae05ed47199dc5b2a4e62abccb274cee6b01da5b895099b61b1982974 \
-    --hash=sha256:49919f8400b5e49e961f320c735388ee686a62327e773fa5b3ce6721f7e785ce \
-    --hash=sha256:53d0a3fa5f8af98a1e261de6a3943ca631c526635eb5817a87a59d9a57ebf48f \
-    --hash=sha256:5f008525e02908b20e04707a4f704cd286d94718f48bb33edddc7d7b584dddc1 \
-    --hash=sha256:628c985afb2c7d27a4800bfb609e03985aaecb42f955049957814e0491d4006d \
-    --hash=sha256:65ed923f84a6844de5fd29726b888e58c62820e0769b76565480e1fdc3d062f8 \
-    --hash=sha256:6734e606355834f13445b6adc38b53c0fd45f1a56a9ba06c2058f86893ae8017 \
-    --hash=sha256:6baf0baf0d5d265fa7944feb9f7451cc316bfe30e8df1a61b1bb08577c554f31 \
-    --hash=sha256:6f4f4668e1831850ebcc2fd0b1cd11721947b6dc7c00bf1c6bd3c929ae14f2c7 \
-    --hash=sha256:6f5c2e7bc8a4bf7c426599765b1bd33217ec84023033672c1e9a8b35eaeaaaf8 \
-    --hash=sha256:6f6c7a8a57e9405cad7485f4c9d3172ae486cfef1344b5ddd8e5239582d7355e \
-    --hash=sha256:7381c66e0561c5757ffe616af869b916c8b4e42b367ab29fedc98481d1e74e14 \
-    --hash=sha256:73dc03a6a7e30b7edc5b01b601e53e7fc924b04e1835e8e407c12c037e81adbd \
-    --hash=sha256:74db0052d985cf37fa111828d0dd230776ac99c740e1a758ad99094be4f1803d \
-    --hash=sha256:75f2568b4189dda1c567339b48cba4ac7384accb9c2a7ed655cd86b04055c795 \
-    --hash=sha256:78cacd03e79d009d95635e7d6ff12c21eb89b894c354bd2b2ed0b4763373693b \
-    --hash=sha256:80d1543d58bd3d6c271b66abf454d437a438dff01c3e62fdbcd68f2a11310d4b \
-    --hash=sha256:830d2948a5ec37c386d3170c483063798d7879037492540f10a475e3fd6f244b \
-    --hash=sha256:891cf9b48776b5c61c700b55a598621fdb7b1e301a550365571e9624f270c203 \
-    --hash=sha256:8f25e17ab3039b05f762b0a55ae0b3632b2e073d9c8fc88e89aca31a6198e88f \
-    --hash=sha256:9a3267620866c9d17b959a84dd0bd2d45719b817245e49371ead79ed4f710d19 \
-    --hash=sha256:a04f86f41a8916fe45ac5024ec477f41f886b3c435da2d4e3d2709b22ab02af1 \
-    --hash=sha256:aaf53a6cebad0eae578f062c7d462155eada9c172bd8c4d250b8c1d8eb7f916a \
-    --hash=sha256:abc1185d79f47c0a7aaf7e2412a0eb2c03b724581139193d2d82b3ad8cbb00ac \
-    --hash=sha256:ac0aa6cd53ab9a31d397f8303f92c42f534693528fafbdb997c82bae6e477ad9 \
-    --hash=sha256:ac3775e3311661d4adace3697a52ac0bab17edd166087d493b52d4f4f553f9f0 \
-    --hash=sha256:b06f0d3bf045158d2fb8837c5785fe9ff9b8c93358be64461a1089f5da983137 \
-    --hash=sha256:b116502087ce8a6b7a5f1814568ccbd0e9f6cfd99948aa59b0e241dc57cf739f \
-    --hash=sha256:b82fab78e0b1329e183a65260581de4375f619167478dddab510c6c6fb04d9b6 \
-    --hash=sha256:bd7163182133c0c7701b25e604cf1611c0d87712e56e88e7ee5d72deab3e76b5 \
-    --hash=sha256:c36bcbc0d5174a80d6cccf43a0ecaca44e81d25be4b7f90f0ed7bcfbb5a00909 \
-    --hash=sha256:c3af8e0f07399d3176b179f2e2634c3ce9c1301379a6b8c9c9aeecd481da494f \
-    --hash=sha256:c84132a54c750fda57729d1e2599bb598f5fa0344085dbde5003ba429a4798c0 \
-    --hash=sha256:cb7b2ab0188829593b9de646545175547a70d9a6e2b63bf2cd87a0a391599324 \
-    --hash=sha256:cca4def576f47a09a943666b8f829606bcb17e2bc2d5911a46c8f8da45f56755 \
-    --hash=sha256:cf6511efa4801b9b38dc5546d7547d5b5c6ef4b081c60b23e4d941d0eba9cbeb \
-    --hash=sha256:d16fd5252f883eb074ca55cb622bc0bee49b979ae4e8639fff6ca3ff44f9f854 \
-    --hash=sha256:d2686f91611f9e17f4548dbf050e75b079bbc2a82be565832bc8ea9047b61c8c \
-    --hash=sha256:d7fc3fca01da18fbabe4625d64bb612b533533ed10045a2ac3dd194bfa656b60 \
-    --hash=sha256:dd5653e67b149503c68c4018bf07e42eeed6b4e956b24c00ccdf93ac79cdff84 \
-    --hash=sha256:de5695a6f1d8340b12a5d6d4484290ee74d61e467c39ff03b39e30df62cf83a0 \
-    --hash=sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b \
-    --hash=sha256:e1b25e3ad6c909f398df8921780d6a3d120d8c09466720226fc621605b6f92b1 \
-    --hash=sha256:e633940f28c1e913615fd624fcdd72fdba807bf53ea6925d6a588e84e1151531 \
-    --hash=sha256:e89df2958e5159b811af9ff0f92614dabf4ff617c03a4c1c6ff53bf1c399e0e1 \
-    --hash=sha256:ea9f9c6034ea2d93d9147818f17c2a0860d41b71c38b9ce4d55f21b6f9165a11 \
-    --hash=sha256:f645caaf0008bacf349875a974220f1f1da349c5dbe7c4ec93048cdc785a3326 \
-    --hash=sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df \
-    --hash=sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab
+cffi==1.16.0 \
+    --hash=sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc \
+    --hash=sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a \
+    --hash=sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417 \
+    --hash=sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab \
+    --hash=sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520 \
+    --hash=sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36 \
+    --hash=sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743 \
+    --hash=sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8 \
+    --hash=sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed \
+    --hash=sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684 \
+    --hash=sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56 \
+    --hash=sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324 \
+    --hash=sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d \
+    --hash=sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235 \
+    --hash=sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e \
+    --hash=sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088 \
+    --hash=sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000 \
+    --hash=sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7 \
+    --hash=sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e \
+    --hash=sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673 \
+    --hash=sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c \
+    --hash=sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe \
+    --hash=sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2 \
+    --hash=sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098 \
+    --hash=sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8 \
+    --hash=sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a \
+    --hash=sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0 \
+    --hash=sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b \
+    --hash=sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896 \
+    --hash=sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e \
+    --hash=sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9 \
+    --hash=sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2 \
+    --hash=sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b \
+    --hash=sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6 \
+    --hash=sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404 \
+    --hash=sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f \
+    --hash=sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0 \
+    --hash=sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4 \
+    --hash=sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc \
+    --hash=sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936 \
+    --hash=sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba \
+    --hash=sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872 \
+    --hash=sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb \
+    --hash=sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614 \
+    --hash=sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1 \
+    --hash=sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d \
+    --hash=sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969 \
+    --hash=sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b \
+    --hash=sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4 \
+    --hash=sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627 \
+    --hash=sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956 \
+    --hash=sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357
+    # via cryptography
+charset-normalizer==3.3.2 \
+    --hash=sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027 \
+    --hash=sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087 \
+    --hash=sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786 \
+    --hash=sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8 \
+    --hash=sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09 \
+    --hash=sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185 \
+    --hash=sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574 \
+    --hash=sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e \
+    --hash=sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519 \
+    --hash=sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898 \
+    --hash=sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269 \
+    --hash=sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3 \
+    --hash=sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f \
+    --hash=sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6 \
+    --hash=sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8 \
+    --hash=sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a \
+    --hash=sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73 \
+    --hash=sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc \
+    --hash=sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714 \
+    --hash=sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2 \
+    --hash=sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc \
+    --hash=sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce \
+    --hash=sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d \
+    --hash=sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e \
+    --hash=sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6 \
+    --hash=sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269 \
+    --hash=sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96 \
+    --hash=sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d \
+    --hash=sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a \
+    --hash=sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4 \
+    --hash=sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77 \
+    --hash=sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d \
+    --hash=sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0 \
+    --hash=sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed \
+    --hash=sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068 \
+    --hash=sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac \
+    --hash=sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25 \
+    --hash=sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8 \
+    --hash=sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab \
+    --hash=sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26 \
+    --hash=sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2 \
+    --hash=sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db \
+    --hash=sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f \
+    --hash=sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5 \
+    --hash=sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99 \
+    --hash=sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c \
+    --hash=sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d \
+    --hash=sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811 \
+    --hash=sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa \
+    --hash=sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a \
+    --hash=sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03 \
+    --hash=sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b \
+    --hash=sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04 \
+    --hash=sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c \
+    --hash=sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001 \
+    --hash=sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458 \
+    --hash=sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389 \
+    --hash=sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99 \
+    --hash=sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985 \
+    --hash=sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537 \
+    --hash=sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238 \
+    --hash=sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f \
+    --hash=sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d \
+    --hash=sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796 \
+    --hash=sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a \
+    --hash=sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143 \
+    --hash=sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8 \
+    --hash=sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c \
+    --hash=sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5 \
+    --hash=sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5 \
+    --hash=sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711 \
+    --hash=sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4 \
+    --hash=sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6 \
+    --hash=sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c \
+    --hash=sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7 \
+    --hash=sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4 \
+    --hash=sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b \
+    --hash=sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae \
+    --hash=sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12 \
+    --hash=sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c \
+    --hash=sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae \
+    --hash=sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8 \
+    --hash=sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887 \
+    --hash=sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b \
+    --hash=sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4 \
+    --hash=sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f \
+    --hash=sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5 \
+    --hash=sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33 \
+    --hash=sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519 \
+    --hash=sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561
     # via requests
-flatbuffers==23.5.9 \
-    --hash=sha256:93a506b6ab771c79ce816e7b35a93ed08ec5b4c9edb811101a22c44a4152f018 \
-    --hash=sha256:a02eb8c2d61cba153cd211937de8f8f7764b6a7510971b2c4684ed8b02e6e571
-    # via tensorflow-cpu
-gast==0.4.0 \
-    --hash=sha256:40feb7b8b8434785585ab224d1568b857edb18297e5a3047f1ba012bc83b42c1 \
-    --hash=sha256:b7adcdd5adbebf1adf17378da5ba3f543684dbec47b1cda1f3997e573cd542c4
-    # via tensorflow-cpu
-google-auth==2.18.0 \
-    --hash=sha256:c66b488a8b005b23ccb97b1198b6cece516c91869091ac5b7c267422db2733c7 \
-    --hash=sha256:ef3f3a67fa54d421a1c155864570f9a8de9179cedc937bda496b7a8ca338e936
+cryptography==41.0.7 \
+    --hash=sha256:079b85658ea2f59c4f43b70f8119a52414cdb7be34da5d019a77bf96d473b960 \
+    --hash=sha256:09616eeaef406f99046553b8a40fbf8b1e70795a91885ba4c96a70793de5504a \
+    --hash=sha256:13f93ce9bea8016c253b34afc6bd6a75993e5c40672ed5405a9c832f0d4a00bc \
+    --hash=sha256:37a138589b12069efb424220bf78eac59ca68b95696fc622b6ccc1c0a197204a \
+    --hash=sha256:3c78451b78313fa81607fa1b3f1ae0a5ddd8014c38a02d9db0616133987b9cdf \
+    --hash=sha256:43f2552a2378b44869fe8827aa19e69512e3245a219104438692385b0ee119d1 \
+    --hash=sha256:48a0476626da912a44cc078f9893f292f0b3e4c739caf289268168d8f4702a39 \
+    --hash=sha256:49f0805fc0b2ac8d4882dd52f4a3b935b210935d500b6b805f321addc8177406 \
+    --hash=sha256:5429ec739a29df2e29e15d082f1d9ad683701f0ec7709ca479b3ff2708dae65a \
+    --hash=sha256:5a1b41bc97f1ad230a41657d9155113c7521953869ae57ac39ac7f1bb471469a \
+    --hash=sha256:68a2dec79deebc5d26d617bfdf6e8aab065a4f34934b22d3b5010df3ba36612c \
+    --hash=sha256:7a698cb1dac82c35fcf8fe3417a3aaba97de16a01ac914b89a0889d364d2f6be \
+    --hash=sha256:841df4caa01008bad253bce2a6f7b47f86dc9f08df4b433c404def869f590a15 \
+    --hash=sha256:90452ba79b8788fa380dfb587cca692976ef4e757b194b093d845e8d99f612f2 \
+    --hash=sha256:928258ba5d6f8ae644e764d0f996d61a8777559f72dfeb2eea7e2fe0ad6e782d \
+    --hash=sha256:af03b32695b24d85a75d40e1ba39ffe7db7ffcb099fe507b39fd41a565f1b157 \
+    --hash=sha256:b640981bf64a3e978a56167594a0e97db71c89a479da8e175d8bb5be5178c003 \
+    --hash=sha256:c5ca78485a255e03c32b513f8c2bc39fedb7f5c5f8535545bdc223a03b24f248 \
+    --hash=sha256:c7f3201ec47d5207841402594f1d7950879ef890c0c495052fa62f58283fde1a \
+    --hash=sha256:d5ec85080cce7b0513cfd233914eb8b7bbd0633f1d1703aa28d1dd5a72f678ec \
+    --hash=sha256:d6c391c021ab1f7a82da5d8d0b3cee2f4b2c455ec86c8aebbc84837a631ff309 \
+    --hash=sha256:e3114da6d7f95d2dee7d3f4eec16dacff819740bbab931aff8648cb13c5ff5e7 \
+    --hash=sha256:f983596065a18a2183e7f79ab3fd4c475205b839e02cbc0efbbf9666c4b3083d
+    # via secretstorage
+docutils==0.20.1 \
+    --hash=sha256:96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6 \
+    --hash=sha256:f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b
+    # via readme-renderer
+flatbuffers==23.5.26 \
+    --hash=sha256:9ea1144cac05ce5d86e2859f431c6cd5e66cd9c78c558317c7955fb8d4c78d89 \
+    --hash=sha256:c0ff356da363087b915fde4b8b45bdda73432fc17cddb3c8157472eab1422ad1
+    # via tensorflow
+gast==0.5.4 \
+    --hash=sha256:6fc4fa5fa10b72fb8aab4ae58bcb023058386e67b6fa2e3e34cec5c769360316 \
+    --hash=sha256:9c270fe5f4b130969b54174de7db4e764b09b4f7f67ccfc32480e29f78348d97
+    # via tensorflow
+google-auth==2.26.2 \
+    --hash=sha256:3f445c8ce9b61ed6459aad86d8ccdba4a9afed841b2d1451a11ef4db08957424 \
+    --hash=sha256:97327dbbf58cccb58fc5a1712bba403ae76668e64814eb30f7316f7e27126b81
     # via
     #   google-auth-oauthlib
     #   tensorboard
-google-auth-oauthlib==1.0.0 \
-    --hash=sha256:95880ca704928c300f48194d1770cf5b1462835b6e49db61445a520f793fd5fb \
-    --hash=sha256:e375064964820b47221a7e1b7ee1fd77051b6323c3f9e3e19785f78ab67ecfc5
+google-auth-oauthlib==1.2.0 \
+    --hash=sha256:292d2d3783349f2b0734a0a0207b1e1e322ac193c2c09d8f7c613fb7cc501ea8 \
+    --hash=sha256:297c1ce4cb13a99b5834c74a1fe03252e1e499716718b190f56bcb9c4abc4faf
     # via tensorboard
 google-pasta==0.2.0 \
     --hash=sha256:4612951da876b1a10fe3960d7226f0c7682cf901e16ac06e473b267a5afa8954 \
     --hash=sha256:b32482794a366b5366a32c92a9a9201b107821889935a02b3e51f6b432ea84ed \
     --hash=sha256:c9f2c8dfc8f96d0d5808299920721be30c9eec37f2389f28904f454565c8a16e
-    # via tensorflow-cpu
-grpcio==1.54.0 \
-    --hash=sha256:02000b005bc8b72ff50c477b6431e8886b29961159e8b8d03c00b3dd9139baed \
-    --hash=sha256:031bbd26656e0739e4b2c81c172155fb26e274b8d0312d67aefc730bcba915b6 \
-    --hash=sha256:1209d6b002b26e939e4c8ea37a3d5b4028eb9555394ea69fb1adbd4b61a10bb8 \
-    --hash=sha256:125ed35aa3868efa82eabffece6264bf638cfdc9f0cd58ddb17936684aafd0f8 \
-    --hash=sha256:1382bc499af92901c2240c4d540c74eae8a671e4fe9839bfeefdfcc3a106b5e2 \
-    --hash=sha256:16bca8092dd994f2864fdab278ae052fad4913f36f35238b2dd11af2d55a87db \
-    --hash=sha256:1c59d899ee7160638613a452f9a4931de22623e7ba17897d8e3e348c2e9d8d0b \
-    --hash=sha256:1d109df30641d050e009105f9c9ca5a35d01e34d2ee2a4e9c0984d392fd6d704 \
-    --hash=sha256:1fa7d6ddd33abbd3c8b3d7d07c56c40ea3d1891ce3cd2aa9fa73105ed5331866 \
-    --hash=sha256:21c4a1aae861748d6393a3ff7867473996c139a77f90326d9f4104bebb22d8b8 \
-    --hash=sha256:224166f06ccdaf884bf35690bf4272997c1405de3035d61384ccb5b25a4c1ca8 \
-    --hash=sha256:2262bd3512ba9e9f0e91d287393df6f33c18999317de45629b7bd46c40f16ba9 \
-    --hash=sha256:2585b3c294631a39b33f9f967a59b0fad23b1a71a212eba6bc1e3ca6e6eec9ee \
-    --hash=sha256:27fb030a4589d2536daec5ff5ba2a128f4f155149efab578fe2de2cb21596d3d \
-    --hash=sha256:30fbbce11ffeb4f9f91c13fe04899aaf3e9a81708bedf267bf447596b95df26b \
-    --hash=sha256:3930669c9e6f08a2eed824738c3d5699d11cd47a0ecc13b68ed11595710b1133 \
-    --hash=sha256:3b170e441e91e4f321e46d3cc95a01cb307a4596da54aca59eb78ab0fc03754d \
-    --hash=sha256:3db71c6f1ab688d8dfc102271cedc9828beac335a3a4372ec54b8bf11b43fd29 \
-    --hash=sha256:48cb7af77238ba16c77879009003f6b22c23425e5ee59cb2c4c103ec040638a5 \
-    --hash=sha256:49eace8ea55fbc42c733defbda1e4feb6d3844ecd875b01bb8b923709e0f5ec8 \
-    --hash=sha256:533eaf5b2a79a3c6f35cbd6a095ae99cac7f4f9c0e08bdcf86c130efd3c32adf \
-    --hash=sha256:5942a3e05630e1ef5b7b5752e5da6582460a2e4431dae603de89fc45f9ec5aa9 \
-    --hash=sha256:62117486460c83acd3b5d85c12edd5fe20a374630475388cfc89829831d3eb79 \
-    --hash=sha256:650f5f2c9ab1275b4006707411bb6d6bc927886874a287661c3c6f332d4c068b \
-    --hash=sha256:6dc1e2c9ac292c9a484ef900c568ccb2d6b4dfe26dfa0163d5bc815bb836c78d \
-    --hash=sha256:73c238ef6e4b64272df7eec976bb016c73d3ab5a6c7e9cd906ab700523d312f3 \
-    --hash=sha256:775a2f70501370e5ba54e1ee3464413bff9bd85bd9a0b25c989698c44a6fb52f \
-    --hash=sha256:860fcd6db7dce80d0a673a1cc898ce6bc3d4783d195bbe0e911bf8a62c93ff3f \
-    --hash=sha256:87f47bf9520bba4083d65ab911f8f4c0ac3efa8241993edd74c8dd08ae87552f \
-    --hash=sha256:960b176e0bb2b4afeaa1cd2002db1e82ae54c9b6e27ea93570a42316524e77cf \
-    --hash=sha256:a7caf553ccaf715ec05b28c9b2ab2ee3fdb4036626d779aa09cf7cbf54b71445 \
-    --hash=sha256:a947d5298a0bbdd4d15671024bf33e2b7da79a70de600ed29ba7e0fef0539ebb \
-    --hash=sha256:a97b0d01ae595c997c1d9d8249e2d2da829c2d8a4bdc29bb8f76c11a94915c9a \
-    --hash=sha256:b7655f809e3420f80ce3bf89737169a9dce73238af594049754a1128132c0da4 \
-    --hash=sha256:c33744d0d1a7322da445c0fe726ea6d4e3ef2dfb0539eadf23dce366f52f546c \
-    --hash=sha256:c55a9cf5cba80fb88c850915c865b8ed78d5e46e1f2ec1b27692f3eaaf0dca7e \
-    --hash=sha256:d2f62fb1c914a038921677cfa536d645cb80e3dd07dc4859a3c92d75407b90a5 \
-    --hash=sha256:d8ae6e0df3a608e99ee1acafaafd7db0830106394d54571c1ece57f650124ce9 \
-    --hash=sha256:e355ee9da9c1c03f174efea59292b17a95e0b7b4d7d2a389265f731a9887d5a9 \
-    --hash=sha256:e3e526062c690517b42bba66ffe38aaf8bc99a180a78212e7b22baa86902f690 \
-    --hash=sha256:eb0807323572642ab73fd86fe53d88d843ce617dd1ddf430351ad0759809a0ae \
-    --hash=sha256:ebff0738be0499d7db74d20dca9f22a7b27deae31e1bf92ea44924fd69eb6251 \
-    --hash=sha256:ed36e854449ff6c2f8ee145f94851fe171298e1e793f44d4f672c4a0d78064e7 \
-    --hash=sha256:ed3d458ded32ff3a58f157b60cc140c88f7ac8c506a1c567b2a9ee8a2fd2ce54 \
-    --hash=sha256:f4a7dca8ccd8023d916b900aa3c626f1bd181bd5b70159479b142f957ff420e4
+    # via tensorflow
+grpcio==1.60.0 \
+    --hash=sha256:073f959c6f570797272f4ee9464a9997eaf1e98c27cb680225b82b53390d61e6 \
+    --hash=sha256:0fd3b3968ffe7643144580f260f04d39d869fcc2cddb745deef078b09fd2b328 \
+    --hash=sha256:1434ca77d6fed4ea312901122dc8da6c4389738bf5788f43efb19a838ac03ead \
+    --hash=sha256:1c30bb23a41df95109db130a6cc1b974844300ae2e5d68dd4947aacba5985aa5 \
+    --hash=sha256:20e7a4f7ded59097c84059d28230907cd97130fa74f4a8bfd1d8e5ba18c81491 \
+    --hash=sha256:2199165a1affb666aa24adf0c97436686d0a61bc5fc113c037701fb7c7fceb96 \
+    --hash=sha256:297eef542156d6b15174a1231c2493ea9ea54af8d016b8ca7d5d9cc65cfcc444 \
+    --hash=sha256:2aef56e85901c2397bd557c5ba514f84de1f0ae5dd132f5d5fed042858115951 \
+    --hash=sha256:30943b9530fe3620e3b195c03130396cd0ee3a0d10a66c1bee715d1819001eaf \
+    --hash=sha256:3b36a2c6d4920ba88fa98075fdd58ff94ebeb8acc1215ae07d01a418af4c0253 \
+    --hash=sha256:428d699c8553c27e98f4d29fdc0f0edc50e9a8a7590bfd294d2edb0da7be3629 \
+    --hash=sha256:43e636dc2ce9ece583b3e2ca41df5c983f4302eabc6d5f9cd04f0562ee8ec1ae \
+    --hash=sha256:452ca5b4afed30e7274445dd9b441a35ece656ec1600b77fff8c216fdf07df43 \
+    --hash=sha256:467a7d31554892eed2aa6c2d47ded1079fc40ea0b9601d9f79204afa8902274b \
+    --hash=sha256:4b44d7e39964e808b071714666a812049765b26b3ea48c4434a3b317bac82f14 \
+    --hash=sha256:4c86343cf9ff7b2514dd229bdd88ebba760bd8973dac192ae687ff75e39ebfab \
+    --hash=sha256:5208a57eae445ae84a219dfd8b56e04313445d146873117b5fa75f3245bc1390 \
+    --hash=sha256:5ff21e000ff2f658430bde5288cb1ac440ff15c0d7d18b5fb222f941b46cb0d2 \
+    --hash=sha256:675997222f2e2f22928fbba640824aebd43791116034f62006e19730715166c0 \
+    --hash=sha256:676e4a44e740deaba0f4d95ba1d8c5c89a2fcc43d02c39f69450b1fa19d39590 \
+    --hash=sha256:6e306b97966369b889985a562ede9d99180def39ad42c8014628dd3cc343f508 \
+    --hash=sha256:6fd9584bf1bccdfff1512719316efa77be235469e1e3295dce64538c4773840b \
+    --hash=sha256:705a68a973c4c76db5d369ed573fec3367d7d196673fa86614b33d8c8e9ebb08 \
+    --hash=sha256:74d7d9fa97809c5b892449b28a65ec2bfa458a4735ddad46074f9f7d9550ad13 \
+    --hash=sha256:77c8a317f0fd5a0a2be8ed5cbe5341537d5c00bb79b3bb27ba7c5378ba77dbca \
+    --hash=sha256:79a050889eb8d57a93ed21d9585bb63fca881666fc709f5d9f7f9372f5e7fd03 \
+    --hash=sha256:7db16dd4ea1b05ada504f08d0dca1cd9b926bed3770f50e715d087c6f00ad748 \
+    --hash=sha256:83f2292ae292ed5a47cdcb9821039ca8e88902923198f2193f13959360c01860 \
+    --hash=sha256:87c9224acba0ad8bacddf427a1c2772e17ce50b3042a789547af27099c5f751d \
+    --hash=sha256:8a97a681e82bc11a42d4372fe57898d270a2707f36c45c6676e49ce0d5c41353 \
+    --hash=sha256:9073513ec380434eb8d21970e1ab3161041de121f4018bbed3146839451a6d8e \
+    --hash=sha256:90bdd76b3f04bdb21de5398b8a7c629676c81dfac290f5f19883857e9371d28c \
+    --hash=sha256:91229d7203f1ef0ab420c9b53fe2ca5c1fbeb34f69b3bc1b5089466237a4a134 \
+    --hash=sha256:92f88ca1b956eb8427a11bb8b4a0c0b2b03377235fc5102cb05e533b8693a415 \
+    --hash=sha256:95ae3e8e2c1b9bf671817f86f155c5da7d49a2289c5cf27a319458c3e025c320 \
+    --hash=sha256:9e30be89a75ee66aec7f9e60086fadb37ff8c0ba49a022887c28c134341f7179 \
+    --hash=sha256:a48edde788b99214613e440fce495bbe2b1e142a7f214cce9e0832146c41e324 \
+    --hash=sha256:a7152fa6e597c20cb97923407cf0934e14224af42c2b8d915f48bc3ad2d9ac18 \
+    --hash=sha256:a9c7b71211f066908e518a2ef7a5e211670761651039f0d6a80d8d40054047df \
+    --hash=sha256:b0571a5aef36ba9177e262dc88a9240c866d903a62799e44fd4aae3f9a2ec17e \
+    --hash=sha256:b0fb2d4801546598ac5cd18e3ec79c1a9af8b8f2a86283c55a5337c5aeca4b1b \
+    --hash=sha256:b10241250cb77657ab315270b064a6c7f1add58af94befa20687e7c8d8603ae6 \
+    --hash=sha256:b87efe4a380887425bb15f220079aa8336276398dc33fce38c64d278164f963d \
+    --hash=sha256:b98f43fcdb16172dec5f4b49f2fece4b16a99fd284d81c6bbac1b3b69fcbe0ff \
+    --hash=sha256:c193109ca4070cdcaa6eff00fdb5a56233dc7610216d58fb81638f89f02e4968 \
+    --hash=sha256:c826f93050c73e7769806f92e601e0efdb83ec8d7c76ddf45d514fee54e8e619 \
+    --hash=sha256:d020cfa595d1f8f5c6b343530cd3ca16ae5aefdd1e832b777f9f0eb105f5b139 \
+    --hash=sha256:d6a478581b1a1a8fdf3318ecb5f4d0cda41cacdffe2b527c23707c9c1b8fdb55 \
+    --hash=sha256:de2ad69c9a094bf37c1102b5744c9aec6cf74d2b635558b779085d0263166454 \
+    --hash=sha256:e278eafb406f7e1b1b637c2cf51d3ad45883bb5bd1ca56bc05e4fc135dfdaa65 \
+    --hash=sha256:e381fe0c2aa6c03b056ad8f52f8efca7be29fb4d9ae2f8873520843b6039612a \
+    --hash=sha256:e61e76020e0c332a98290323ecfec721c9544f5b739fab925b6e8cbe1944cf19 \
+    --hash=sha256:f897c3b127532e6befdcf961c415c97f320d45614daf84deba0a54e64ea2457b \
+    --hash=sha256:fb464479934778d7cc5baf463d959d361954d6533ad34c3a4f1d267e86ee25fd
     # via
     #   tensorboard
-    #   tensorflow-cpu
-h5py==3.8.0 \
-    --hash=sha256:03890b1c123d024fb0239a3279737d5432498c1901c354f8b10d8221d1d16235 \
-    --hash=sha256:0fef76e10b9216657fa37e7edff6d8be0709b25bd5066474c229b56cf0098df9 \
-    --hash=sha256:26ffc344ec9984d2cd3ca0265007299a8bac8d85c1ad48f4639d8d3aed2af171 \
-    --hash=sha256:290e00fa2de74a10688d1bac98d5a9cdd43f14f58e562c580b5b3dfbd358ecae \
-    --hash=sha256:33b15aae79e9147aebe1d0e54099cbcde8d65e3e227cd5b59e49b1272aa0e09d \
-    --hash=sha256:36761693efbe53df179627a775476dcbc37727d6e920958277a7efbc18f1fb73 \
-    --hash=sha256:377865821fe80ad984d003723d6f8890bd54ceeb5981b43c0313b9df95411b30 \
-    --hash=sha256:49bc857635f935fa30e92e61ac1e87496df8f260a6945a3235e43a9890426866 \
-    --hash=sha256:4a506fc223def428f4329e7e1f9fe1c8c593eab226e7c0942c8d75308ad49950 \
-    --hash=sha256:533d7dad466ddb7e3b30af274b630eb7c1a6e4ddf01d1c373a0334dc2152110a \
-    --hash=sha256:5fd2252d1fc364ba0e93dd0b7089f4906b66805cb4e6aca7fa8874ac08649647 \
-    --hash=sha256:6fead82f0c4000cf38d53f9c030780d81bfa0220218aee13b90b7701c937d95f \
-    --hash=sha256:7f3350fc0a8407d668b13247861c2acd23f7f5fe7d060a3ad9b0820f5fcbcae0 \
-    --hash=sha256:8f55d9c6c84d7d09c79fb85979e97b81ec6071cc776a97eb6b96f8f6ec767323 \
-    --hash=sha256:98a240cd4c1bfd568aaa52ec42d263131a2582dab82d74d3d42a0d954cac12be \
-    --hash=sha256:9f6f6ffadd6bfa9b2c5b334805eb4b19ca0a5620433659d8f7fb86692c40a359 \
-    --hash=sha256:b685453e538b2b5934c58a644ac3f3b3d0cec1a01b6fb26d57388e9f9b674ad0 \
-    --hash=sha256:b7865de06779b14d98068da387333ad9bf2756b5b579cc887fac169bc08f87c3 \
-    --hash=sha256:bacaa1c16810dd2b3e4417f8e730971b7c4d53d234de61fe4a918db78e80e1e4 \
-    --hash=sha256:bae730580ae928de409d63cbe4fdca4c82c3ad2bed30511d19d34e995d63c77e \
-    --hash=sha256:c3389b63222b1c7a158bb7fe69d11ca00066740ec5574596d47a2fe5317f563a \
-    --hash=sha256:c873ba9fd4fa875ad62ce0e4891725e257a8fe7f5abdbc17e51a5d54819be55c \
-    --hash=sha256:db03e3f2c716205fbdabb34d0848459840585225eb97b4f08998c743821ca323 \
-    --hash=sha256:f47f757d1b76f0ecb8aa0508ec8d1b390df67a8b67ee2515dc1b046f3a1596ea \
-    --hash=sha256:f891b17e3a3e974e93f9e34e7cca9f530806543571ce078998676a555837d91d
-    # via tensorflow-cpu
-idna==3.4 \
-    --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \
-    --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2
-    # via requests
-jax==0.4.9 \
-    --hash=sha256:1ed135cd08f48e4baf10f6eafdb4a4cdae781f9052b5838c09c91a9f4fa75f09
-    # via tensorflow-cpu
-keras==2.12.0 \
-    --hash=sha256:35c39534011e909645fb93515452e98e1a0ce23727b55d4918b9c58b2308c15e
-    # via tensorflow-cpu
-libclang==16.0.0 \
-    --hash=sha256:2adce42ae652f312245b8f4eda6f30b4076fb61f7619f2dfd0a0c31dee4c32b9 \
-    --hash=sha256:65258a6bb3e7dc31dc9b26f8d42f53c9d3b959643ade291fcd1aef4855303ca6 \
-    --hash=sha256:7b6686b67a0daa84b4c614bcc119578329fc4fbb52b919565b7376b507c4793b \
-    --hash=sha256:a043138caaf2cb076ebb060c6281ec95612926645d425c691991fc9df00e8a24 \
-    --hash=sha256:af55a4aa86fdfe6b2ec68bc8cfe5fdac6c448d591ca7648be86ca17099b41ca8 \
-    --hash=sha256:bf4628fc4da7a1dd06a244f9b8e121c5ec68076a763c59d6b13cbb103acc935b \
-    --hash=sha256:eb59652cb0559c0e71784ff4c8ba24c14644becc907b1446563ecfaa622d523b \
-    --hash=sha256:ee20bf93e3dd330f71fc50cdbf13b92ced0aec8e540be64251db53502a9b33f7
-    # via tensorflow-cpu
-mako==1.2.4 \
-    --hash=sha256:c97c79c018b9165ac9922ae4f32da095ffd3c4e6872b45eded42926deea46818 \
-    --hash=sha256:d60a3903dc3bb01a18ad6a89cdbe2e4eadc69c0bc8ef1e3773ba53d44c3f7a34
+    #   tensorflow
+h5py==3.10.0 \
+    --hash=sha256:012ab448590e3c4f5a8dd0f3533255bc57f80629bf7c5054cf4c87b30085063c \
+    --hash=sha256:212bb997a91e6a895ce5e2f365ba764debeaef5d2dca5c6fb7098d66607adf99 \
+    --hash=sha256:2381e98af081b6df7f6db300cd88f88e740649d77736e4b53db522d8874bf2dc \
+    --hash=sha256:2c8e4fda19eb769e9a678592e67eaec3a2f069f7570c82d2da909c077aa94339 \
+    --hash=sha256:3074ec45d3dc6e178c6f96834cf8108bf4a60ccb5ab044e16909580352010a97 \
+    --hash=sha256:3c97d03f87f215e7759a354460fb4b0d0f27001450b18b23e556e7856a0b21c3 \
+    --hash=sha256:43a61b2c2ad65b1fabc28802d133eed34debcc2c8b420cb213d3d4ef4d3e2229 \
+    --hash=sha256:492305a074327e8d2513011fa9fffeb54ecb28a04ca4c4227d7e1e9616d35641 \
+    --hash=sha256:5dfc65ac21fa2f630323c92453cadbe8d4f504726ec42f6a56cf80c2f90d6c52 \
+    --hash=sha256:667fe23ab33d5a8a6b77970b229e14ae3bb84e4ea3382cc08567a02e1499eedd \
+    --hash=sha256:6c013d2e79c00f28ffd0cc24e68665ea03ae9069e167087b2adb5727d2736a52 \
+    --hash=sha256:781a24263c1270a62cd67be59f293e62b76acfcc207afa6384961762bb88ea03 \
+    --hash=sha256:86df4c2de68257b8539a18646ceccdcf2c1ce6b1768ada16c8dcfb489eafae20 \
+    --hash=sha256:90286b79abd085e4e65e07c1bd7ee65a0f15818ea107f44b175d2dfe1a4674b7 \
+    --hash=sha256:92273ce69ae4983dadb898fd4d3bea5eb90820df953b401282ee69ad648df684 \
+    --hash=sha256:93dd840bd675787fc0b016f7a05fc6efe37312a08849d9dd4053fd0377b1357f \
+    --hash=sha256:9450464b458cca2c86252b624279115dcaa7260a40d3cb1594bf2b410a2bd1a3 \
+    --hash=sha256:ae2f0201c950059676455daf92700eeb57dcf5caaf71b9e1328e6e6593601770 \
+    --hash=sha256:aece0e2e1ed2aab076c41802e50a0c3e5ef8816d60ece39107d68717d4559824 \
+    --hash=sha256:b963fb772964fc1d1563c57e4e2e874022ce11f75ddc6df1a626f42bd49ab99f \
+    --hash=sha256:ba9ab36be991119a3ff32d0c7cbe5faf9b8d2375b5278b2aea64effbeba66039 \
+    --hash=sha256:d4682b94fd36ab217352be438abd44c8f357c5449b8995e63886b431d260f3d3 \
+    --hash=sha256:d93adc48ceeb33347eb24a634fb787efc7ae4644e6ea4ba733d099605045c049 \
+    --hash=sha256:f42e6c30698b520f0295d70157c4e202a9e402406f50dc08f5a7bc416b24e52d \
+    --hash=sha256:fd6f6d1384a9f491732cee233b99cd4bfd6e838a8815cc86722f9d2ee64032af
+    # via tensorflow
+hexdump==3.3 \
+    --hash=sha256:d781a43b0c16ace3f9366aade73e8ad3a7bd5137d58f0b45ab2d3f54876f20db
     # via -r third_party/python_requirements.in
-markdown==3.4.3 \
-    --hash=sha256:065fd4df22da73a625f14890dd77eb8040edcbd68794bcd35943be14490608b2 \
-    --hash=sha256:8bf101198e004dc93e84a12a7395e31aac6a9c9942848ae1d99b9d72cf9b3520
+idna==3.6 \
+    --hash=sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca \
+    --hash=sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f
+    # via requests
+importlib-metadata==7.0.1 \
+    --hash=sha256:4805911c3a4ec7c3966410053e9ec6a1fecd629117df5adee56dfc9432a1081e \
+    --hash=sha256:f238736bb06590ae52ac1fab06a3a9ef1d8dce2b7a35b5ab329371d6c8f5d2cc
+    # via
+    #   keyring
+    #   twine
+    #   yapf
+jaraco-classes==3.3.0 \
+    --hash=sha256:10afa92b6743f25c0cf5f37c6bb6e18e2c5bb84a16527ccfc0040ea377e7aaeb \
+    --hash=sha256:c063dd08e89217cee02c8d5e5ec560f2c8ce6cdc2fcdc2e68f7b2e5547ed3621
+    # via keyring
+jeepney==0.8.0 \
+    --hash=sha256:5efe48d255973902f6badc3ce55e2aa6c5c3b3bc642059ef3a91247bcfcc5806 \
+    --hash=sha256:c0a454ad016ca575060802ee4d590dd912e35c122fa04e70306de3d076cce755
+    # via
+    #   keyring
+    #   secretstorage
+keras==2.15.0 \
+    --hash=sha256:2dcc6d2e30cf9c951064b63c1f4c404b966c59caf09e01f3549138ec8ee0dd1f \
+    --hash=sha256:81871d298c064dc4ac6b58440fdae67bfcf47c8d7ad28580fab401834c06a575
+    # via tensorflow
+keyring==24.3.0 \
+    --hash=sha256:4446d35d636e6a10b8bce7caa66913dd9eca5fd222ca03a3d42c38608ac30836 \
+    --hash=sha256:e730ecffd309658a08ee82535a3b5ec4b4c8669a9be11efb66249d8e0aeb9a25
+    # via twine
+libclang==16.0.6 \
+    --hash=sha256:1e940048f51d0b0999099a9b78629ab8a64b62af5e9ff1b2b062439c21ee244d \
+    --hash=sha256:4a9acbfd9c135a72f80d5dbff7588dfb0c81458244a89b9e83526e8595880e0a \
+    --hash=sha256:4acdde39dfe410c877b4ccc0d4b57eb952100e4ee26bbdf6cfdb88e2033a7d31 \
+    --hash=sha256:8130482120500476a027171f8f3c8dfc2536b591716eea71fc5da22cae13131b \
+    --hash=sha256:88bc7e7b393c32e41e03ba77ef02fdd647da1f764c2cd028e69e0837080b79f6 \
+    --hash=sha256:9dcdc730939788b8b69ffd6d5d75fe5366e3ee007f1e36a99799ec0b0c001492 \
+    --hash=sha256:d80ed5827736ed5ec2bcedf536720476fd9d4fa4c79ef0cb24aea4c59332f361 \
+    --hash=sha256:da9e47ebc3f0a6d90fb169ef25f9fbcd29b4a4ef97a8b0e3e3a17800af1423f4 \
+    --hash=sha256:daab4a11dae228f1efa9efa3fe638b493b14d8d52c71fb3c7019e2f1df4514c2 \
+    --hash=sha256:e1a5ad1e895e5443e205568c85c04b4608e4e973dae42f4dfd9cb46c81d1486b \
+    --hash=sha256:f04e3060ae1f207f234d0608900c99c50edcb743e5e18276d78da2ddd727d39f
+    # via tensorflow
+mako==1.3.0 \
+    --hash=sha256:57d4e997349f1a92035aa25c17ace371a4213f2ca42f99bee9a602500cfd54d9 \
+    --hash=sha256:e3a9d388fd00e87043edbe8792f45880ac0114e9c4adc69f6e9bfb2c55e3b11b
+    # via -r third_party/python_requirements.in
+markdown==3.5.2 \
+    --hash=sha256:d43323865d89fc0cb9b20c75fc8ad313af307cc087e84b657d9eec768eddeadd \
+    --hash=sha256:e1ac7b3dc550ee80e602e71c1d168002f062e49f1b11e26a36264dafd4df2ef8
     # via tensorboard
-markupsafe==2.1.2 \
-    --hash=sha256:0576fe974b40a400449768941d5d0858cc624e3249dfd1e0c33674e5c7ca7aed \
-    --hash=sha256:085fd3201e7b12809f9e6e9bc1e5c96a368c8523fad5afb02afe3c051ae4afcc \
-    --hash=sha256:090376d812fb6ac5f171e5938e82e7f2d7adc2b629101cec0db8b267815c85e2 \
-    --hash=sha256:0b462104ba25f1ac006fdab8b6a01ebbfbce9ed37fd37fd4acd70c67c973e460 \
-    --hash=sha256:137678c63c977754abe9086a3ec011e8fd985ab90631145dfb9294ad09c102a7 \
-    --hash=sha256:1bea30e9bf331f3fef67e0a3877b2288593c98a21ccb2cf29b74c581a4eb3af0 \
-    --hash=sha256:22152d00bf4a9c7c83960521fc558f55a1adbc0631fbb00a9471e097b19d72e1 \
-    --hash=sha256:22731d79ed2eb25059ae3df1dfc9cb1546691cc41f4e3130fe6bfbc3ecbbecfa \
-    --hash=sha256:2298c859cfc5463f1b64bd55cb3e602528db6fa0f3cfd568d3605c50678f8f03 \
-    --hash=sha256:28057e985dace2f478e042eaa15606c7efccb700797660629da387eb289b9323 \
-    --hash=sha256:2e7821bffe00aa6bd07a23913b7f4e01328c3d5cc0b40b36c0bd81d362faeb65 \
-    --hash=sha256:2ec4f2d48ae59bbb9d1f9d7efb9236ab81429a764dedca114f5fdabbc3788013 \
-    --hash=sha256:340bea174e9761308703ae988e982005aedf427de816d1afe98147668cc03036 \
-    --hash=sha256:40627dcf047dadb22cd25ea7ecfe9cbf3bbbad0482ee5920b582f3809c97654f \
-    --hash=sha256:40dfd3fefbef579ee058f139733ac336312663c6706d1163b82b3003fb1925c4 \
-    --hash=sha256:4cf06cdc1dda95223e9d2d3c58d3b178aa5dacb35ee7e3bbac10e4e1faacb419 \
-    --hash=sha256:50c42830a633fa0cf9e7d27664637532791bfc31c731a87b202d2d8ac40c3ea2 \
-    --hash=sha256:55f44b440d491028addb3b88f72207d71eeebfb7b5dbf0643f7c023ae1fba619 \
-    --hash=sha256:608e7073dfa9e38a85d38474c082d4281f4ce276ac0010224eaba11e929dd53a \
-    --hash=sha256:63ba06c9941e46fa389d389644e2d8225e0e3e5ebcc4ff1ea8506dce646f8c8a \
-    --hash=sha256:65608c35bfb8a76763f37036547f7adfd09270fbdbf96608be2bead319728fcd \
-    --hash=sha256:665a36ae6f8f20a4676b53224e33d456a6f5a72657d9c83c2aa00765072f31f7 \
-    --hash=sha256:6d6607f98fcf17e534162f0709aaad3ab7a96032723d8ac8750ffe17ae5a0666 \
-    --hash=sha256:7313ce6a199651c4ed9d7e4cfb4aa56fe923b1adf9af3b420ee14e6d9a73df65 \
-    --hash=sha256:7668b52e102d0ed87cb082380a7e2e1e78737ddecdde129acadb0eccc5423859 \
-    --hash=sha256:7df70907e00c970c60b9ef2938d894a9381f38e6b9db73c5be35e59d92e06625 \
-    --hash=sha256:7e007132af78ea9df29495dbf7b5824cb71648d7133cf7848a2a5dd00d36f9ff \
-    --hash=sha256:835fb5e38fd89328e9c81067fd642b3593c33e1e17e2fdbf77f5676abb14a156 \
-    --hash=sha256:8bca7e26c1dd751236cfb0c6c72d4ad61d986e9a41bbf76cb445f69488b2a2bd \
-    --hash=sha256:8db032bf0ce9022a8e41a22598eefc802314e81b879ae093f36ce9ddf39ab1ba \
-    --hash=sha256:99625a92da8229df6d44335e6fcc558a5037dd0a760e11d84be2260e6f37002f \
-    --hash=sha256:9cad97ab29dfc3f0249b483412c85c8ef4766d96cdf9dcf5a1e3caa3f3661cf1 \
-    --hash=sha256:a4abaec6ca3ad8660690236d11bfe28dfd707778e2442b45addd2f086d6ef094 \
-    --hash=sha256:a6e40afa7f45939ca356f348c8e23048e02cb109ced1eb8420961b2f40fb373a \
-    --hash=sha256:a6f2fcca746e8d5910e18782f976489939d54a91f9411c32051b4aab2bd7c513 \
-    --hash=sha256:a806db027852538d2ad7555b203300173dd1b77ba116de92da9afbc3a3be3eed \
-    --hash=sha256:abcabc8c2b26036d62d4c746381a6f7cf60aafcc653198ad678306986b09450d \
-    --hash=sha256:b8526c6d437855442cdd3d87eede9c425c4445ea011ca38d937db299382e6fa3 \
-    --hash=sha256:bb06feb762bade6bf3c8b844462274db0c76acc95c52abe8dbed28ae3d44a147 \
-    --hash=sha256:c0a33bc9f02c2b17c3ea382f91b4db0e6cde90b63b296422a939886a7a80de1c \
-    --hash=sha256:c4a549890a45f57f1ebf99c067a4ad0cb423a05544accaf2b065246827ed9603 \
-    --hash=sha256:ca244fa73f50a800cf8c3ebf7fd93149ec37f5cb9596aa8873ae2c1d23498601 \
-    --hash=sha256:cf877ab4ed6e302ec1d04952ca358b381a882fbd9d1b07cccbfd61783561f98a \
-    --hash=sha256:d9d971ec1e79906046aa3ca266de79eac42f1dbf3612a05dc9368125952bd1a1 \
-    --hash=sha256:da25303d91526aac3672ee6d49a2f3db2d9502a4a60b55519feb1a4c7714e07d \
-    --hash=sha256:e55e40ff0cc8cc5c07996915ad367fa47da6b3fc091fdadca7f5403239c5fec3 \
-    --hash=sha256:f03a532d7dee1bed20bc4884194a16160a2de9ffc6354b3878ec9682bb623c54 \
-    --hash=sha256:f1cd098434e83e656abf198f103a8207a8187c0fc110306691a2e94a78d0abb2 \
-    --hash=sha256:f2bfb563d0211ce16b63c7cb9395d2c682a23187f54c3d79bfec33e6705473c6 \
-    --hash=sha256:f8ffb705ffcf5ddd0e80b65ddf7bed7ee4f5a441ea7d3419e861a12eaf41af58
+markdown-it-py==3.0.0 \
+    --hash=sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1 \
+    --hash=sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb
+    # via rich
+markupsafe==2.1.3 \
+    --hash=sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e \
+    --hash=sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e \
+    --hash=sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431 \
+    --hash=sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686 \
+    --hash=sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c \
+    --hash=sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559 \
+    --hash=sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc \
+    --hash=sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb \
+    --hash=sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939 \
+    --hash=sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c \
+    --hash=sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0 \
+    --hash=sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4 \
+    --hash=sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9 \
+    --hash=sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575 \
+    --hash=sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba \
+    --hash=sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d \
+    --hash=sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd \
+    --hash=sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3 \
+    --hash=sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00 \
+    --hash=sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155 \
+    --hash=sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac \
+    --hash=sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52 \
+    --hash=sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f \
+    --hash=sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8 \
+    --hash=sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b \
+    --hash=sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007 \
+    --hash=sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24 \
+    --hash=sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea \
+    --hash=sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198 \
+    --hash=sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0 \
+    --hash=sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee \
+    --hash=sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be \
+    --hash=sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2 \
+    --hash=sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1 \
+    --hash=sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707 \
+    --hash=sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6 \
+    --hash=sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c \
+    --hash=sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58 \
+    --hash=sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823 \
+    --hash=sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779 \
+    --hash=sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636 \
+    --hash=sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c \
+    --hash=sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad \
+    --hash=sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee \
+    --hash=sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc \
+    --hash=sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2 \
+    --hash=sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48 \
+    --hash=sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7 \
+    --hash=sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e \
+    --hash=sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b \
+    --hash=sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa \
+    --hash=sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5 \
+    --hash=sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e \
+    --hash=sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb \
+    --hash=sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9 \
+    --hash=sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57 \
+    --hash=sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc \
+    --hash=sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc \
+    --hash=sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2 \
+    --hash=sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11
     # via
     #   mako
     #   werkzeug
-ml-dtypes==0.1.0 \
-    --hash=sha256:273c306db846005b83a98c9c7ec3dc8fa20e8f11c3772c8e8c20cc12d8abfd4b \
-    --hash=sha256:2de6c81b0da398d54aabdd7de599f2dfc43e30b65d9fad379a69f4cc4ae165d3 \
-    --hash=sha256:36e8518c8fd2c38729f020125f39ef07b045f5c16d0846320c7252d7773285ee \
-    --hash=sha256:377f2d5cfbf809b59188e0bfda4a0774e658541f575b637fee4850d99c2f9fdc \
-    --hash=sha256:41b6beeaea47e2466b94068664c9a45b2a65dd023aa4e5deeb5a73303661344e \
-    --hash=sha256:77970beeb3cf6ac559c4b6b393f24778a5abd34fafbaad82d5a0d17d0f148936 \
-    --hash=sha256:87aa1cf83d41fed5a40fc27ee57ac4c1bf904e940f082531d3d58f1c318b5928 \
-    --hash=sha256:8c5c9fe086756fbc1bf51296431d64429536093cf6e2ba592e042d7fc07c8514 \
-    --hash=sha256:8de9bbf5bed587a1166699447ea14d1e8fe66d4e812811e37bf2f4d988475476 \
-    --hash=sha256:99fab8262d175c49bf1655c229244f301274e8289449c350ba4d5b95ade07d9a \
-    --hash=sha256:a29fbf128583673eca0f43def1dbe77e02c1e8b8a8331db2877bbb57d091ef11 \
-    --hash=sha256:ad765159ac6c18d5ee7d325fcf34d3106a9d9d7a49713d998f5cfa330a1459b4 \
-    --hash=sha256:b9c5578dffd85637a7dd437192de18bc1a14eb6ba7d53ef40de3f84c51c789e5 \
-    --hash=sha256:c1fc0afe63ce99069f9d7e0693a61cfd0aea90241fc3821af9953d0c11f4048a \
-    --hash=sha256:c9218175b06764b8ddc95cb18d11a6c4b48a4b103a31c9ea2b2c3cd0cfc369f8 \
-    --hash=sha256:dee8ea629b8e3e20c6649852c1b9deacfa13384ab9337f2c9e717e401d102f23 \
-    --hash=sha256:ffb7882dd46399217dc54f37affc899e0a29a4cfb63e5bf733ac0baf4a179c77
-    # via jax
-numpy==1.23.5 \
-    --hash=sha256:01dd17cbb340bf0fc23981e52e1d18a9d4050792e8fb8363cecbf066a84b827d \
-    --hash=sha256:06005a2ef6014e9956c09ba07654f9837d9e26696a0470e42beedadb78c11b07 \
-    --hash=sha256:09b7847f7e83ca37c6e627682f145856de331049013853f344f37b0c9690e3df \
-    --hash=sha256:0aaee12d8883552fadfc41e96b4c82ee7d794949e2a7c3b3a7201e968c7ecab9 \
-    --hash=sha256:0cbe9848fad08baf71de1a39e12d1b6310f1d5b2d0ea4de051058e6e1076852d \
-    --hash=sha256:1b1766d6f397c18153d40015ddfc79ddb715cabadc04d2d228d4e5a8bc4ded1a \
-    --hash=sha256:33161613d2269025873025b33e879825ec7b1d831317e68f4f2f0f84ed14c719 \
-    --hash=sha256:5039f55555e1eab31124a5768898c9e22c25a65c1e0037f4d7c495a45778c9f2 \
-    --hash=sha256:522e26bbf6377e4d76403826ed689c295b0b238f46c28a7251ab94716da0b280 \
-    --hash=sha256:56e454c7833e94ec9769fa0f86e6ff8e42ee38ce0ce1fa4cbb747ea7e06d56aa \
-    --hash=sha256:58f545efd1108e647604a1b5aa809591ccd2540f468a880bedb97247e72db387 \
-    --hash=sha256:5e05b1c973a9f858c74367553e236f287e749465f773328c8ef31abe18f691e1 \
-    --hash=sha256:7903ba8ab592b82014713c491f6c5d3a1cde5b4a3bf116404e08f5b52f6daf43 \
-    --hash=sha256:8969bfd28e85c81f3f94eb4a66bc2cf1dbdc5c18efc320af34bffc54d6b1e38f \
-    --hash=sha256:92c8c1e89a1f5028a4c6d9e3ccbe311b6ba53694811269b992c0b224269e2398 \
-    --hash=sha256:9c88793f78fca17da0145455f0d7826bcb9f37da4764af27ac945488116efe63 \
-    --hash=sha256:a7ac231a08bb37f852849bbb387a20a57574a97cfc7b6cabb488a4fc8be176de \
-    --hash=sha256:abdde9f795cf292fb9651ed48185503a2ff29be87770c3b8e2a14b0cd7aa16f8 \
-    --hash=sha256:af1da88f6bc3d2338ebbf0e22fe487821ea4d8e89053e25fa59d1d79786e7481 \
-    --hash=sha256:b2a9ab7c279c91974f756c84c365a669a887efa287365a8e2c418f8b3ba73fb0 \
-    --hash=sha256:bf837dc63ba5c06dc8797c398db1e223a466c7ece27a1f7b5232ba3466aafe3d \
-    --hash=sha256:ca51fcfcc5f9354c45f400059e88bc09215fb71a48d3768fb80e357f3b457e1e \
-    --hash=sha256:ce571367b6dfe60af04e04a1834ca2dc5f46004ac1cc756fb95319f64c095a96 \
-    --hash=sha256:d208a0f8729f3fb790ed18a003f3a57895b989b40ea4dce4717e9cf4af62c6bb \
-    --hash=sha256:dbee87b469018961d1ad79b1a5d50c0ae850000b639bcb1b694e9981083243b6 \
-    --hash=sha256:e9f4c4e51567b616be64e05d517c79a8a22f3606499941d97bb76f2ca59f982d \
-    --hash=sha256:f063b69b090c9d918f9df0a12116029e274daf0181df392839661c4c7ec9018a \
-    --hash=sha256:f9a909a8bae284d46bbfdefbdd4a262ba19d3bc9921b1e76126b1d21c3c34135
+mdurl==0.1.2 \
+    --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \
+    --hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba
+    # via markdown-it-py
+ml-dtypes==0.2.0 \
+    --hash=sha256:022d5a4ee6be14569c2a9d1549e16f1ec87ca949681d0dca59995445d5fcdd5b \
+    --hash=sha256:1749b60348da71fd3c2ab303fdbc1965958dc50775ead41f5669c932a341cafd \
+    --hash=sha256:32107e7fa9f62db9a5281de923861325211dfff87bd23faefb27b303314635ab \
+    --hash=sha256:35b984cddbe8173b545a0e3334fe56ea1a5c3eb67c507f60d0cfde1d3fa8f8c2 \
+    --hash=sha256:36d28b8861a8931695e5a31176cad5ae85f6504906650dea5598fbec06c94606 \
+    --hash=sha256:50845af3e9a601810751b55091dee6c2562403fa1cb4e0123675cf3a4fc2c17a \
+    --hash=sha256:6488eb642acaaf08d8020f6de0a38acee7ac324c1e6e92ee0c0fea42422cb797 \
+    --hash=sha256:75015818a7fccf99a5e8ed18720cb430f3e71a8838388840f4cdf225c036c983 \
+    --hash=sha256:80d304c836d73f10605c58ccf7789c171cc229bfb678748adfb7cea2510dfd0e \
+    --hash=sha256:832a019a1b6db5c4422032ca9940a990fa104eee420f643713241b3a518977fa \
+    --hash=sha256:8faaf0897942c8253dd126662776ba45f0a5861968cf0f06d6d465f8a7bc298a \
+    --hash=sha256:bc29a0524ef5e23a7fbb8d881bdecabeb3fc1d19d9db61785d077a86cb94fab2 \
+    --hash=sha256:df6a76e1c8adf484feb138ed323f9f40a7b6c21788f120f7c78bec20ac37ee81 \
+    --hash=sha256:e70047ec2c83eaee01afdfdabee2c5b0c133804d90d0f7db4dd903360fcc537c \
+    --hash=sha256:e85ba8e24cf48d456e564688e981cf379d4c8e644db0a2f719b78de281bac2ca \
+    --hash=sha256:f00c71c8c63e03aff313bc6a7aeaac9a4f1483a921a6ffefa6d4404efd1af3d0 \
+    --hash=sha256:f08c391c2794f2aad358e6f4c70785a9a7b1df980ef4c232b3ccd4f6fe39f719
+    # via tensorflow
+more-itertools==10.2.0 \
+    --hash=sha256:686b06abe565edfab151cb8fd385a05651e1fdf8f0a14191e4439283421f8684 \
+    --hash=sha256:8fccb480c43d3e99a00087634c06dd02b0d50fbf088b380de5a41a015ec239e1
+    # via jaraco-classes
+nh3==0.2.15 \
+    --hash=sha256:0d02d0ff79dfd8208ed25a39c12cbda092388fff7f1662466e27d97ad011b770 \
+    --hash=sha256:3277481293b868b2715907310c7be0f1b9d10491d5adf9fce11756a97e97eddf \
+    --hash=sha256:3b803a5875e7234907f7d64777dfde2b93db992376f3d6d7af7f3bc347deb305 \
+    --hash=sha256:427fecbb1031db085eaac9931362adf4a796428ef0163070c484b5a768e71601 \
+    --hash=sha256:5f0d77272ce6d34db6c87b4f894f037d55183d9518f948bba236fe81e2bb4e28 \
+    --hash=sha256:60684857cfa8fdbb74daa867e5cad3f0c9789415aba660614fe16cd66cbb9ec7 \
+    --hash=sha256:6f42f99f0cf6312e470b6c09e04da31f9abaadcd3eb591d7d1a88ea931dca7f3 \
+    --hash=sha256:86e447a63ca0b16318deb62498db4f76fc60699ce0a1231262880b38b6cff911 \
+    --hash=sha256:8d595df02413aa38586c24811237e95937ef18304e108b7e92c890a06793e3bf \
+    --hash=sha256:9c0d415f6b7f2338f93035bba5c0d8c1b464e538bfbb1d598acd47d7969284f0 \
+    --hash=sha256:a5167a6403d19c515217b6bcaaa9be420974a6ac30e0da9e84d4fc67a5d474c5 \
+    --hash=sha256:ac19c0d68cd42ecd7ead91a3a032fdfff23d29302dbb1311e641a130dfefba97 \
+    --hash=sha256:b1e97221cedaf15a54f5243f2c5894bb12ca951ae4ddfd02a9d4ea9df9e1a29d \
+    --hash=sha256:bc2d086fb540d0fa52ce35afaded4ea526b8fc4d3339f783db55c95de40ef02e \
+    --hash=sha256:d1e30ff2d8d58fb2a14961f7aac1bbb1c51f9bdd7da727be35c63826060b0bf3 \
+    --hash=sha256:f3b53ba93bb7725acab1e030bc2ecd012a817040fd7851b332f86e2f9bb98dc6
+    # via readme-renderer
+numpy==1.26.3 \
+    --hash=sha256:02f98011ba4ab17f46f80f7f8f1c291ee7d855fcef0a5a98db80767a468c85cd \
+    --hash=sha256:0b7e807d6888da0db6e7e75838444d62495e2b588b99e90dd80c3459594e857b \
+    --hash=sha256:12c70ac274b32bc00c7f61b515126c9205323703abb99cd41836e8125ea0043e \
+    --hash=sha256:1666f634cb3c80ccbd77ec97bc17337718f56d6658acf5d3b906ca03e90ce87f \
+    --hash=sha256:18c3319a7d39b2c6a9e3bb75aab2304ab79a811ac0168a671a62e6346c29b03f \
+    --hash=sha256:211ddd1e94817ed2d175b60b6374120244a4dd2287f4ece45d49228b4d529178 \
+    --hash=sha256:21a9484e75ad018974a2fdaa216524d64ed4212e418e0a551a2d83403b0531d3 \
+    --hash=sha256:39763aee6dfdd4878032361b30b2b12593fb445ddb66bbac802e2113eb8a6ac4 \
+    --hash=sha256:3c67423b3703f8fbd90f5adaa37f85b5794d3366948efe9a5190a5f3a83fc34e \
+    --hash=sha256:46f47ee566d98849323f01b349d58f2557f02167ee301e5e28809a8c0e27a2d0 \
+    --hash=sha256:51c7f1b344f302067b02e0f5b5d2daa9ed4a721cf49f070280ac202738ea7f00 \
+    --hash=sha256:5f24750ef94d56ce6e33e4019a8a4d68cfdb1ef661a52cdaee628a56d2437419 \
+    --hash=sha256:697df43e2b6310ecc9d95f05d5ef20eacc09c7c4ecc9da3f235d39e71b7da1e4 \
+    --hash=sha256:6d45b3ec2faed4baca41c76617fcdcfa4f684ff7a151ce6fc78ad3b6e85af0a6 \
+    --hash=sha256:77810ef29e0fb1d289d225cabb9ee6cf4d11978a00bb99f7f8ec2132a84e0166 \
+    --hash=sha256:7ca4f24341df071877849eb2034948459ce3a07915c2734f1abb4018d9c49d7b \
+    --hash=sha256:7f784e13e598e9594750b2ef6729bcd5a47f6cfe4a12cca13def35e06d8163e3 \
+    --hash=sha256:806dd64230dbbfaca8a27faa64e2f414bf1c6622ab78cc4264f7f5f028fee3bf \
+    --hash=sha256:867e3644e208c8922a3be26fc6bbf112a035f50f0a86497f98f228c50c607bb2 \
+    --hash=sha256:8c66d6fec467e8c0f975818c1796d25c53521124b7cfb760114be0abad53a0a2 \
+    --hash=sha256:8ed07a90f5450d99dad60d3799f9c03c6566709bd53b497eb9ccad9a55867f36 \
+    --hash=sha256:9bc6d1a7f8cedd519c4b7b1156d98e051b726bf160715b769106661d567b3f03 \
+    --hash=sha256:9e1591f6ae98bcfac2a4bbf9221c0b92ab49762228f38287f6eeb5f3f55905ce \
+    --hash=sha256:9e87562b91f68dd8b1c39149d0323b42e0082db7ddb8e934ab4c292094d575d6 \
+    --hash=sha256:a7081fd19a6d573e1a05e600c82a1c421011db7935ed0d5c483e9dd96b99cf13 \
+    --hash=sha256:a8474703bffc65ca15853d5fd4d06b18138ae90c17c8d12169968e998e448bb5 \
+    --hash=sha256:af36e0aa45e25c9f57bf684b1175e59ea05d9a7d3e8e87b7ae1a1da246f2767e \
+    --hash=sha256:b1240f767f69d7c4c8a29adde2310b871153df9b26b5cb2b54a561ac85146485 \
+    --hash=sha256:b4d362e17bcb0011738c2d83e0a65ea8ce627057b2fdda37678f4374a382a137 \
+    --hash=sha256:b831295e5472954104ecb46cd98c08b98b49c69fdb7040483aff799a755a7374 \
+    --hash=sha256:b8c275f0ae90069496068c714387b4a0eba5d531aace269559ff2b43655edd58 \
+    --hash=sha256:bdd2b45bf079d9ad90377048e2747a0c82351989a2165821f0c96831b4a2a54b \
+    --hash=sha256:cc0743f0302b94f397a4a65a660d4cd24267439eb16493fb3caad2e4389bccbb \
+    --hash=sha256:da4b0c6c699a0ad73c810736303f7fbae483bcb012e38d7eb06a5e3b432c981b \
+    --hash=sha256:f25e2811a9c932e43943a2615e65fc487a0b6b49218899e62e426e7f0a57eeda \
+    --hash=sha256:f73497e8c38295aaa4741bdfa4fda1a5aedda5473074369eca10626835445511
     # via
     #   -r third_party/python_requirements.in
     #   h5py
-    #   jax
     #   ml-dtypes
     #   opt-einsum
-    #   scipy
     #   tensorboard
-    #   tensorflow-cpu
+    #   tensorflow
 oauthlib==3.2.2 \
     --hash=sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca \
     --hash=sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918
@@ -344,102 +522,110 @@
 opt-einsum==3.3.0 \
     --hash=sha256:2455e59e3947d3c275477df7f5205b30635e266fe6dc300e3d9f9646bfcea147 \
     --hash=sha256:59f6475f77bbc37dcf7cd748519c0ec60722e91e63ca114e68821c0c54a46549
-    # via
-    #   jax
-    #   tensorflow-cpu
-packaging==23.1 \
-    --hash=sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61 \
-    --hash=sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f
-    # via tensorflow-cpu
-pillow==9.5.0 \
-    --hash=sha256:07999f5834bdc404c442146942a2ecadd1cb6292f5229f4ed3b31e0a108746b1 \
-    --hash=sha256:0852ddb76d85f127c135b6dd1f0bb88dbb9ee990d2cd9aa9e28526c93e794fba \
-    --hash=sha256:1781a624c229cb35a2ac31cc4a77e28cafc8900733a864870c49bfeedacd106a \
-    --hash=sha256:1e7723bd90ef94eda669a3c2c19d549874dd5badaeefabefd26053304abe5799 \
-    --hash=sha256:229e2c79c00e85989a34b5981a2b67aa079fd08c903f0aaead522a1d68d79e51 \
-    --hash=sha256:22baf0c3cf0c7f26e82d6e1adf118027afb325e703922c8dfc1d5d0156bb2eeb \
-    --hash=sha256:252a03f1bdddce077eff2354c3861bf437c892fb1832f75ce813ee94347aa9b5 \
-    --hash=sha256:2dfaaf10b6172697b9bceb9a3bd7b951819d1ca339a5ef294d1f1ac6d7f63270 \
-    --hash=sha256:322724c0032af6692456cd6ed554bb85f8149214d97398bb80613b04e33769f6 \
-    --hash=sha256:35f6e77122a0c0762268216315bf239cf52b88865bba522999dc38f1c52b9b47 \
-    --hash=sha256:375f6e5ee9620a271acb6820b3d1e94ffa8e741c0601db4c0c4d3cb0a9c224bf \
-    --hash=sha256:3ded42b9ad70e5f1754fb7c2e2d6465a9c842e41d178f262e08b8c85ed8a1d8e \
-    --hash=sha256:432b975c009cf649420615388561c0ce7cc31ce9b2e374db659ee4f7d57a1f8b \
-    --hash=sha256:482877592e927fd263028c105b36272398e3e1be3269efda09f6ba21fd83ec66 \
-    --hash=sha256:489f8389261e5ed43ac8ff7b453162af39c3e8abd730af8363587ba64bb2e865 \
-    --hash=sha256:54f7102ad31a3de5666827526e248c3530b3a33539dbda27c6843d19d72644ec \
-    --hash=sha256:560737e70cb9c6255d6dcba3de6578a9e2ec4b573659943a5e7e4af13f298f5c \
-    --hash=sha256:5671583eab84af046a397d6d0ba25343c00cd50bce03787948e0fff01d4fd9b1 \
-    --hash=sha256:5ba1b81ee69573fe7124881762bb4cd2e4b6ed9dd28c9c60a632902fe8db8b38 \
-    --hash=sha256:5d4ebf8e1db4441a55c509c4baa7a0587a0210f7cd25fcfe74dbbce7a4bd1906 \
-    --hash=sha256:60037a8db8750e474af7ffc9faa9b5859e6c6d0a50e55c45576bf28be7419705 \
-    --hash=sha256:608488bdcbdb4ba7837461442b90ea6f3079397ddc968c31265c1e056964f1ef \
-    --hash=sha256:6608ff3bf781eee0cd14d0901a2b9cc3d3834516532e3bd673a0a204dc8615fc \
-    --hash=sha256:662da1f3f89a302cc22faa9f14a262c2e3951f9dbc9617609a47521c69dd9f8f \
-    --hash=sha256:7002d0797a3e4193c7cdee3198d7c14f92c0836d6b4a3f3046a64bd1ce8df2bf \
-    --hash=sha256:763782b2e03e45e2c77d7779875f4432e25121ef002a41829d8868700d119392 \
-    --hash=sha256:77165c4a5e7d5a284f10a6efaa39a0ae8ba839da344f20b111d62cc932fa4e5d \
-    --hash=sha256:7c9af5a3b406a50e313467e3565fc99929717f780164fe6fbb7704edba0cebbe \
-    --hash=sha256:7ec6f6ce99dab90b52da21cf0dc519e21095e332ff3b399a357c187b1a5eee32 \
-    --hash=sha256:833b86a98e0ede388fa29363159c9b1a294b0905b5128baf01db683672f230f5 \
-    --hash=sha256:84a6f19ce086c1bf894644b43cd129702f781ba5751ca8572f08aa40ef0ab7b7 \
-    --hash=sha256:8507eda3cd0608a1f94f58c64817e83ec12fa93a9436938b191b80d9e4c0fc44 \
-    --hash=sha256:85ec677246533e27770b0de5cf0f9d6e4ec0c212a1f89dfc941b64b21226009d \
-    --hash=sha256:8aca1152d93dcc27dc55395604dcfc55bed5f25ef4c98716a928bacba90d33a3 \
-    --hash=sha256:8d935f924bbab8f0a9a28404422da8af4904e36d5c33fc6f677e4c4485515625 \
-    --hash=sha256:8f36397bf3f7d7c6a3abdea815ecf6fd14e7fcd4418ab24bae01008d8d8ca15e \
-    --hash=sha256:91ec6fe47b5eb5a9968c79ad9ed78c342b1f97a091677ba0e012701add857829 \
-    --hash=sha256:965e4a05ef364e7b973dd17fc765f42233415974d773e82144c9bbaaaea5d089 \
-    --hash=sha256:96e88745a55b88a7c64fa49bceff363a1a27d9a64e04019c2281049444a571e3 \
-    --hash=sha256:99eb6cafb6ba90e436684e08dad8be1637efb71c4f2180ee6b8f940739406e78 \
-    --hash=sha256:9adf58f5d64e474bed00d69bcd86ec4bcaa4123bfa70a65ce72e424bfb88ed96 \
-    --hash=sha256:9b1af95c3a967bf1da94f253e56b6286b50af23392a886720f563c547e48e964 \
-    --hash=sha256:a0aa9417994d91301056f3d0038af1199eb7adc86e646a36b9e050b06f526597 \
-    --hash=sha256:a0f9bb6c80e6efcde93ffc51256d5cfb2155ff8f78292f074f60f9e70b942d99 \
-    --hash=sha256:a127ae76092974abfbfa38ca2d12cbeddcdeac0fb71f9627cc1135bedaf9d51a \
-    --hash=sha256:aaf305d6d40bd9632198c766fb64f0c1a83ca5b667f16c1e79e1661ab5060140 \
-    --hash=sha256:aca1c196f407ec7cf04dcbb15d19a43c507a81f7ffc45b690899d6a76ac9fda7 \
-    --hash=sha256:ace6ca218308447b9077c14ea4ef381ba0b67ee78d64046b3f19cf4e1139ad16 \
-    --hash=sha256:b416f03d37d27290cb93597335a2f85ed446731200705b22bb927405320de903 \
-    --hash=sha256:bf548479d336726d7a0eceb6e767e179fbde37833ae42794602631a070d630f1 \
-    --hash=sha256:c1170d6b195555644f0616fd6ed929dfcf6333b8675fcca044ae5ab110ded296 \
-    --hash=sha256:c380b27d041209b849ed246b111b7c166ba36d7933ec6e41175fd15ab9eb1572 \
-    --hash=sha256:c446d2245ba29820d405315083d55299a796695d747efceb5717a8b450324115 \
-    --hash=sha256:c830a02caeb789633863b466b9de10c015bded434deb3ec87c768e53752ad22a \
-    --hash=sha256:cb841572862f629b99725ebaec3287fc6d275be9b14443ea746c1dd325053cbd \
-    --hash=sha256:cfa4561277f677ecf651e2b22dc43e8f5368b74a25a8f7d1d4a3a243e573f2d4 \
-    --hash=sha256:cfcc2c53c06f2ccb8976fb5c71d448bdd0a07d26d8e07e321c103416444c7ad1 \
-    --hash=sha256:d3c6b54e304c60c4181da1c9dadf83e4a54fd266a99c70ba646a9baa626819eb \
-    --hash=sha256:d3d403753c9d5adc04d4694d35cf0391f0f3d57c8e0030aac09d7678fa8030aa \
-    --hash=sha256:d9c206c29b46cfd343ea7cdfe1232443072bbb270d6a46f59c259460db76779a \
-    --hash=sha256:e49eb4e95ff6fd7c0c402508894b1ef0e01b99a44320ba7d8ecbabefddcc5569 \
-    --hash=sha256:f8286396b351785801a976b1e85ea88e937712ee2c3ac653710a4a57a8da5d9c \
-    --hash=sha256:f8fc330c3370a81bbf3f88557097d1ea26cd8b019d6433aa59f71195f5ddebbf \
-    --hash=sha256:fbd359831c1657d69bb81f0db962905ee05e5e9451913b18b831febfe0519082 \
-    --hash=sha256:fe7e1c262d3392afcf5071df9afa574544f28eac825284596ac6db56e6d11062 \
-    --hash=sha256:fed1e1cf6a42577953abbe8e6cf2fe2f566daebde7c34724ec8803c4c0cda579
+    # via tensorflow
+packaging==23.2 \
+    --hash=sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5 \
+    --hash=sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7
+    # via tensorflow
+pillow==10.2.0 \
+    --hash=sha256:0304004f8067386b477d20a518b50f3fa658a28d44e4116970abfcd94fac34a8 \
+    --hash=sha256:0689b5a8c5288bc0504d9fcee48f61a6a586b9b98514d7d29b840143d6734f39 \
+    --hash=sha256:0eae2073305f451d8ecacb5474997c08569fb4eb4ac231ffa4ad7d342fdc25ac \
+    --hash=sha256:0fb3e7fc88a14eacd303e90481ad983fd5b69c761e9e6ef94c983f91025da869 \
+    --hash=sha256:11fa2e5984b949b0dd6d7a94d967743d87c577ff0b83392f17cb3990d0d2fd6e \
+    --hash=sha256:127cee571038f252a552760076407f9cff79761c3d436a12af6000cd182a9d04 \
+    --hash=sha256:154e939c5f0053a383de4fd3d3da48d9427a7e985f58af8e94d0b3c9fcfcf4f9 \
+    --hash=sha256:15587643b9e5eb26c48e49a7b33659790d28f190fc514a322d55da2fb5c2950e \
+    --hash=sha256:170aeb00224ab3dc54230c797f8404507240dd868cf52066f66a41b33169bdbe \
+    --hash=sha256:1b5e1b74d1bd1b78bc3477528919414874748dd363e6272efd5abf7654e68bef \
+    --hash=sha256:1da3b2703afd040cf65ec97efea81cfba59cdbed9c11d8efc5ab09df9509fc56 \
+    --hash=sha256:1e23412b5c41e58cec602f1135c57dfcf15482013ce6e5f093a86db69646a5aa \
+    --hash=sha256:2247178effb34a77c11c0e8ac355c7a741ceca0a732b27bf11e747bbc950722f \
+    --hash=sha256:257d8788df5ca62c980314053197f4d46eefedf4e6175bc9412f14412ec4ea2f \
+    --hash=sha256:3031709084b6e7852d00479fd1d310b07d0ba82765f973b543c8af5061cf990e \
+    --hash=sha256:322209c642aabdd6207517e9739c704dc9f9db943015535783239022002f054a \
+    --hash=sha256:322bdf3c9b556e9ffb18f93462e5f749d3444ce081290352c6070d014c93feb2 \
+    --hash=sha256:33870dc4653c5017bf4c8873e5488d8f8d5f8935e2f1fb9a2208c47cdd66efd2 \
+    --hash=sha256:35bb52c37f256f662abdfa49d2dfa6ce5d93281d323a9af377a120e89a9eafb5 \
+    --hash=sha256:3c31822339516fb3c82d03f30e22b1d038da87ef27b6a78c9549888f8ceda39a \
+    --hash=sha256:3eedd52442c0a5ff4f887fab0c1c0bb164d8635b32c894bc1faf4c618dd89df2 \
+    --hash=sha256:3ff074fc97dd4e80543a3e91f69d58889baf2002b6be64347ea8cf5533188213 \
+    --hash=sha256:47c0995fc4e7f79b5cfcab1fc437ff2890b770440f7696a3ba065ee0fd496563 \
+    --hash=sha256:49d9ba1ed0ef3e061088cd1e7538a0759aab559e2e0a80a36f9fd9d8c0c21591 \
+    --hash=sha256:51f1a1bffc50e2e9492e87d8e09a17c5eea8409cda8d3f277eb6edc82813c17c \
+    --hash=sha256:52a50aa3fb3acb9cf7213573ef55d31d6eca37f5709c69e6858fe3bc04a5c2a2 \
+    --hash=sha256:54f1852cd531aa981bc0965b7d609f5f6cc8ce8c41b1139f6ed6b3c54ab82bfb \
+    --hash=sha256:609448742444d9290fd687940ac0b57fb35e6fd92bdb65386e08e99af60bf757 \
+    --hash=sha256:69ffdd6120a4737710a9eee73e1d2e37db89b620f702754b8f6e62594471dee0 \
+    --hash=sha256:6fad5ff2f13d69b7e74ce5b4ecd12cc0ec530fcee76356cac6742785ff71c452 \
+    --hash=sha256:7049e301399273a0136ff39b84c3678e314f2158f50f517bc50285fb5ec847ad \
+    --hash=sha256:70c61d4c475835a19b3a5aa42492409878bbca7438554a1f89d20d58a7c75c01 \
+    --hash=sha256:716d30ed977be8b37d3ef185fecb9e5a1d62d110dfbdcd1e2a122ab46fddb03f \
+    --hash=sha256:753cd8f2086b2b80180d9b3010dd4ed147efc167c90d3bf593fe2af21265e5a5 \
+    --hash=sha256:773efe0603db30c281521a7c0214cad7836c03b8ccff897beae9b47c0b657d61 \
+    --hash=sha256:7823bdd049099efa16e4246bdf15e5a13dbb18a51b68fa06d6c1d4d8b99a796e \
+    --hash=sha256:7c8f97e8e7a9009bcacbe3766a36175056c12f9a44e6e6f2d5caad06dcfbf03b \
+    --hash=sha256:823ef7a27cf86df6597fa0671066c1b596f69eba53efa3d1e1cb8b30f3533068 \
+    --hash=sha256:8373c6c251f7ef8bda6675dd6d2b3a0fcc31edf1201266b5cf608b62a37407f9 \
+    --hash=sha256:83b2021f2ade7d1ed556bc50a399127d7fb245e725aa0113ebd05cfe88aaf588 \
+    --hash=sha256:870ea1ada0899fd0b79643990809323b389d4d1d46c192f97342eeb6ee0b8483 \
+    --hash=sha256:8d12251f02d69d8310b046e82572ed486685c38f02176bd08baf216746eb947f \
+    --hash=sha256:9c23f307202661071d94b5e384e1e1dc7dfb972a28a2310e4ee16103e66ddb67 \
+    --hash=sha256:9d189550615b4948f45252d7f005e53c2040cea1af5b60d6f79491a6e147eef7 \
+    --hash=sha256:a086c2af425c5f62a65e12fbf385f7c9fcb8f107d0849dba5839461a129cf311 \
+    --hash=sha256:a2b56ba36e05f973d450582fb015594aaa78834fefe8dfb8fcd79b93e64ba4c6 \
+    --hash=sha256:aebb6044806f2e16ecc07b2a2637ee1ef67a11840a66752751714a0d924adf72 \
+    --hash=sha256:b1b3020d90c2d8e1dae29cf3ce54f8094f7938460fb5ce8bc5c01450b01fbaf6 \
+    --hash=sha256:b4b6b1e20608493548b1f32bce8cca185bf0480983890403d3b8753e44077129 \
+    --hash=sha256:b6f491cdf80ae540738859d9766783e3b3c8e5bd37f5dfa0b76abdecc5081f13 \
+    --hash=sha256:b792a349405fbc0163190fde0dc7b3fef3c9268292586cf5645598b48e63dc67 \
+    --hash=sha256:b7c2286c23cd350b80d2fc9d424fc797575fb16f854b831d16fd47ceec078f2c \
+    --hash=sha256:babf5acfede515f176833ed6028754cbcd0d206f7f614ea3447d67c33be12516 \
+    --hash=sha256:c365fd1703040de1ec284b176d6af5abe21b427cb3a5ff68e0759e1e313a5e7e \
+    --hash=sha256:c4225f5220f46b2fde568c74fca27ae9771536c2e29d7c04f4fb62c83275ac4e \
+    --hash=sha256:c570f24be1e468e3f0ce7ef56a89a60f0e05b30a3669a459e419c6eac2c35364 \
+    --hash=sha256:c6dafac9e0f2b3c78df97e79af707cdc5ef8e88208d686a4847bab8266870023 \
+    --hash=sha256:c8de2789052ed501dd829e9cae8d3dcce7acb4777ea4a479c14521c942d395b1 \
+    --hash=sha256:cb28c753fd5eb3dd859b4ee95de66cc62af91bcff5db5f2571d32a520baf1f04 \
+    --hash=sha256:cb4c38abeef13c61d6916f264d4845fab99d7b711be96c326b84df9e3e0ff62d \
+    --hash=sha256:d1b35bcd6c5543b9cb547dee3150c93008f8dd0f1fef78fc0cd2b141c5baf58a \
+    --hash=sha256:d8e6aeb9201e655354b3ad049cb77d19813ad4ece0df1249d3c793de3774f8c7 \
+    --hash=sha256:d8ecd059fdaf60c1963c58ceb8997b32e9dc1b911f5da5307aab614f1ce5c2fb \
+    --hash=sha256:da2b52b37dad6d9ec64e653637a096905b258d2fc2b984c41ae7d08b938a67e4 \
+    --hash=sha256:e87f0b2c78157e12d7686b27d63c070fd65d994e8ddae6f328e0dcf4a0cd007e \
+    --hash=sha256:edca80cbfb2b68d7b56930b84a0e45ae1694aeba0541f798e908a49d66b837f1 \
+    --hash=sha256:f379abd2f1e3dddb2b61bc67977a6b5a0a3f7485538bcc6f39ec76163891ee48 \
+    --hash=sha256:fe4c15f6c9285dc54ce6553a3ce908ed37c8f3825b5a51a15c91442bb955b868
     # via -r third_party/python_requirements.in
-protobuf==4.23.0 \
-    --hash=sha256:03eee35b60317112a72d19c54d0bff7bc58ff12fea4cd7b018232bd99758ffdf \
-    --hash=sha256:2b94bd6df92d71bd1234a2ffe7ce96ddf6d10cf637a18d6b55ad0a89fbb7fc21 \
-    --hash=sha256:36f5370a930cb77c8ad2f4135590c672d0d2c72d4a707c7d0058dce4b4b4a598 \
-    --hash=sha256:5f1eba1da2a2f3f7df469fccddef3cc060b8a16cfe3cc65961ad36b4dbcf59c5 \
-    --hash=sha256:6c16657d6717a0c62d5d740cb354fbad1b0d8cb811669e06fc1caa0ff4799ddd \
-    --hash=sha256:6fe180b56e1169d72ecc4acbd39186339aed20af5384531b8e8979b02bbee159 \
-    --hash=sha256:7cb5b9a05ce52c6a782bb97de52679bd3438ff2b7460eff5da348db65650f227 \
-    --hash=sha256:9744e934ea5855d12191040ea198eaf704ac78665d365a89d9572e3b627c2688 \
-    --hash=sha256:9f5a0fbfcdcc364f3986f9ed9f8bb1328fb84114fd790423ff3d7fdb0f85c2d1 \
-    --hash=sha256:baca40d067dddd62141a129f244703160d278648b569e90bb0e3753067644711 \
-    --hash=sha256:d5a35ff54e3f62e8fc7be02bb0d2fbc212bba1a5a9cc2748090690093996f07b \
-    --hash=sha256:e62fb869762b4ba18666370e2f8a18f17f8ab92dd4467295c6d38be6f8fef60b \
-    --hash=sha256:ebde3a023b8e11bfa6c890ef34cd6a8b47d586f26135e86c21344fe433daf2e2
+pkginfo==1.9.6 \
+    --hash=sha256:4b7a555a6d5a22169fcc9cf7bfd78d296b0361adad412a346c1226849af5e546 \
+    --hash=sha256:8fd5896e8718a4372f0ea9cc9d96f6417c9b986e23a4d116dda26b62cc29d046
+    # via twine
+platformdirs==4.1.0 \
+    --hash=sha256:11c8f37bcca40db96d8144522d925583bdb7a31f7b0e37e3ed4318400a8e2380 \
+    --hash=sha256:906d548203468492d432bcb294d4bc2fff751bf84971fbb2c10918cc206ee420
+    # via yapf
+protobuf==4.23.4 \
+    --hash=sha256:0a5759f5696895de8cc913f084e27fd4125e8fb0914bb729a17816a33819f474 \
+    --hash=sha256:351cc90f7d10839c480aeb9b870a211e322bf05f6ab3f55fcb2f51331f80a7d2 \
+    --hash=sha256:5fea3c64d41ea5ecf5697b83e41d09b9589e6f20b677ab3c48e5f242d9b7897b \
+    --hash=sha256:6dd9b9940e3f17077e820b75851126615ee38643c2c5332aa7a359988820c720 \
+    --hash=sha256:7b19b6266d92ca6a2a87effa88ecc4af73ebc5cfde194dc737cf8ef23a9a3b12 \
+    --hash=sha256:8547bf44fe8cec3c69e3042f5c4fb3e36eb2a7a013bb0a44c018fc1e427aafbd \
+    --hash=sha256:9053df6df8e5a76c84339ee4a9f5a2661ceee4a0dab019e8663c50ba324208b0 \
+    --hash=sha256:c3e0939433c40796ca4cfc0fac08af50b00eb66a40bbbc5dee711998fb0bbc1e \
+    --hash=sha256:ccd9430c0719dce806b93f89c91de7977304729e55377f872a92465d548329a9 \
+    --hash=sha256:e1c915778d8ced71e26fcf43c0866d7499891bca14c4368448a82edc61fdbc70 \
+    --hash=sha256:e9d0be5bf34b275b9f87ba7407796556abeeba635455d036c7351f7c183ef8ff \
+    --hash=sha256:effeac51ab79332d44fba74660d40ae79985901ac21bca408f8dc335a81aa597 \
+    --hash=sha256:fee88269a090ada09ca63551bf2f573eb2424035bcf2cb1b121895b01a46594a
     # via
     #   -r third_party/python_requirements.in
     #   tensorboard
-    #   tensorflow-cpu
-pyasn1==0.5.0 \
-    --hash=sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57 \
-    --hash=sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde
+    #   tensorflow
+pyasn1==0.5.1 \
+    --hash=sha256:4439847c58d40b1d0a573d07e3856e95333f1976294494c325775aeca506eb58 \
+    --hash=sha256:6d391a96e59b23130a5cfa74d6fd7f388dbbe26cc8f1edf39fdddf08d9d6676c
     # via
     #   pyasn1-modules
     #   rsa
@@ -447,120 +633,124 @@
     --hash=sha256:5bd01446b736eb9d31512a30d46c1ac3395d676c6f3cafa4c03eb54b9925631c \
     --hash=sha256:d3ccd6ed470d9ffbc716be08bd90efbd44d0734bc9303818f7336070984a162d
     # via google-auth
-requests==2.30.0 \
-    --hash=sha256:10e94cc4f3121ee6da529d358cdaeaff2f1c409cd377dbc72b825852f2f7e294 \
-    --hash=sha256:239d7d4458afcb28a692cdd298d87542235f4ca8d36d03a15bfc128a6559a2f4
+pycparser==2.21 \
+    --hash=sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9 \
+    --hash=sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206
+    # via cffi
+pygments==2.17.2 \
+    --hash=sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c \
+    --hash=sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367
+    # via
+    #   readme-renderer
+    #   rich
+readme-renderer==42.0 \
+    --hash=sha256:13d039515c1f24de668e2c93f2e877b9dbe6c6c32328b90a40a49d8b2b85f36d \
+    --hash=sha256:2d55489f83be4992fe4454939d1a051c33edbab778e82761d060c9fc6b308cd1
+    # via twine
+requests==2.31.0 \
+    --hash=sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f \
+    --hash=sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1
     # via
     #   requests-oauthlib
+    #   requests-toolbelt
     #   tensorboard
+    #   twine
 requests-oauthlib==1.3.1 \
     --hash=sha256:2577c501a2fb8d05a304c09d090d6e47c306fef15809d102b327cf8364bddab5 \
     --hash=sha256:75beac4a47881eeb94d5ea5d6ad31ef88856affe2332b9aafb52c6452ccf0d7a
     # via google-auth-oauthlib
+requests-toolbelt==1.0.0 \
+    --hash=sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6 \
+    --hash=sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06
+    # via twine
+rfc3986==2.0.0 \
+    --hash=sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd \
+    --hash=sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c
+    # via twine
+rich==13.7.0 \
+    --hash=sha256:5cb5123b5cf9ee70584244246816e9114227e0b98ad9176eede6ad54bf5403fa \
+    --hash=sha256:6da14c108c4866ee9520bbffa71f6fe3962e193b7da68720583850cd4548e235
+    # via twine
 rsa==4.9 \
     --hash=sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7 \
     --hash=sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21
     # via google-auth
-scipy==1.10.1 \
-    --hash=sha256:049a8bbf0ad95277ffba9b3b7d23e5369cc39e66406d60422c8cfef40ccc8415 \
-    --hash=sha256:07c3457ce0b3ad5124f98a86533106b643dd811dd61b548e78cf4c8786652f6f \
-    --hash=sha256:0f1564ea217e82c1bbe75ddf7285ba0709ecd503f048cb1236ae9995f64217bd \
-    --hash=sha256:1553b5dcddd64ba9a0d95355e63fe6c3fc303a8fd77c7bc91e77d61363f7433f \
-    --hash=sha256:15a35c4242ec5f292c3dd364a7c71a61be87a3d4ddcc693372813c0b73c9af1d \
-    --hash=sha256:1b4735d6c28aad3cdcf52117e0e91d6b39acd4272f3f5cd9907c24ee931ad601 \
-    --hash=sha256:2cf9dfb80a7b4589ba4c40ce7588986d6d5cebc5457cad2c2880f6bc2d42f3a5 \
-    --hash=sha256:39becb03541f9e58243f4197584286e339029e8908c46f7221abeea4b749fa88 \
-    --hash=sha256:43b8e0bcb877faf0abfb613d51026cd5cc78918e9530e375727bf0625c82788f \
-    --hash=sha256:4b3f429188c66603a1a5c549fb414e4d3bdc2a24792e061ffbd607d3d75fd84e \
-    --hash=sha256:4c0ff64b06b10e35215abce517252b375e580a6125fd5fdf6421b98efbefb2d2 \
-    --hash=sha256:51af417a000d2dbe1ec6c372dfe688e041a7084da4fdd350aeb139bd3fb55353 \
-    --hash=sha256:5678f88c68ea866ed9ebe3a989091088553ba12c6090244fdae3e467b1139c35 \
-    --hash=sha256:79c8e5a6c6ffaf3a2262ef1be1e108a035cf4f05c14df56057b64acc5bebffb6 \
-    --hash=sha256:7ff7f37b1bf4417baca958d254e8e2875d0cc23aaadbe65b3d5b3077b0eb23ea \
-    --hash=sha256:aaea0a6be54462ec027de54fca511540980d1e9eea68b2d5c1dbfe084797be35 \
-    --hash=sha256:bce5869c8d68cf383ce240e44c1d9ae7c06078a9396df68ce88a1230f93a30c1 \
-    --hash=sha256:cd9f1027ff30d90618914a64ca9b1a77a431159df0e2a195d8a9e8a04c78abf9 \
-    --hash=sha256:d925fa1c81b772882aa55bcc10bf88324dadb66ff85d548c71515f6689c6dac5 \
-    --hash=sha256:e7354fd7527a4b0377ce55f286805b34e8c54b91be865bac273f527e1b839019 \
-    --hash=sha256:fae8a7b898c42dffe3f7361c40d5952b6bf32d10c4569098d276b4c547905ee1
-    # via jax
+secretstorage==3.3.3 \
+    --hash=sha256:2403533ef369eca6d2ba81718576c5e0f564d5cca1b58f73a8b23e7d4eeebd77 \
+    --hash=sha256:f356e6628222568e3af06f2eba8df495efa13b3b63081dafd4f7d9a7b7bc9f99
+    # via keyring
+setuptools==69.0.3 \
+    --hash=sha256:385eb4edd9c9d5c17540511303e39a147ce2fc04bc55289c322b9e5904fe2c05 \
+    --hash=sha256:be1af57fc409f93647f2e8e4573a142ed38724b8cdd389706a867bb4efcf1e78
+    # via
+    #   tensorboard
+    #   tensorflow
 six==1.16.0 \
     --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \
     --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254
     # via
     #   astunparse
-    #   google-auth
     #   google-pasta
-    #   tensorflow-cpu
-tensorboard==2.12.3 \
-    --hash=sha256:b4a69366784bc347e02fbe7d847e01896a649ca52f8948a11005e205dcf724fb
-    # via tensorflow-cpu
-tensorboard-data-server==0.7.0 \
-    --hash=sha256:64aa1be7c23e80b1a42c13b686eb0875bb70f5e755f4d2b8de5c1d880cf2267f \
-    --hash=sha256:753d4214799b31da7b6d93837959abebbc6afa86e69eacf1e9a317a48daa31eb \
-    --hash=sha256:eb7fa518737944dbf4f0cf83c2e40a7ac346bf91be2e6a0215de98be74e85454
+    #   tensorboard
+    #   tensorflow
+tensorboard==2.15.1 \
+    --hash=sha256:c46c1d1cf13a458c429868a78b2531d8ff5f682058d69ec0840b0bc7a38f1c0f
+    # via tensorflow
+tensorboard-data-server==0.7.2 \
+    --hash=sha256:7e0610d205889588983836ec05dc098e80f97b7e7bbff7e994ebb78f578d0ddb \
+    --hash=sha256:9fe5d24221b29625dbc7328b0436ca7fc1c23de4acf4d272f1180856e32f9f60 \
+    --hash=sha256:ef687163c24185ae9754ed5650eb5bc4d84ff257aabdc33f0cc6f74d8ba54530
     # via tensorboard
-tensorflow-cpu==2.12.0 \
-    --hash=sha256:361b19b5a64bf611beccd22de1fc04f614a8c157ac99893d9702ed24932018d6 \
-    --hash=sha256:374b15d1cec1a62006e388062e89dd4899a121272d41ea5d3fcbcc96e2d875c9 \
-    --hash=sha256:55685b9a19c8ecb2587fb53914c045b188ed0289a2c6495e4e59d5fb082da9cc \
-    --hash=sha256:5beeb99d2a1cc1383ca981513c35a4a18157e52d91a89e69c94cb7b7e411f0d8 \
-    --hash=sha256:734ce850e2b3493041bdc071b594f0f78d35e4bfce5a7e0a98d449b20420e01d \
-    --hash=sha256:8fdb636736f95094368bc7d26bb3b8ed93ba820cc5d95f847e00bf4a7645463d \
-    --hash=sha256:a406f751180fe5282776e8bc84f39a2dc2b796c3ae35fbe20e4edc86ec580dd3 \
-    --hash=sha256:b6ba926f9a56cdf0657defc6d046735e31ded383054f67c1a16ef2b0511f68d7 \
-    --hash=sha256:b9c8f0d0658da8a5b25a4fe5ca315f86c449eb11e30d79cea49c7658be75a825 \
-    --hash=sha256:d5ad746bf8c87d9a9fcea4698828ba1d101a7f7bfd323a2571130374a192578b \
-    --hash=sha256:e8c7047552a2d759f3e65ac13e36dd24bb5fec2e6576e848287811ec44b3d62f \
-    --hash=sha256:ef4f142b6fe75fcc71ada6331ed2a15ed61b7034187049d0ef1dac482d52db78
+tensorflow==2.15.0.post1 \
+    --hash=sha256:8716acd8eb2950db126d74a419ac1ed870558ba34efc7d8e506165ad5896b261 \
+    --hash=sha256:c2dc9b6a519a9caad80430220c17f604e9ae76f3b5007c07cc8a5321e9f9cad0 \
+    --hash=sha256:d4b3926a3759e08a1e818f01104c25e6952da6fb1d7a77c75d9f1d8407f72593
     # via -r third_party/python_requirements.in
-tensorflow-estimator==2.12.0 \
-    --hash=sha256:59b191bead4883822de3d63ac02ace11a83bfe6c10d64d0c4dfde75a50e60ca1
-    # via tensorflow-cpu
-tensorflow-io-gcs-filesystem==0.32.0 \
-    --hash=sha256:045d51bba586390d0545fcd8a18727d62b175eb142f6f4c6d719d39de40774cd \
-    --hash=sha256:05e65d3cb6c93a7929b384d86c6369c63cbbab8a770440a3d95e094878403f9f \
-    --hash=sha256:122be149e5f6a030f5c2901be0cc3cb07619232f7b03889e2cdf3da1c0d4f92f \
-    --hash=sha256:1ce80e1555d6ee88dda67feddf366cc8b30252b5837a7a17303df7b06a71fc2e \
-    --hash=sha256:21de7dcc06eb1e7de3c022b0072d90ba35ef886578149663437aa7a6fb5bf6b3 \
-    --hash=sha256:28202492d904a6e280cf27560791e87ac1c7566000db82065d63a70c27008af2 \
-    --hash=sha256:336d9b3fe6b55aea149c4f6aa1fd6ffaf27d4e5c37e55a182340b47caba38846 \
-    --hash=sha256:5635df0bbe40f971dc1b946e3372744b0bdfda45c38ffcd28ef53a32bb8da4da \
-    --hash=sha256:74a7e25e83d4117a7ebb09a3f247553a5497393ab48c3ee0cf0d17b405026817 \
-    --hash=sha256:79fdd02103b8ae9f8b89af41f744c013fa1caaea709de19833917795e3063857 \
-    --hash=sha256:7f15fd22e592661b10de317be2f42a0f84be7bfc5e6a565fcfcb04b60d625b78 \
-    --hash=sha256:8214cdf85bea694160f9035ff395221c1e25e119784ccb4c104919b1f5dec84e \
-    --hash=sha256:842f5f09cd756bdb3b4d0b5571b3a6f72fd534d42da938b9acf0ef462995eada \
-    --hash=sha256:db682e9a510c27dd35710ba5a2c62c371e25b727741b2fe3a920355fa501e947
-    # via tensorflow-cpu
-termcolor==2.3.0 \
-    --hash=sha256:3afb05607b89aed0ffe25202399ee0867ad4d3cb4180d98aaf8eefa6a5f7d475 \
-    --hash=sha256:b5b08f68937f138fe92f6c089b99f1e2da0ae56c52b78bf7075fd95420fd9a5a
-    # via tensorflow-cpu
+tensorflow-estimator==2.15.0 \
+    --hash=sha256:aedf21eec7fb2dc91150fc91a1ce12bc44dbb72278a08b58e79ff87c9e28f153
+    # via tensorflow
+tensorflow-io-gcs-filesystem==0.35.0 \
+    --hash=sha256:0fce1466bdb91096b6d22e7df17358ba228bcb92db5cff83f2f9f1c68eb26788 \
+    --hash=sha256:1856fe321fdb75f3386d92109c60db6ef097f610b450f9cc69d76444fb9980d1 \
+    --hash=sha256:35b6eca7225c815d962254327195f191d88c3c9c2278a5ab23e0ac834acbadbb \
+    --hash=sha256:5521721b38105496d4b43a4ffb0af5b04cc4873d464f26fbceddf8d63815ce98 \
+    --hash=sha256:6e997389bfe008210cbd97c0c738d64282a2f03ad4d0536013bb0a9efde0c283 \
+    --hash=sha256:ac8f1de60fdf9c734aea967b98555e366ac8743f77bca15c49eff023f587076b \
+    --hash=sha256:b8fb3402fb1457482c386ea19371bc76383412ae9ea4396edb1e8adb4ba76f21 \
+    --hash=sha256:c4f786eebd98d401565374722f2e67f3878675b0d87489cbaa13c70ee6ac370a \
+    --hash=sha256:dd8f30908bf8b7b2a017d6b145720d105aff7f998422671b71729708ec7b2fe4 \
+    --hash=sha256:eb6bf8f5b40207ecb17e7fdc3b4fc824a8361267c14e9528c1688e16de135cb7
+    # via tensorflow
+termcolor==2.4.0 \
+    --hash=sha256:9297c0df9c99445c2412e832e882a7884038a25617c60cea2ad69488d4040d63 \
+    --hash=sha256:aab9e56047c8ac41ed798fa36d892a37aca6b3e9159f3e0c24bc64a9b3ac7b7a
+    # via tensorflow
 tomli==2.0.1 \
     --hash=sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc \
     --hash=sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f
     # via yapf
-typing-extensions==4.5.0 \
-    --hash=sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb \
-    --hash=sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4
-    # via tensorflow-cpu
-urllib3==1.26.15 \
-    --hash=sha256:8a388717b9476f934a21484e8c8e61875ab60644d29b9b39e11e4b9dc1c6b305 \
-    --hash=sha256:aa751d169e23c7479ce47a0cb0da579e3ede798f994f5816a74e4f4500dcea42
+twine==4.0.2 \
+    --hash=sha256:929bc3c280033347a00f847236564d1c52a3e61b1ac2516c97c48f3ceab756d8 \
+    --hash=sha256:9e102ef5fdd5a20661eb88fad46338806c3bd32cf1db729603fe3697b1bc83c8
+    # via -r third_party/python_requirements.in
+typing-extensions==4.9.0 \
+    --hash=sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783 \
+    --hash=sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd
+    # via tensorflow
+urllib3==2.1.0 \
+    --hash=sha256:55901e917a5896a349ff771be919f8bd99aff50b79fe58fec595eb37bbc56bb3 \
+    --hash=sha256:df7aa8afb0148fa78488e7899b2c59b5f4ffcfa82e6c54ccb9dd37c1d7b52d54
     # via
-    #   google-auth
     #   requests
-werkzeug==2.3.4 \
-    --hash=sha256:1d5a58e0377d1fe39d061a5de4469e414e78ccb1e1e59c0f5ad6fa1c36c52b76 \
-    --hash=sha256:48e5e61472fee0ddee27ebad085614ebedb7af41e88f687aaf881afb723a162f
+    #   twine
+werkzeug==3.0.1 \
+    --hash=sha256:507e811ecea72b18a404947aded4b3390e1db8f826b494d76550ef45bb3b1dcc \
+    --hash=sha256:90a285dc0e42ad56b34e696398b8122ee4c681833fb35b8334a095d82c56da10
     # via tensorboard
-wheel==0.40.0 \
-    --hash=sha256:cd1196f3faee2b31968d626e1731c94f99cbdb67cf5a46e4f5656cbee7738873 \
-    --hash=sha256:d236b20e7cb522daf2390fa84c55eea81c5c30190f90f29ae2ca1ad8355bf247
-    # via
-    #   astunparse
-    #   tensorboard
+wheel==0.42.0 \
+    --hash=sha256:177f9c9b0d45c47873b619f5b650346d632cdc35fb5e4d25058e09c9e581433d \
+    --hash=sha256:c45be39f7882c9d34243236f2d63cbd58039e360f85d0913425fbd7ceea617a8
+    # via astunparse
 wrapt==1.14.1 \
     --hash=sha256:00b6d4ea20a906c0ca56d84f93065b398ab74b927a7a3dbd470f6fc503f95dc3 \
     --hash=sha256:01c205616a89d09827986bc4e859bcabd64f5a0662a7fe95e0d359424e0e071b \
@@ -568,23 +758,30 @@
     --hash=sha256:07f7a7d0f388028b2df1d916e94bbb40624c59b48ecc6cbc232546706fac74c2 \
     --hash=sha256:11871514607b15cfeb87c547a49bca19fde402f32e2b1c24a632506c0a756656 \
     --hash=sha256:1b376b3f4896e7930f1f772ac4b064ac12598d1c38d04907e696cc4d794b43d3 \
+    --hash=sha256:2020f391008ef874c6d9e208b24f28e31bcb85ccff4f335f15a3251d222b92d9 \
     --hash=sha256:21ac0156c4b089b330b7666db40feee30a5d52634cc4560e1905d6529a3897ff \
+    --hash=sha256:240b1686f38ae665d1b15475966fe0472f78e71b1b4903c143a842659c8e4cb9 \
     --hash=sha256:257fd78c513e0fb5cdbe058c27a0624c9884e735bbd131935fd49e9fe719d310 \
+    --hash=sha256:26046cd03936ae745a502abf44dac702a5e6880b2b01c29aea8ddf3353b68224 \
     --hash=sha256:2b39d38039a1fdad98c87279b48bc5dce2c0ca0d73483b12cb72aa9609278e8a \
     --hash=sha256:2cf71233a0ed05ccdabe209c606fe0bac7379fdcf687f39b944420d2a09fdb57 \
     --hash=sha256:2fe803deacd09a233e4762a1adcea5db5d31e6be577a43352936179d14d90069 \
+    --hash=sha256:2feecf86e1f7a86517cab34ae6c2f081fd2d0dac860cb0c0ded96d799d20b335 \
     --hash=sha256:3232822c7d98d23895ccc443bbdf57c7412c5a65996c30442ebe6ed3df335383 \
     --hash=sha256:34aa51c45f28ba7f12accd624225e2b1e5a3a45206aa191f6f9aac931d9d56fe \
+    --hash=sha256:358fe87cc899c6bb0ddc185bf3dbfa4ba646f05b1b0b9b5a27c2cb92c2cea204 \
     --hash=sha256:36f582d0c6bc99d5f39cd3ac2a9062e57f3cf606ade29a0a0d6b323462f4dd87 \
     --hash=sha256:380a85cf89e0e69b7cfbe2ea9f765f004ff419f34194018a6827ac0e3edfed4d \
     --hash=sha256:40e7bc81c9e2b2734ea4bc1aceb8a8f0ceaac7c5299bc5d69e37c44d9081d43b \
     --hash=sha256:43ca3bbbe97af00f49efb06e352eae40434ca9d915906f77def219b88e85d907 \
+    --hash=sha256:49ef582b7a1152ae2766557f0550a9fcbf7bbd76f43fbdc94dd3bf07cc7168be \
     --hash=sha256:4fcc4649dc762cddacd193e6b55bc02edca674067f5f98166d7713b193932b7f \
     --hash=sha256:5a0f54ce2c092aaf439813735584b9537cad479575a09892b8352fea5e988dc0 \
     --hash=sha256:5a9a0d155deafd9448baff28c08e150d9b24ff010e899311ddd63c45c2445e28 \
     --hash=sha256:5b02d65b9ccf0ef6c34cba6cf5bf2aab1bb2f49c6090bafeecc9cd81ad4ea1c1 \
     --hash=sha256:60db23fa423575eeb65ea430cee741acb7c26a1365d103f7b0f6ec412b893853 \
     --hash=sha256:642c2e7a804fcf18c222e1060df25fc210b9c58db7c91416fb055897fc27e8cc \
+    --hash=sha256:6447e9f3ba72f8e2b985a1da758767698efa72723d5b59accefd716e9e8272bf \
     --hash=sha256:6a9a25751acb379b466ff6be78a315e2b439d4c94c1e99cb7266d40a537995d3 \
     --hash=sha256:6b1a564e6cb69922c7fe3a678b9f9a3c54e72b469875aa8018f18b4d1dd1adf3 \
     --hash=sha256:6d323e1554b3d22cfc03cd3243b5bb815a51f5249fdcbb86fda4bf62bab9e164 \
@@ -607,8 +804,10 @@
     --hash=sha256:9e0fd32e0148dd5dea6af5fee42beb949098564cc23211a88d799e434255a1f4 \
     --hash=sha256:9f3e6f9e05148ff90002b884fbc2a86bd303ae847e472f44ecc06c2cd2fcdb2d \
     --hash=sha256:a85d2b46be66a71bedde836d9e41859879cc54a2a04fad1191eb50c2066f6e9d \
+    --hash=sha256:a9008dad07d71f68487c91e96579c8567c98ca4c3881b9b113bc7b33e9fd78b8 \
     --hash=sha256:a9a52172be0b5aae932bef82a79ec0a0ce87288c7d132946d645eba03f0ad8a8 \
     --hash=sha256:aa31fdcc33fef9eb2552cbcbfee7773d5a6792c137b359e82879c101e98584c5 \
+    --hash=sha256:acae32e13a4153809db37405f5eba5bac5fbe2e2ba61ab227926a22901051c0a \
     --hash=sha256:b014c23646a467558be7da3d6b9fa409b2c567d2110599b7cf9a0c5992b3b471 \
     --hash=sha256:b21bb4c09ffabfa0e85e3a6b623e19b80e7acd709b9f91452b8297ace2a8ab00 \
     --hash=sha256:b5901a312f4d14c59918c221323068fad0540e34324925c8475263841dbdfe68 \
@@ -623,19 +822,16 @@
     --hash=sha256:dee60e1de1898bde3b238f18340eec6148986da0455d8ba7848d50470a7a32fb \
     --hash=sha256:e2f83e18fe2f4c9e7db597e988f72712c0c3676d337d8b101f6758107c42425b \
     --hash=sha256:e3fb1677c720409d5f671e39bac6c9e0e422584e5f518bfd50aa4cbbea02433f \
+    --hash=sha256:ecee4132c6cd2ce5308e21672015ddfed1ff975ad0ac8d27168ea82e71413f55 \
     --hash=sha256:ee2b1b1769f6707a8a445162ea16dddf74285c3964f605877a20e38545c3c462 \
     --hash=sha256:ee6acae74a2b91865910eef5e7de37dc6895ad96fa23603d1d27ea69df545015 \
     --hash=sha256:ef3f72c9666bba2bab70d2a8b79f2c6d2c1a42a7f7e2b0ec83bb2f9e383950af
-    # via tensorflow-cpu
-yapf==0.33.0 \
-    --hash=sha256:4c2b59bd5ffe46f3a7da48df87596877189148226ce267c16e8b44240e51578d \
-    --hash=sha256:da62bdfea3df3673553351e6246abed26d9fe6780e548a5af9e70f6d2b4f5b9a
+    # via tensorflow
+yapf==0.40.2 \
+    --hash=sha256:4dab8a5ed7134e26d57c1647c7483afb3f136878b579062b786c9ba16b94637b \
+    --hash=sha256:adc8b5dd02c0143108878c499284205adb258aad6db6634e5b869e7ee2bd548b
     # via -r third_party/python_requirements.in
-
-# The following packages are considered to be unsafe in a requirements file:
-setuptools==67.7.2 \
-    --hash=sha256:23aaf86b85ca52ceb801d32703f12d77517b2556af839621c641fca11287952b \
-    --hash=sha256:f104fa03692a2602fa0fec6c6a9e63b6c8a968de13e17c026957dd1f53d80990
-    # via
-    #   tensorboard
-    #   tensorflow-cpu
+zipp==3.17.0 \
+    --hash=sha256:0e923e726174922dce09c53c59ad483ff7bbb8e572e00c7f7c46b88556409f31 \
+    --hash=sha256:84e64a1c28cf7e91ed2078bb8cc8c259cb19b76942096c8d7b84947690cabaf0
+    # via importlib-metadata
diff --git a/third_party/ruy/BUILD b/third_party/ruy/BUILD
index 8db906a..8fabe49 100644
--- a/third_party/ruy/BUILD
+++ b/third_party/ruy/BUILD
@@ -5,4 +5,4 @@
     licenses = ["notice"],
 )
 
-exports_files(glob(["*.patch"]) + ["LICENSE"])
+exports_files(glob(["*.patch"]))
diff --git a/third_party/xtensa/nnlib_hifi4/BUILD b/third_party/xtensa/nnlib_hifi4/BUILD
new file mode 100644
index 0000000..aadcd9f
--- /dev/null
+++ b/third_party/xtensa/nnlib_hifi4/BUILD
@@ -0,0 +1,7 @@
+alias(
+    name = "nnlib_hifi4_lib",
+    actual = "@nnlib_hifi4//:lib",
+    visibility = [
+        "//visibility:public",
+    ],
+)
diff --git a/third_party/xtensa/nnlib_hifi4/nnlib_hifi4.BUILD b/third_party/xtensa/nnlib_hifi4/nnlib_hifi4.BUILD
new file mode 100644
index 0000000..44f4777
--- /dev/null
+++ b/third_party/xtensa/nnlib_hifi4/nnlib_hifi4.BUILD
@@ -0,0 +1,43 @@
+constraint_setting(
+    name = "compatible_constraint",
+)
+
+# Set this constraint_value on your platform to indicate compatibility with this
+# library.
+constraint_value(
+    name = "compatible",
+    constraint_setting = ":compatible_constraint",
+    visibility = [
+        "//visibility:public",
+    ],
+)
+
+cc_library(
+    name = "lib",
+    srcs = glob(["xa_nnlib/algo/**/*.c"]),
+    hdrs = glob([
+        "xa_nnlib/algo/**/*.h",
+        "xa_nnlib/include/**/*.h",
+    ]),
+    copts = ["-Wno-unused-parameter"],
+    defines = [
+        "NNLIB_V2=1",
+        "MODEL_INT16=1",
+        "EIGEN_NO_MALLOC=1",
+        "hifi4=1",
+    ],
+    includes = [
+        "xa_nnlib",
+        "xa_nnlib/algo/common/include",
+        "xa_nnlib/algo/kernels",
+        "xa_nnlib/algo/ndsp/hifi4/include",
+        "xa_nnlib/include",
+        "xa_nnlib/include/nnlib",
+    ],
+    target_compatible_with = [
+        ":compatible",
+    ],
+    visibility = [
+        "//visibility:public",
+    ],
+)
diff --git a/tools/BUILD b/tools/BUILD
new file mode 100644
index 0000000..d886dc5
--- /dev/null
+++ b/tools/BUILD
@@ -0,0 +1,16 @@
+load("@rules_python//python:defs.bzl", "py_binary", "py_test")
+
+package(
+    default_visibility = ["//visibility:public"],
+)
+
+py_binary(
+    name = "expand_stamp_vars",
+    srcs = ["expand_stamp_vars.py"],
+)
+
+py_test(
+    name = "expand_stamp_vars_test",
+    srcs = ["expand_stamp_vars_test.py"],
+    deps = [":expand_stamp_vars"],
+)
diff --git a/tools/expand_stamp_vars.bzl b/tools/expand_stamp_vars.bzl
new file mode 100644
index 0000000..39db265
--- /dev/null
+++ b/tools/expand_stamp_vars.bzl
@@ -0,0 +1,52 @@
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ----
+
+def expand_stamp_vars(name, template, out):
+    """Macro for expanding a template using workspace status variables.
+
+    Typical usage in a BUILD file:
+
+        expand_stamp_vars(
+            name = "version",
+            template = "_version.py.in",
+            out = "_version.py",
+        )
+
+    Writes `template` to `out`, expanding references of the form '{KEY}' to the
+    value of the corresponding Bazel workspace status variable.
+    """
+
+    # This macro uses a genrule to call a helper program at Bazel execution
+    # time, because workspace variables are not available until execution time.
+    # Workspace variables are generated by bazel on each invocation, and
+    # written to the hardcoded file names used below. See the Bazel
+    # documentation for the option --workspace_status_command.
+
+    native.genrule(
+        name = name,
+        srcs = [template],
+        outs = [out],
+        cmd = "$(location //tools:expand_stamp_vars) " +
+              "bazel-out/stable-status.txt " +
+              "bazel-out/volatile-status.txt " +
+              "<$< >$@",
+        tools = [
+            "//tools:expand_stamp_vars",
+        ],
+
+        # Undocumented, but valid, and the only way to declare the necessary
+        # dependencies on {stable,volatile}-status.txt.
+        stamp = 1,
+    )
diff --git a/tools/expand_stamp_vars.py b/tools/expand_stamp_vars.py
new file mode 100644
index 0000000..40d1a3f
--- /dev/null
+++ b/tools/expand_stamp_vars.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python3
+
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ----
+"""
+ A filter that expands Bazel workspace stamp variables.
+
+ For example, the input stream:
+
+     This build was compiled at {BUILD_DATE}.
+
+ is expanded into the output stream:
+
+     This build was compiled at 2023-02-10T14:15.
+
+ Stamp variable key-value pairs are read from all files passed as positional
+ arguments. These files are typically bazel-out/stable-status.txt and
+ bazel-out/volatile-status.txt. See the Bazel documentation for the option
+ --workspace_status_command.
+"""
+
+import sys
+
+
+def read_stamps(file):
+  """Return a dictionary of key-value pairs read from a stamp file.
+
+  These files are typically bazel-out/stable-status.txt and
+  bazel-out/volatile-status.txt. See the Bazel documentation for the option
+  --workspace_status_command."""
+
+  stamps = {}
+  for line in file:
+    try:
+      key, value = line.split(" ", maxsplit=1)
+      stamps[key] = value.strip()
+    except ValueError:
+      pass  # Skip blank lines, etc.
+
+  return stamps
+
+
+def expand(istream, ostream, stamps):
+  """Write istream to ostream, expanding placeholders like {KEY}."""
+  for line in istream:
+    for key, value in stamps.items():
+      line = line.replace(f"{{{key}}}", value)
+    ostream.write(line)
+
+
+def _main():
+  """Stamp variables are read from all files passed as positional arguments."""
+  stamps = {}
+  for name in sys.argv[1:]:
+    with open(name) as f:
+      stamps.update(read_stamps(f))
+
+  expand(sys.stdin, sys.stdout, stamps)
+
+  sys.exit(0)
+
+
+if __name__ == "__main__":
+  _main()
diff --git a/tools/expand_stamp_vars_test.py b/tools/expand_stamp_vars_test.py
new file mode 100644
index 0000000..07b047a
--- /dev/null
+++ b/tools/expand_stamp_vars_test.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python3
+
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ----
+
+# A test for the filter that expands Bazel workspace stamp variables.
+
+from tflite_micro.tools import expand_stamp_vars
+
+import io
+import unittest
+
+
+class FilterTest(unittest.TestCase):
+  """A simple test of the expansion feature."""
+
+  def test_basic(self):
+    stamps = """
+BUILD_STAMP_ONE value_one
+BUILD_STAMP_TWO value_two
+"""
+    input = "This is {BUILD_STAMP_TWO}. This is {BUILD_STAMP_ONE}."
+    golden = "This is value_two. This is value_one."
+
+    istream = io.StringIO(input)
+    ostream = io.StringIO()
+    stamps = expand_stamp_vars.read_stamps(io.StringIO(stamps))
+    expand_stamp_vars.expand(istream, ostream, stamps)
+
+    self.assertEqual(ostream.getvalue(), golden)
+
+
+if __name__ == "__main__":
+  unittest.main()
diff --git a/tools/workspace_status.sh b/tools/workspace_status.sh
new file mode 100755
index 0000000..5d17d1a
--- /dev/null
+++ b/tools/workspace_status.sh
@@ -0,0 +1,42 @@
+#!/bin/sh
+
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ---
+
+# Output key-value pairs with which to stamp build outputs. This script is
+# called by the bazel option --workspace_status_command, which is likely to be
+# embedded in .bazelrc. Bazel generates some keys, such as BUILD_EMBED_LABEL,
+# on its own, outside of this script. Search for "Bazel workspace status" for
+# more, including the differences between STABLE_ and volatile keys.
+
+
+# Unambiguous identification of the source tree
+echo STABLE_GIT_HASH $(git describe --always --long --dirty)
+
+# Human-readable timestamp of git HEAD's commit date. Use dates derived from
+# git for stability across multiple invocations of the `bazel` command. Use UTC
+# rather than committer or local timezones for consistency across build
+# environments. Use commit date instead of author date, the default date shown
+# by `git log` and GitHub, because amending, rebasing, merging, etc. can cause
+# the author date of descendant commits to be earlier than those of their
+# ancestors.
+#
+# Comparable commit dates can be produced via:
+#     `TZ=UTC0 git log --pretty=fuller --date=local`.
+#
+echo STABLE_GIT_COMMIT_TIME $(TZ=UTC0 git show \
+    --no-patch \
+    --format=format:%cd \
+    --date=format-local:%Y%m%d%H%M%S)