{
  "commit": "f9b370865b441f2bd805a02711342f79dbb375c0",
  "tree": "8650fc659b9fa247764db8d272a43cd413f6f0f2",
  "parents": [
    "9b86ad5f69e85ff77c4575283f6807259fc0ab0b"
  ],
  "author": {
    "name": "bjacob",
    "email": "benoitjacob@google.com",
    "time": "Wed Jun 08 11:13:45 2022 -0400"
  },
  "committer": {
    "name": "GitHub",
    "email": "noreply@github.com",
    "time": "Wed Jun 08 08:13:45 2022 -0700"
  },
  "message": "dequantize softmax (#9337)\n\nSee #8974. This is still a 20% end-to-end latency improvement on MobileBert-int8 on configs where matmuls are already reasonably fast, making other things like Softmax more important relatively. That is even after Softmax slowness was much improved recently as observed in #9170. Moreover, discussion around #8974 suggests that the path forward for non-dequantized Softmax is nontrivial, so putting our benchmarks on the dequantized path for now will help insulate them a bit from what we expect to be in-flux for the foreseeable future.",
  "tree_diff": [
    {
      "type": "modify",
      "old_id": "6adf6768d96c1fbf1429c14e08592e6261771470",
      "old_mode": 33188,
      "old_path": "integrations/tensorflow/iree_tf_compiler/TFL/Passes.cpp",
      "new_id": "804d2e019d94a1159680fc43c391afcac56dd429",
      "new_mode": 33188,
      "new_path": "integrations/tensorflow/iree_tf_compiler/TFL/Passes.cpp"
    },
    {
      "type": "modify",
      "old_id": "fa95b23220184855ba8dbb99b2547df006317fe5",
      "old_mode": 33188,
      "old_path": "integrations/tensorflow/test/python/iree_tfl_tests/mobilebert_tf2_quant_test.py",
      "new_id": "259aa1f386a5efe5379b26f4d6409092b4172982",
      "new_mode": 33188,
      "new_path": "integrations/tensorflow/test/python/iree_tfl_tests/mobilebert_tf2_quant_test.py"
    }
  ]
}
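
The sketch below illustrates the "dequantized softmax" idea the commit message describes: instead of computing Softmax directly on int8 values, dequantize to float first and run an ordinary float softmax. This is only a minimal illustration assuming a standard affine int8 quantization scheme (scale, zero_point); the function names and parameters here are hypothetical and do not reflect the actual IREE/TFLite pass touched in Passes.cpp.

import numpy as np

def dequantize(q, scale, zero_point):
    # Hypothetical affine dequantization: x = scale * (q - zero_point).
    return scale * (q.astype(np.float32) - zero_point)

def softmax(x, axis=-1):
    # Numerically stable float softmax: subtract the row max before exp.
    x = x - np.max(x, axis=axis, keepdims=True)
    e = np.exp(x)
    return e / np.sum(e, axis=axis, keepdims=True)

# Example: int8 logits with made-up quantization parameters.
q_logits = np.array([[-20, 0, 35]], dtype=np.int8)
probs = softmax(dequantize(q_logits, scale=0.1, zero_point=0))
print(probs)  # float32 probabilities summing to 1 along the last axis

Computing softmax in float after a cheap dequantize sidesteps the nontrivial question, discussed in #8974, of how to implement an accurate fully quantized softmax.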
