{
  "commit": "0fe0ca8f7c7daf9af6e6da1df995bdd1098a8c9c",
  "tree": "622cd8da8d264e21f6d747073db7ee531d08d8d8",
  "parents": [
    "d62d69b27352204b026883498be964e0606c0eec"
  ],
  "author": {
    "name": "Keshav Vinayak Jha",
    "email": "31160700+keshavvinayak01@users.noreply.github.com",
    "time": "Fri May 08 03:05:33 2026 +0530"
  },
  "committer": {
    "name": "GitHub",
    "email": "noreply@github.com",
    "time": "Fri May 08 03:05:33 2026 +0530"
  },
  "message": "[Torch][LinalgExt] Support GQA in torch.hop_flex_attention lowering (#24313)\n\nThis PR adds grouped-query attention support to the\ntorch.hop_flex_attention lowering in IREE’s Torch input pipeline.\n\nWhen query, key, and value have different head counts, the lowering now\nexpands key and value heads to match the query head count before\nemitting `iree_linalg_ext.online_attention`.\n\nThis is similar to how the torch-mlir \u003d\u003e TMTensor lowering handles this\ncase.\n\n---------\n\nSigned-off-by: Keshav Vinayak Jha \u003ckeshavvinayakjha@gmail.com\u003e\nCo-authored-by: GPT-5 Codex \u003cnoreply@openai.com\u003e",
  "tree_diff": [
    {
      "type": "modify",
      "old_id": "b74e08c8a92a3c0fc73398794476dae3f761b8ae",
      "old_mode": 33188,
      "old_path": "build_tools/bazel_to_cmake/bazel_to_cmake_targets.py",
      "new_id": "c1ba62797c7351e660d53018f6fb267ad2548d23",
      "new_mode": 33188,
      "new_path": "build_tools/bazel_to_cmake/bazel_to_cmake_targets.py"
    },
    {
      "type": "modify",
      "old_id": "a4711250313c0f38f78ec153bbd85d3c3b6c47a5",
      "old_mode": 33188,
      "old_path": "compiler/plugins/input/Torch/InputConversion/BUILD.bazel",
      "new_id": "3485639e14596c6988b7f19ff78bfd741ed19f16",
      "new_mode": 33188,
      "new_path": "compiler/plugins/input/Torch/InputConversion/BUILD.bazel"
    },
    {
      "type": "modify",
      "old_id": "fc42cf97e360c931530bef11cf18c42fb83d6288",
      "old_mode": 33188,
      "old_path": "compiler/plugins/input/Torch/InputConversion/CMakeLists.txt",
      "new_id": "301f62c312d734714b5becc95164a8b7cea68400",
      "new_mode": 33188,
      "new_path": "compiler/plugins/input/Torch/InputConversion/CMakeLists.txt"
    },
    {
      "type": "modify",
      "old_id": "1734faccfac4a6c31e026560be44c6875cdf96e5",
      "old_mode": 33188,
      "old_path": "compiler/plugins/input/Torch/InputConversion/ConvertTorchUnstructuredToLinalgExt.cpp",
      "new_id": "fe8209de328de61cec682a6192643babc9150fd2",
      "new_mode": 33188,
      "new_path": "compiler/plugins/input/Torch/InputConversion/ConvertTorchUnstructuredToLinalgExt.cpp"
    },
    {
      "type": "modify",
      "old_id": "bcaab19a043aaba4ca18d0f33275e4a18b7c59dd",
      "old_mode": 33188,
      "old_path": "compiler/plugins/input/Torch/InputConversion/test/unstructured_linalg_ext.mlir",
      "new_id": "f586088703fd5037135d2af0b0c06e90f1277936",
      "new_mode": 33188,
      "new_path": "compiler/plugins/input/Torch/InputConversion/test/unstructured_linalg_ext.mlir"
    }
  ]
}
