Propagating errors through barrier tasks.
diff --git a/iree/task/list.c b/iree/task/list.c
index 984dd50..607ab82 100644
--- a/iree/task/list.c
+++ b/iree/task/list.c
@@ -47,6 +47,7 @@
   while (!iree_task_list_is_empty(list)) {
     iree_task_t* task = iree_task_list_pop_front(list);
     iree_task_discard(task, list);
+    task = NULL;  // invalidated during discard
   }
 }
 
diff --git a/iree/task/list_test.cc b/iree/task/list_test.cc
index 6eef318..c5cb5b2 100644
--- a/iree/task/list_test.cc
+++ b/iree/task/list_test.cc
@@ -108,6 +108,62 @@
   // IMPLICIT: if the tasks were not released back to the pool we'll leak.
 }
 
+TEST(TaskListTest, DiscardSequence) {
+  auto pool = AllocateNopPool();
+  auto scope = AllocateScope("a");
+
+  iree_task_list_t list;
+  iree_task_list_initialize(&list);
+  EXPECT_TRUE(iree_task_list_is_empty(&list));
+
+  auto task0 = AcquireNopTask(pool, scope, 0);
+  auto task1 = AcquireNopTask(pool, scope, 1);
+  auto task2 = AcquireNopTask(pool, scope, 2);
+  auto task3 = AcquireNopTask(pool, scope, 3);
+  iree_task_set_completion_task(task0, task1);
+  iree_task_set_completion_task(task1, task2);
+  iree_task_set_completion_task(task2, task3);
+  iree_task_list_push_back(&list, task0);
+  iree_task_list_push_back(&list, task1);
+  iree_task_list_push_back(&list, task2);
+  iree_task_list_push_back(&list, task3);
+  EXPECT_EQ(4, iree_task_list_calculate_size(&list));
+  EXPECT_TRUE(CheckListOrderFIFO(&list));
+
+  iree_task_list_discard(&list);
+  EXPECT_TRUE(iree_task_list_is_empty(&list));
+
+  // IMPLICIT: if the tasks were not released back to the pool we'll leak.
+}
+
+TEST(TaskListTest, DiscardJoin) {
+  auto pool = AllocateNopPool();
+  auto scope = AllocateScope("a");
+
+  iree_task_list_t list;
+  iree_task_list_initialize(&list);
+  EXPECT_TRUE(iree_task_list_is_empty(&list));
+
+  auto task0 = AcquireNopTask(pool, scope, 0);
+  auto task1 = AcquireNopTask(pool, scope, 1);
+  auto task2 = AcquireNopTask(pool, scope, 2);
+  auto task3 = AcquireNopTask(pool, scope, 3);
+  iree_task_set_completion_task(task0, task3);
+  iree_task_set_completion_task(task1, task3);
+  iree_task_set_completion_task(task2, task3);
+  iree_task_list_push_back(&list, task0);
+  iree_task_list_push_back(&list, task1);
+  iree_task_list_push_back(&list, task2);
+  iree_task_list_push_back(&list, task3);
+  EXPECT_EQ(4, iree_task_list_calculate_size(&list));
+  EXPECT_TRUE(CheckListOrderFIFO(&list));
+
+  iree_task_list_discard(&list);
+  EXPECT_TRUE(iree_task_list_is_empty(&list));
+
+  // IMPLICIT: if the tasks were not released back to the pool we'll leak.
+}
+
 TEST(TaskListTest, PushFront) {
   auto pool = AllocateNopPool();
   auto scope = AllocateScope("a");
diff --git a/iree/task/scope.c b/iree/task/scope.c
index 245fe90..723a07b 100644
--- a/iree/task/scope.c
+++ b/iree/task/scope.c
@@ -66,6 +66,11 @@
   return result;
 }
 
+bool iree_task_scope_has_failed(iree_task_scope_t* scope) {
+  return iree_atomic_load_intptr(&scope->permanent_status,
+                                 iree_memory_order_seq_cst) != 0;
+}
+
 iree_status_t iree_task_scope_consume_status(iree_task_scope_t* scope) {
   iree_status_t old_status = iree_ok_status();
   iree_status_t new_status = iree_ok_status();
diff --git a/iree/task/scope.h b/iree/task/scope.h
index 4e47f9d..e13ef0b 100644
--- a/iree/task/scope.h
+++ b/iree/task/scope.h
@@ -104,6 +104,11 @@
 iree_task_dispatch_statistics_t iree_task_scope_consume_statistics(
     iree_task_scope_t* scope);
 
+// Returns true if the scope has failed.
+// iree_task_scope_consume_status can be used once to get the full status
+// describing the failure; subsequent calls will return just the status code.
+bool iree_task_scope_has_failed(iree_task_scope_t* scope);
+
 // Returns the permanent scope failure status to the caller (transfering
 // ownership). The scope will remain in a failed state with the status code.
 iree_status_t iree_task_scope_consume_status(iree_task_scope_t* scope);
diff --git a/iree/task/task.c b/iree/task/task.c
index 5d79e20..02cb20d 100644
--- a/iree/task/task.c
+++ b/iree/task/task.c
@@ -90,17 +90,31 @@
   }
 }
 
+static void iree_task_barrier_discard(iree_task_barrier_t* task,
+                                      iree_task_list_t* discard_worklist);
+static void iree_task_fence_discard(iree_task_fence_t* task,
+                                    iree_task_list_t* discard_worklist);
+
 void iree_task_discard(iree_task_t* task, iree_task_list_t* discard_worklist) {
   IREE_TRACE_ZONE_BEGIN(z0);
 
-  // NOTE: we always try adding to the head of the discard_worklist so that
-  // we hopefully get some locality benefits. This models a DFS discard in
-  // our non-recursive approach.
+  // This models a BFS discard in our non-recursive approach.
+  // We must ensure that we only discard each task once and that we discard the
+  // tasks in the appropriate order: if we had a DAG of A -> B, C -> D we must
+  // discard respecting the same topological ordering.
+
+  IREE_ASSERT_EQ(0, iree_atomic_load_int32(&task->pending_dependency_count,
+                                           iree_memory_order_acquire));
 
   // Almost all tasks will have a completion task; some may have additional
   // dependent tasks (like barriers) that will be handled below.
-  if (task->completion_task) {
-    iree_task_list_push_front(discard_worklist, task->completion_task);
+  const bool completion_task_ready =
+      task->completion_task &&
+      iree_atomic_fetch_sub_int32(
+          &task->completion_task->pending_dependency_count, 1,
+          iree_memory_order_acq_rel) == 1;
+  if (completion_task_ready) {
+    iree_task_list_push_back(discard_worklist, task->completion_task);
   }
 
   switch (task->type) {
@@ -108,18 +122,12 @@
     case IREE_TASK_TYPE_NOP:
     case IREE_TASK_TYPE_CALL:
       break;
-    case IREE_TASK_TYPE_BARRIER: {
-      iree_task_barrier_t* barrier_task = (iree_task_barrier_t*)task;
-      for (uint32_t i = 0; i < barrier_task->dependent_task_count; ++i) {
-        iree_task_list_push_front(discard_worklist,
-                                  barrier_task->dependent_tasks[i]);
-      }
+    case IREE_TASK_TYPE_BARRIER:
+      iree_task_barrier_discard((iree_task_barrier_t*)task, discard_worklist);
       break;
-    }
-    case IREE_TASK_TYPE_FENCE: {
+    case IREE_TASK_TYPE_FENCE:
       iree_task_scope_end(task->scope);
       break;
-    }
     case IREE_TASK_TYPE_WAIT:
     case IREE_TASK_TYPE_DISPATCH:
       break;
@@ -127,6 +135,7 @@
 
   iree_task_cleanup(task, IREE_STATUS_ABORTED);
   // NOTE: task is invalidated here and cannot be used!
+  task = NULL;
 
   IREE_TRACE_ZONE_END(z0);
 }
@@ -292,22 +301,59 @@
   }
 }
 
+static void iree_task_barrier_discard(iree_task_barrier_t* task,
+                                      iree_task_list_t* discard_worklist) {
+  IREE_TRACE_ZONE_BEGIN(z0);
+
+  // Discard all of the tasks after the barrier.
+  // Note that we need to ensure we only enqueue them for discard after all of
+  // their dependencies have been met - otherwise we'll double-discard.
+  for (iree_host_size_t i = 0; i < task->dependent_task_count; ++i) {
+    iree_task_t* dependent_task = task->dependent_tasks[i];
+    const bool dependent_task_ready =
+        iree_atomic_fetch_sub_int32(&dependent_task->pending_dependency_count,
+                                    1, iree_memory_order_acq_rel) == 1;
+    if (dependent_task_ready) {
+      // The dependent task has retired and can now be discarded.
+      iree_task_list_push_back(discard_worklist, dependent_task);
+    }
+  }
+
+  IREE_TRACE_ZONE_END(z0);
+}
+
 void iree_task_barrier_retire(iree_task_barrier_t* task,
                               iree_task_submission_t* pending_submission) {
   IREE_TRACE_ZONE_BEGIN(z0);
 
-  // NOTE: we walk in reverse so that we enqueue in LIFO order.
-  for (iree_host_size_t i = 0; i < task->dependent_task_count; ++i) {
-    iree_task_t* dependent_task =
-        task->dependent_tasks[task->dependent_task_count - i - 1];
-    if (iree_atomic_fetch_sub_int32(&dependent_task->pending_dependency_count,
-                                    1, iree_memory_order_acq_rel) == 1) {
-      // The dependent task has retired and can now be made ready.
-      iree_task_submission_enqueue(pending_submission, dependent_task);
+  // If the scope has been marked as failing then we abort the barrier.
+  // This needs to happen as a poll here because one or more of the tasks we
+  // are joining may have failed.
+  const bool has_failed = iree_task_scope_has_failed(task->header.scope);
+  if (has_failed) {
+    // This was the last pending dependency and we know that we can safely
+    // abort the completion task by discarding.
+    iree_task_list_t discard_worklist;
+    iree_task_list_initialize(&discard_worklist);
+    iree_task_barrier_discard(task, &discard_worklist);
+    iree_task_list_discard(&discard_worklist);
+  } else {
+    // NOTE: we walk in reverse so that we enqueue in LIFO order.
+    for (iree_host_size_t i = 0; i < task->dependent_task_count; ++i) {
+      iree_task_t* dependent_task =
+          task->dependent_tasks[task->dependent_task_count - i - 1];
+      if (iree_atomic_fetch_sub_int32(&dependent_task->pending_dependency_count,
+                                      1, iree_memory_order_acq_rel) == 1) {
+        // The dependent task has retired and can now be made ready.
+        iree_task_submission_enqueue(pending_submission, dependent_task);
+      }
     }
   }
 
-  iree_task_retire(&task->header, pending_submission, iree_ok_status());
+  iree_task_retire(&task->header, pending_submission,
+                   has_failed ? iree_status_from_code(IREE_STATUS_ABORTED)
+                              : iree_ok_status());
+
   IREE_TRACE_ZONE_END(z0);
 }
 
@@ -328,6 +374,7 @@
   iree_task_scope_end(task->header.scope);
 
   iree_task_retire(&task->header, pending_submission, iree_ok_status());
+
   IREE_TRACE_ZONE_END(z0);
 }
 
diff --git a/iree/task/task_test_barrier.cc b/iree/task/task_test_barrier.cc
index 2495f70..dcb8937 100644
--- a/iree/task/task_test_barrier.cc
+++ b/iree/task/task_test_barrier.cc
@@ -16,6 +16,10 @@
 
 namespace {
 
+using iree::Status;
+using iree::StatusCode;
+using iree::testing::status::StatusIs;
+
 class TaskBarrierTest : public TaskTest {};
 
 enum {
@@ -30,15 +34,15 @@
   std::atomic<uint32_t> tasks_called = {0};
 };
 
-#define MAKE_CALL_TASK_CLOSURE(task_ctx, task_id)      \
-  iree_task_make_call_closure(                         \
-      [](void* user_context, iree_task_t* task,        \
-         iree_task_submission_t* pending_submission) { \
-        auto* ctx = (TaskCtx*)user_context;            \
-        EXPECT_EQ(0, (ctx->tasks_called & (task_id))); \
-        ctx->tasks_called |= (task_id);                \
-        return iree_ok_status();                       \
-      },                                               \
+#define MAKE_CALL_TASK_CLOSURE(task_ctx, task_id, status_code) \
+  iree_task_make_call_closure(                                 \
+      [](void* user_context, iree_task_t* task,                \
+         iree_task_submission_t* pending_submission) {         \
+        auto* ctx = (TaskCtx*)user_context;                    \
+        EXPECT_EQ(0, (ctx->tasks_called & (task_id)));         \
+        ctx->tasks_called |= (task_id);                        \
+        return iree_status_from_code(status_code);             \
+      },                                                       \
       (void*)task_ctx)
 
 // Issues a standalone empty barrier:
@@ -52,15 +56,17 @@
 
 // Issues a serialized sequence:
 //  { a | barrier | b }
-TEST_F(TaskBarrierTest, IssueSerializedSequence) {
+TEST_F(TaskBarrierTest, IssueSequence) {
   TaskCtx task_ctx;
 
   iree_task_call_t task_a;
-  iree_task_call_initialize(&scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_A),
-                            &task_a);
+  iree_task_call_initialize(
+      &scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_A, IREE_STATUS_OK),
+      &task_a);
   iree_task_call_t task_b;
-  iree_task_call_initialize(&scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_B),
-                            &task_b);
+  iree_task_call_initialize(
+      &scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_B, IREE_STATUS_OK),
+      &task_b);
 
   iree_task_t* dependent_tasks[1] = {&task_b.header};
   iree_task_barrier_t barrier_task;
@@ -72,23 +78,91 @@
   EXPECT_EQ(TASK_A | TASK_B, task_ctx.tasks_called);
 }
 
+// Issues a serialized sequence where task A fails:
+//  { a | barrier | b }
+// B should not be run.
+TEST_F(TaskBarrierTest, IssueSequenceFailure) {
+  TaskCtx task_ctx;
+
+  iree_task_call_t task_a;
+  iree_task_call_initialize(
+      &scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_A, IREE_STATUS_DATA_LOSS),
+      &task_a);
+  iree_task_call_t task_b;
+  iree_task_call_initialize(
+      &scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_B, IREE_STATUS_OK),
+      &task_b);
+
+  iree_task_t* dependent_tasks[1] = {&task_b.header};
+  iree_task_barrier_t barrier_task;
+  iree_task_barrier_initialize(&scope_, IREE_ARRAYSIZE(dependent_tasks),
+                               dependent_tasks, &barrier_task);
+  iree_task_set_completion_task(&task_a.header, &barrier_task.header);
+
+  IREE_ASSERT_OK(SubmitTasksAndWaitIdle(&task_a.header, &task_b.header));
+  EXPECT_EQ(TASK_A, task_ctx.tasks_called);
+  EXPECT_THAT(Status(iree_task_scope_consume_status(&scope_)),
+              StatusIs(StatusCode::kDataLoss));
+}
+
+// Issues a deeply serialized sequence where task A fails:
+//  { a | barrier | b | barrier | c }
+// B and C should not be run.
+TEST_F(TaskBarrierTest, IssueDeepSequenceFailure) {
+  TaskCtx task_ctx;
+
+  iree_task_call_t task_a;
+  iree_task_call_initialize(
+      &scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_A, IREE_STATUS_DATA_LOSS),
+      &task_a);
+  iree_task_call_t task_b;
+  iree_task_call_initialize(
+      &scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_B, IREE_STATUS_OK),
+      &task_b);
+  iree_task_call_t task_c;
+  iree_task_call_initialize(
+      &scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_C, IREE_STATUS_OK),
+      &task_c);
+
+  iree_task_t* dependent_tasks_0[1] = {&task_b.header};
+  iree_task_barrier_t barrier_task_0;
+  iree_task_barrier_initialize(&scope_, IREE_ARRAYSIZE(dependent_tasks_0),
+                               dependent_tasks_0, &barrier_task_0);
+  iree_task_set_completion_task(&task_a.header, &barrier_task_0.header);
+
+  iree_task_t* dependent_tasks_1[1] = {&task_c.header};
+  iree_task_barrier_t barrier_task_1;
+  iree_task_barrier_initialize(&scope_, IREE_ARRAYSIZE(dependent_tasks_1),
+                               dependent_tasks_1, &barrier_task_1);
+  iree_task_set_completion_task(&task_b.header, &barrier_task_1.header);
+
+  IREE_ASSERT_OK(SubmitTasksAndWaitIdle(&task_a.header, &task_c.header));
+  EXPECT_EQ(TASK_A, task_ctx.tasks_called);
+  EXPECT_THAT(Status(iree_task_scope_consume_status(&scope_)),
+              StatusIs(StatusCode::kDataLoss));
+}
+
 // Issues a join:
 //  { a, b, c | barrier | d }
 TEST_F(TaskBarrierTest, IssueJoin) {
   TaskCtx task_ctx;
 
   iree_task_call_t task_a;
-  iree_task_call_initialize(&scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_A),
-                            &task_a);
+  iree_task_call_initialize(
+      &scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_A, IREE_STATUS_OK),
+      &task_a);
   iree_task_call_t task_b;
-  iree_task_call_initialize(&scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_B),
-                            &task_b);
+  iree_task_call_initialize(
+      &scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_B, IREE_STATUS_OK),
+      &task_b);
   iree_task_call_t task_c;
-  iree_task_call_initialize(&scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_C),
-                            &task_c);
+  iree_task_call_initialize(
+      &scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_C, IREE_STATUS_OK),
+      &task_c);
   iree_task_call_t task_d;
-  iree_task_call_initialize(&scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_D),
-                            &task_d);
+  iree_task_call_initialize(
+      &scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_D, IREE_STATUS_OK),
+      &task_d);
 
   iree_task_t* dependent_tasks[1] = {&task_d.header};
   iree_task_barrier_t barrier_task;
@@ -107,23 +181,69 @@
   EXPECT_EQ(TASK_A | TASK_B | TASK_C | TASK_D, task_ctx.tasks_called);
 }
 
+// Issues a join where a dependent task B fails:
+//  { a, b, c | barrier | d }
+// A, B, and C should all run but the barrier should fail and D should not.
+TEST_F(TaskBarrierTest, IssueJoinFailure) {
+  TaskCtx task_ctx;
+
+  iree_task_call_t task_a;
+  iree_task_call_initialize(
+      &scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_A, IREE_STATUS_OK),
+      &task_a);
+  iree_task_call_t task_b;
+  iree_task_call_initialize(
+      &scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_B, IREE_STATUS_DATA_LOSS),
+      &task_b);
+  iree_task_call_t task_c;
+  iree_task_call_initialize(
+      &scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_C, IREE_STATUS_OK),
+      &task_c);
+  iree_task_call_t task_d;
+  iree_task_call_initialize(
+      &scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_D, IREE_STATUS_OK),
+      &task_d);
+
+  iree_task_t* dependent_tasks[1] = {&task_d.header};
+  iree_task_barrier_t barrier_task;
+  iree_task_barrier_initialize(&scope_, IREE_ARRAYSIZE(dependent_tasks),
+                               dependent_tasks, &barrier_task);
+  iree_task_set_completion_task(&task_a.header, &barrier_task.header);
+  iree_task_set_completion_task(&task_b.header, &barrier_task.header);
+  iree_task_set_completion_task(&task_c.header, &barrier_task.header);
+
+  iree_task_submission_t submission;
+  iree_task_submission_initialize(&submission);
+  iree_task_submission_enqueue(&submission, &task_a.header);
+  iree_task_submission_enqueue(&submission, &task_b.header);
+  iree_task_submission_enqueue(&submission, &task_c.header);
+  IREE_ASSERT_OK(SubmitAndWaitIdle(&submission, &task_d.header));
+  EXPECT_EQ(TASK_A | TASK_B | TASK_C, task_ctx.tasks_called);
+  EXPECT_THAT(Status(iree_task_scope_consume_status(&scope_)),
+              StatusIs(StatusCode::kDataLoss));
+}
+
 // Issues a fork:
 //  { a | barrier | b, c, d | nop }
 TEST_F(TaskBarrierTest, IssueFork) {
   TaskCtx task_ctx;
 
   iree_task_call_t task_a;
-  iree_task_call_initialize(&scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_A),
-                            &task_a);
+  iree_task_call_initialize(
+      &scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_A, IREE_STATUS_OK),
+      &task_a);
   iree_task_call_t task_b;
-  iree_task_call_initialize(&scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_B),
-                            &task_b);
+  iree_task_call_initialize(
+      &scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_B, IREE_STATUS_OK),
+      &task_b);
   iree_task_call_t task_c;
-  iree_task_call_initialize(&scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_C),
-                            &task_c);
+  iree_task_call_initialize(
+      &scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_C, IREE_STATUS_OK),
+      &task_c);
   iree_task_call_t task_d;
-  iree_task_call_initialize(&scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_D),
-                            &task_d);
+  iree_task_call_initialize(
+      &scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_D, IREE_STATUS_OK),
+      &task_d);
 
   iree_task_t* dependent_tasks[3] = {
       &task_b.header,
@@ -146,4 +266,50 @@
   EXPECT_EQ(TASK_A | TASK_B | TASK_C | TASK_D, task_ctx.tasks_called);
 }
 
+// Issues a fork where task A fails:
+//  { a (fails) | barrier | b, c, d | nop }
+// The barrier should fail and none of the subsequent tasks B, C, D should run.
+TEST_F(TaskBarrierTest, IssueForkFailure) {
+  TaskCtx task_ctx;
+
+  iree_task_call_t task_a;
+  iree_task_call_initialize(
+      &scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_A, IREE_STATUS_DATA_LOSS),
+      &task_a);
+  iree_task_call_t task_b;
+  iree_task_call_initialize(
+      &scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_B, IREE_STATUS_OK),
+      &task_b);
+  iree_task_call_t task_c;
+  iree_task_call_initialize(
+      &scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_C, IREE_STATUS_OK),
+      &task_c);
+  iree_task_call_t task_d;
+  iree_task_call_initialize(
+      &scope_, MAKE_CALL_TASK_CLOSURE(&task_ctx, TASK_D, IREE_STATUS_OK),
+      &task_d);
+
+  iree_task_t* dependent_tasks[3] = {
+      &task_b.header,
+      &task_c.header,
+      &task_d.header,
+  };
+  iree_task_barrier_t barrier_task;
+  iree_task_barrier_initialize(&scope_, IREE_ARRAYSIZE(dependent_tasks),
+                               dependent_tasks, &barrier_task);
+  iree_task_set_completion_task(&task_a.header, &barrier_task.header);
+
+  // Just to give us a tail task to wait on.
+  iree_task_nop_t nop_task;
+  iree_task_nop_initialize(&scope_, &nop_task);
+  iree_task_set_completion_task(&task_b.header, &nop_task.header);
+  iree_task_set_completion_task(&task_c.header, &nop_task.header);
+  iree_task_set_completion_task(&task_d.header, &nop_task.header);
+
+  IREE_ASSERT_OK(SubmitTasksAndWaitIdle(&task_a.header, &nop_task.header));
+  EXPECT_EQ(TASK_A, task_ctx.tasks_called);
+  EXPECT_THAT(Status(iree_task_scope_consume_status(&scope_)),
+              StatusIs(StatusCode::kDataLoss));
+}
+
 }  // namespace