author		Alan Huang <mmpgouride@gmail.com>	2024-10-03 03:06:32 +0800
committer	Kent Overstreet <kent.overstreet@linux.dev>	2025-01-14 10:45:18 -0500
commit		b169138d482980d31207c2d1706a2be2529978cf (patch)
tree		c3b1547efb4c88703e58d44d497e6a9b1364c9fe
parent		bcachefs: Introduce lock_graph_pop_from (diff)
bcachefs: Only abort the transactions in the cycle
When the cycle doesn't involve the initiator of the cycle detection, we might choose a transaction that is not part of the cycle to abort. Aborting such a transaction won't break the cycle, so this patch restricts the choice to transactions that are actually in the cycle.

Signed-off-by: Alan Huang <mmpgouride@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
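For illustration, here is a minimal userspace sketch of the victim-selection change; struct waiter, pick_abort_victim() and the fields below are hypothetical stand-ins, not the kernel's real types or API. The point it mirrors is that the search for the transaction to abort starts at the entry where the cycle closes ("from") instead of at the start of the lock graph:

/*
 * Minimal, self-contained sketch (userspace, not kernel code).  All names
 * are illustrative; only the idea follows the patch: pick the abort victim
 * from the cycle members only, i.e. from the entry where the cycle closes
 * to the end of the lock graph.
 */
#include <stddef.h>
#include <stdio.h>

struct waiter {
	int	trans_id;	/* which transaction this entry stands for */
	int	abort_pref;	/* cf. btree_trans_abort_preference() */
};

struct lock_graph {
	struct waiter	g[8];
	unsigned	nr;
};

static struct waiter *pick_abort_victim(struct lock_graph *g,
					struct waiter *from)
{
	struct waiter *i, *victim = NULL;
	int best = 0;

	/* Pre-patch behaviour effectively started this scan at g->g. */
	for (i = from; i < g->g + g->nr; i++)
		if (i->abort_pref > best) {
			victim = i;
			best = i->abort_pref;
		}
	return victim;
}

int main(void)
{
	struct lock_graph g = {
		.g = {
			{ .trans_id = 1, .abort_pref = 3 },	/* leads into the cycle */
			{ .trans_id = 2, .abort_pref = 1 },	/* cycle: 2 -> 3 -> 2 */
			{ .trans_id = 3, .abort_pref = 2 },
		},
		.nr = 3,
	};

	/* The cycle was detected to close at entry index 1: */
	struct waiter *victim = pick_abort_victim(&g, &g.g[1]);

	printf("aborting transaction %d\n", victim->trans_id);
	return 0;
}

Scanning from g->g, as before the patch, would pick transaction 1 here, which is not in the cycle; scanning from the cycle entry picks transaction 3 instead. The remaining direct lock_graph_remove_non_waiters(&g, g.g) call in the last hunk passes the first graph entry, so that path still walks the whole graph.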
-rw-r--r--	fs/bcachefs/btree_locking.c	21
1 file changed, 14 insertions, 7 deletions
diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c
index ec72a8adf713..15ef9f71ca43 100644
--- a/fs/bcachefs/btree_locking.c
+++ b/fs/bcachefs/btree_locking.c
@@ -130,11 +130,17 @@ static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
 	__lock_graph_down(g, trans);
 }
 
-static bool lock_graph_remove_non_waiters(struct lock_graph *g)
+static bool lock_graph_remove_non_waiters(struct lock_graph *g,
+					  struct trans_waiting_for_lock *from)
 {
 	struct trans_waiting_for_lock *i;
 
-	for (i = g->g + 1; i < g->g + g->nr; i++)
+	if (from->trans->locking != from->node_want) {
+		lock_graph_pop_from(g, from);
+		return true;
+	}
+
+	for (i = from + 1; i < g->g + g->nr; i++)
 		if (i->trans->locking != i->node_want ||
 		    i->trans->locking_wait.start_time != i[-1].lock_start_time) {
 			lock_graph_pop_from(g, i);
@@ -184,13 +190,14 @@ static int btree_trans_abort_preference(struct btree_trans *trans)
 	return 3;
 }
 
-static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle)
+static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle,
+				struct trans_waiting_for_lock *from)
 {
 	struct trans_waiting_for_lock *i, *abort = NULL;
 	unsigned best = 0, pref;
 	int ret;
 
-	if (lock_graph_remove_non_waiters(g))
+	if (lock_graph_remove_non_waiters(g, from))
 		return 0;
 
 	/* Only checking, for debugfs: */
@@ -200,7 +207,7 @@ static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle)
 		goto out;
 	}
 
-	for (i = g->g; i < g->g + g->nr; i++) {
+	for (i = from; i < g->g + g->nr; i++) {
 		pref = btree_trans_abort_preference(i->trans);
 		if (pref > best) {
 			abort = i;
@@ -247,7 +254,7 @@ static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans,
 	for (i = g->g; i < g->g + g->nr; i++)
 		if (i->trans == trans) {
 			closure_put(&trans->ref);
-			return break_cycle(g, cycle);
+			return break_cycle(g, cycle, i);
 		}
 
 	if (g->nr == ARRAY_SIZE(g->g)) {
@@ -339,7 +346,7 @@ next:
 				 * structures - which means it can't be blocked
 				 * waiting on a lock:
 				 */
-				if (!lock_graph_remove_non_waiters(&g)) {
+				if (!lock_graph_remove_non_waiters(&g, g.g)) {
 					/*
 					 * If lock_graph_remove_non_waiters()
 					 * didn't do anything, it must be