author      Dave Chinner <dchinner@redhat.com>    2022-07-07 18:51:59 +1000
committer   Dave Chinner <david@fromorbit.com>    2022-07-07 18:51:59 +1000
commit      1dd2a2c18e314ad89200f8296c86dd4ecd53dea6 (patch)
tree        682de80e2ab7e4ce98095fe1aaa6400135ec4815 /fs/xfs/xfs_log_cil.c
parent      xfs: implement percpu cil space used calculation (diff)
download    linux-dev-1dd2a2c18e314ad89200f8296c86dd4ecd53dea6.tar.xz
            linux-dev-1dd2a2c18e314ad89200f8296c86dd4ecd53dea6.zip
xfs: track CIL ticket reservation in percpu structure
To get it out from under the cil spinlock.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
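The change follows the same pattern as the parent patch: per-transaction reservation accounting moves from a shared counter updated under xc_cil_lock into the per-CPU xlog_cil_pcp structure, and the per-CPU values are folded back into the context ticket only when the context is aggregated (xlog_cil_push_pcp_aggregate) or a CPU goes offline (xlog_cil_pcp_dead). Below is a minimal userspace sketch of that accounting pattern, not the XFS code itself; all fake_* names, NR_FAKE_CPUS and the fixed "CPU" slots are illustrative assumptions.

/*
 * Minimal userspace sketch of the per-CPU reservation accounting pattern,
 * assuming a fixed number of "CPU" slots. All fake_* names are illustrative;
 * this is not the XFS code, only the data flow it implements.
 */
#include <stdio.h>

#define NR_FAKE_CPUS    4

struct fake_ticket {
        long    t_curr_res;             /* shared log ticket reservation */
};

struct fake_cil_pcp {
        long    space_reserved;         /* reservation accumulated on this CPU */
};

static struct fake_ticket      ctx_ticket;
static struct fake_cil_pcp     cilpcp[NR_FAKE_CPUS];

/*
 * Commit path (cf. xlog_cil_insert_items): only the local per-CPU counter is
 * updated, so no shared spinlock is needed. In the kernel the update stays on
 * one CPU because it runs between get_cpu_ptr() and put_cpu_ptr().
 */
static void fake_insert_items(int cpu, long ctx_res)
{
        cilpcp[cpu].space_reserved += ctx_res;
}

/*
 * Push path (cf. xlog_cil_push_pcp_aggregate): runs while the context is
 * being switched out, so it can safely walk every per-CPU counter, fold it
 * into the context ticket and zero it for the next context.
 */
static void fake_push_aggregate(void)
{
        int cpu;

        for (cpu = 0; cpu < NR_FAKE_CPUS; cpu++) {
                ctx_ticket.t_curr_res += cilpcp[cpu].space_reserved;
                cilpcp[cpu].space_reserved = 0;
        }
}

int main(void)
{
        /* two commits charge reservation on different CPUs */
        fake_insert_items(0, 128);
        fake_insert_items(1, 256);

        /* checkpoint push folds both back into the ticket */
        fake_push_aggregate();
        printf("aggregated reservation: %ld\n", ctx_ticket.t_curr_res);
        return 0;
}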
Diffstat (limited to 'fs/xfs/xfs_log_cil.c')
-rw-r--r--    fs/xfs/xfs_log_cil.c    16
1 file changed, 12 insertions, 4 deletions
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index 2d16add7a8d4..e38e10082da2 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -125,6 +125,9 @@ xlog_cil_push_pcp_aggregate(
 	for_each_online_cpu(cpu) {
 		cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
+		ctx->ticket->t_curr_res += cilpcp->space_reserved;
+		cilpcp->space_reserved = 0;
+
 		/*
 		 * We're in the middle of switching cil contexts. Reset the
 		 * counter we use to detect when the current context is nearing
@@ -608,6 +611,7 @@ xlog_cil_insert_items(
 		ctx_res = split_res * tp->t_ticket->t_iclog_hdrs;
 		atomic_sub(tp->t_ticket->t_iclog_hdrs, &cil->xc_iclog_hdrs);
 	}
+	cilpcp->space_reserved += ctx_res;
 
 	/*
 	 * Accurately account when over the soft limit, otherwise fold the
@@ -632,14 +636,12 @@ xlog_cil_insert_items(
 	}
 	put_cpu_ptr(cilpcp);
 
-	spin_lock(&cil->xc_cil_lock);
-	ctx->ticket->t_curr_res += ctx_res;
-
 	/*
 	 * Now (re-)position everything modified at the tail of the CIL.
 	 * We do this here so we only need to take the CIL lock once during
 	 * the transaction commit.
 	 */
+	spin_lock(&cil->xc_cil_lock);
 	list_for_each_entry(lip, &tp->t_items, li_trans) {
 		/* Skip items which aren't dirty in this transaction. */
 		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
@@ -1746,9 +1748,15 @@ xlog_cil_pcp_dead(
 {
 	struct xfs_cil		*cil = log->l_cilp;
 	struct xlog_cil_pcp	*cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
+	struct xfs_cil_ctx	*ctx;
 
 	down_write(&cil->xc_ctx_lock);
-	atomic_add(cilpcp->space_used, &cil->xc_ctx->space_used);
+	ctx = cil->xc_ctx;
+	if (ctx->ticket)
+		ctx->ticket->t_curr_res += cilpcp->space_reserved;
+	cilpcp->space_reserved = 0;
+
+	atomic_add(cilpcp->space_used, &ctx->space_used);
 	cilpcp->space_used = 0;
 	up_write(&cil->xc_ctx_lock);
 }
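The last hunk is the CPU hotplug counterpart: the push-time aggregation walks only online CPUs (for_each_online_cpu above), so xlog_cil_pcp_dead() folds a dying CPU's space_reserved back into the current context's ticket, when one exists, under the xc_ctx_lock write lock, and zeroes the counter so it cannot be counted again. Continuing the illustrative userspace sketch after the commit message (it reuses the hypothetical fake_ticket and cilpcp definitions), the equivalent step might look like:

/*
 * Rough analogue of xlog_cil_pcp_dead() for the sketch above: when a "CPU"
 * slot is retired, fold any reservation it accumulated back into the shared
 * ticket so it is not lost, and zero the slot so a later aggregation pass
 * cannot count it twice. The NULL check mirrors the ctx->ticket test in the
 * real code.
 */
static void fake_cpu_dead(int cpu, struct fake_ticket *ticket)
{
        if (ticket)
                ticket->t_curr_res += cilpcp[cpu].space_reserved;
        cilpcp[cpu].space_reserved = 0;
}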