// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_LOG_H__
#define __XFS_LOG_H__

struct xfs_cil_ctx;

struct xfs_log_vec {
	struct xfs_log_vec	*lv_next;	/* next lv in build list */
	int			lv_niovecs;	/* number of iovecs in lv */
	struct xfs_log_iovec	*lv_iovecp;	/* iovec array */
	struct xfs_log_item	*lv_item;	/* owner */
	char			*lv_buf;	/* formatted buffer */
	int			lv_bytes;	/* accounted space in buffer */
	int			lv_buf_len;	/* aligned size of buffer */
	int			lv_size;	/* size of allocated lv */
};

#define XFS_LOG_VEC_ORDERED	(-1)

static inline void *
xlog_prepare_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		uint type)
{
	struct xfs_log_iovec *vec = *vecp;

	if (vec) {
		ASSERT(vec - lv->lv_iovecp < lv->lv_niovecs);
		vec++;
	} else {
		vec = &lv->lv_iovecp[0];
	}

	vec->i_type = type;
	vec->i_addr = lv->lv_buf + lv->lv_buf_len;

	ASSERT(IS_ALIGNED((unsigned long)vec->i_addr, sizeof(uint64_t)));

	*vecp = vec;
	return vec->i_addr;
}

/*
 * We need to make sure the next buffer is naturally aligned for the biggest
 * basic data type we put into it. We already accounted for this padding when
 * sizing the buffer.
 *
 * However, this padding does not get written into the log, and hence we have
 * to track the space used by the log vectors separately to prevent log space
 * hangs due to inaccurate accounting (i.e. a leak) of the used log space
 * through the CIL context ticket.
 */
static inline void
xlog_finish_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec *vec, int len)
{
	lv->lv_buf_len += round_up(len, sizeof(uint64_t));
	lv->lv_bytes += len;
	vec->i_len = len;
}

static inline void *
xlog_copy_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		uint type, void *data, int len)
{
	void *buf;

	buf = xlog_prepare_iovec(lv, vecp, type);
	memcpy(buf, data, len);
	xlog_finish_iovec(lv, *vecp, len);
	return buf;
}

/*
 * By comparing each component, we don't have to worry about extra
 * endian issues in treating two 32 bit numbers as one 64 bit number
 */
static inline xfs_lsn_t	_lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
{
	if (CYCLE_LSN(lsn1) != CYCLE_LSN(lsn2))
		return (CYCLE_LSN(lsn1) < CYCLE_LSN(lsn2)) ? -999 : 999;

	if (BLOCK_LSN(lsn1) != BLOCK_LSN(lsn2))
		return (BLOCK_LSN(lsn1) < BLOCK_LSN(lsn2)) ? -999 : 999;

	return 0;
}
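
/*
 * Illustrative example of the comparison above, assuming the usual LSN
 * encoding (CYCLE_LSN() extracts the high 32 bits of the LSN, BLOCK_LSN()
 * the low 32 bits; both macros are defined elsewhere):
 *
 *	lsn1 = 0x0000000500001000	-> cycle 5, block 0x1000
 *	lsn2 = 0x0000000600000001	-> cycle 6, block 0x0001
 *
 * The cycles differ (5 < 6), so _lsn_cmp() returns -999 from the first
 * comparison without ever treating the two on-disk 32 bit words as a
 * single 64 bit number.
 */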
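
/*
 * Sketch of how the iovec helpers above are typically driven from a log
 * item's ->iop_format handler. The region type, data and length names
 * here are illustrative only; real callers live in the per-item
 * formatting code:
 *
 *	struct xfs_log_iovec	*vecp = NULL;
 *	void			*buf;
 *
 *	buf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_ICORE);
 *	memcpy(buf, &item_data, item_len);
 *	xlog_finish_iovec(lv, vecp, item_len);
 *
 * After this, lv_bytes has grown by item_len (the space actually written
 * to the log), while lv_buf_len has grown by round_up(item_len, 8) (the
 * space consumed in lv_buf, so the next region stays 64 bit aligned).
 */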