Diffstat (limited to 'fs/f2fs/segment.h')
-rw-r--r--  fs/f2fs/segment.h  162
1 file changed, 99 insertions(+), 63 deletions(-)
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 55973f7b0330..2495bec1c621 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -45,16 +45,26 @@
(secno == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno / \
sbi->segs_per_sec)) \
-#define START_BLOCK(sbi, segno) \
- (SM_I(sbi)->seg0_blkaddr + \
+#define MAIN_BLKADDR(sbi) (SM_I(sbi)->main_blkaddr)
+#define SEG0_BLKADDR(sbi) (SM_I(sbi)->seg0_blkaddr)
+
+#define MAIN_SEGS(sbi) (SM_I(sbi)->main_segments)
+#define MAIN_SECS(sbi) (sbi->total_sections)
+
+#define TOTAL_SEGS(sbi) (SM_I(sbi)->segment_count)
+#define TOTAL_BLKS(sbi) (TOTAL_SEGS(sbi) << sbi->log_blocks_per_seg)
+
+#define MAX_BLKADDR(sbi) (SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
+#define SEGMENT_SIZE(sbi) (1ULL << (sbi->log_blocksize + \
+ sbi->log_blocks_per_seg))
+
+#define START_BLOCK(sbi, segno) (SEG0_BLKADDR(sbi) + \
(GET_R2L_SEGNO(FREE_I(sbi), segno) << sbi->log_blocks_per_seg))
+
#define NEXT_FREE_BLKADDR(sbi, curseg) \
(START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff)
-#define MAIN_BASE_BLOCK(sbi) (SM_I(sbi)->main_blkaddr)
-
-#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr) \
- ((blk_addr) - SM_I(sbi)->seg0_blkaddr)
+#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr) ((blk_addr) - SEG0_BLKADDR(sbi))
#define GET_SEGNO_FROM_SEG0(sbi, blk_addr) \
(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> sbi->log_blocks_per_seg)
#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr) \
@@ -77,23 +87,21 @@
#define SIT_ENTRY_OFFSET(sit_i, segno) \
(segno % sit_i->sents_per_block)
-#define SIT_BLOCK_OFFSET(sit_i, segno) \
+#define SIT_BLOCK_OFFSET(segno) \
(segno / SIT_ENTRY_PER_BLOCK)
-#define START_SEGNO(sit_i, segno) \
- (SIT_BLOCK_OFFSET(sit_i, segno) * SIT_ENTRY_PER_BLOCK)
+#define START_SEGNO(segno) \
+ (SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK)
#define SIT_BLK_CNT(sbi) \
- ((TOTAL_SEGS(sbi) + SIT_ENTRY_PER_BLOCK - 1) / SIT_ENTRY_PER_BLOCK)
+ ((MAIN_SEGS(sbi) + SIT_ENTRY_PER_BLOCK - 1) / SIT_ENTRY_PER_BLOCK)
#define f2fs_bitmap_size(nr) \
(BITS_TO_LONGS(nr) * sizeof(unsigned long))
-#define TOTAL_SEGS(sbi) (SM_I(sbi)->main_segments)
-#define TOTAL_SECS(sbi) (sbi->total_sections)
-#define SECTOR_FROM_BLOCK(sbi, blk_addr) \
- (((sector_t)blk_addr) << (sbi)->log_sectors_per_block)
-#define SECTOR_TO_BLOCK(sbi, sectors) \
- (sectors >> (sbi)->log_sectors_per_block)
-#define MAX_BIO_BLOCKS(max_hw_blocks) \
- (min((int)max_hw_blocks, BIO_MAX_PAGES))
+#define SECTOR_FROM_BLOCK(blk_addr) \
+ (((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK)
+#define SECTOR_TO_BLOCK(sectors) \
+ (sectors >> F2FS_LOG_SECTORS_PER_BLOCK)
+#define MAX_BIO_BLOCKS(sbi) \
+ ((int)min((int)max_hw_blocks(sbi), BIO_MAX_PAGES))
/*
* indicate a block allocation direction: RIGHT and LEFT.
@@ -167,6 +175,11 @@ struct segment_allocation {
void (*allocate_segment)(struct f2fs_sb_info *, int, bool);
};
+struct inmem_pages {
+ struct list_head list;
+ struct page *page;
+};
+
struct sit_info {
const struct segment_allocation *s_ops;
@@ -237,6 +250,12 @@ struct curseg_info {
unsigned int next_segno; /* preallocated segment */
};
+struct sit_entry_set {
+ struct list_head set_list; /* link with all sit sets */
+ unsigned int start_segno; /* start segno of sits in set */
+ unsigned int entry_cnt; /* the # of sit entries in set */
+};
+
/*
* inline functions
*/
@@ -316,7 +335,7 @@ static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
clear_bit(segno, free_i->free_segmap);
free_i->free_segments++;
- next = find_next_bit(free_i->free_segmap, TOTAL_SEGS(sbi), start_segno);
+ next = find_next_bit(free_i->free_segmap, MAIN_SEGS(sbi), start_segno);
if (next >= start_segno + sbi->segs_per_sec) {
clear_bit(secno, free_i->free_secmap);
free_i->free_sections++;
@@ -430,8 +449,10 @@ static inline int reserved_sections(struct f2fs_sb_info *sbi)
static inline bool need_SSR(struct f2fs_sb_info *sbi)
{
- return (prefree_segments(sbi) / sbi->segs_per_sec)
- + free_sections(sbi) < overprovision_sections(sbi);
+ int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
+ int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
+ return free_sections(sbi) <= (node_secs + 2 * dent_secs +
+ reserved_sections(sbi) + 1);
}
static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
@@ -466,48 +487,47 @@ static inline int utilization(struct f2fs_sb_info *sbi)
 * F2FS_IPU_UTIL - if FS utilization is over threshold,
 * F2FS_IPU_SSR_UTIL - if SSR mode is activated and FS utilization is over
 * threshold,
+ * F2FS_IPU_FSYNC - activated in the fsync path only for high-performance flash
+ * storage. IPU will be triggered only if the # of dirty
+ * pages exceeds min_fsync_blocks.
 * F2FS_IPU_DISABLE - disable IPU. (=default option)
*/
#define DEF_MIN_IPU_UTIL 70
+#define DEF_MIN_FSYNC_BLOCKS 8
enum {
F2FS_IPU_FORCE,
F2FS_IPU_SSR,
F2FS_IPU_UTIL,
F2FS_IPU_SSR_UTIL,
- F2FS_IPU_DISABLE,
+ F2FS_IPU_FSYNC,
};
static inline bool need_inplace_update(struct inode *inode)
{
- struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ unsigned int policy = SM_I(sbi)->ipu_policy;
/* IPU can be done only for the user data */
- if (S_ISDIR(inode->i_mode))
+ if (S_ISDIR(inode->i_mode) || f2fs_is_atomic_file(inode))
return false;
- /* this is only set during fdatasync */
- if (is_inode_flag_set(F2FS_I(inode), FI_NEED_IPU))
+ if (policy & (0x1 << F2FS_IPU_FORCE))
+ return true;
+ if (policy & (0x1 << F2FS_IPU_SSR) && need_SSR(sbi))
+ return true;
+ if (policy & (0x1 << F2FS_IPU_UTIL) &&
+ utilization(sbi) > SM_I(sbi)->min_ipu_util)
+ return true;
+ if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && need_SSR(sbi) &&
+ utilization(sbi) > SM_I(sbi)->min_ipu_util)
return true;
- switch (SM_I(sbi)->ipu_policy) {
- case F2FS_IPU_FORCE:
+ /* this is only set during fdatasync */
+ if (policy & (0x1 << F2FS_IPU_FSYNC) &&
+ is_inode_flag_set(F2FS_I(inode), FI_NEED_IPU))
return true;
- case F2FS_IPU_SSR:
- if (need_SSR(sbi))
- return true;
- break;
- case F2FS_IPU_UTIL:
- if (utilization(sbi) > SM_I(sbi)->min_ipu_util)
- return true;
- break;
- case F2FS_IPU_SSR_UTIL:
- if (need_SSR(sbi) && utilization(sbi) > SM_I(sbi)->min_ipu_util)
- return true;
- break;
- case F2FS_IPU_DISABLE:
- break;
- }
+
return false;
}
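
Note (an illustrative sketch, not part of the patch): the rewritten need_inplace_update() treats ipu_policy as a bitmask, so several in-place-update triggers can be enabled at the same time instead of picking exactly one via the old switch statement. A minimal user-space sketch of that decision shape follows, where struct fake_state and decide_ipu() are made-up stand-ins for the in-kernel sbi/inode state:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative copy of the policy bits added above (not the kernel header). */
enum {
	F2FS_IPU_FORCE,
	F2FS_IPU_SSR,
	F2FS_IPU_UTIL,
	F2FS_IPU_SSR_UTIL,
	F2FS_IPU_FSYNC,
};

/* Hypothetical stand-in for the state need_inplace_update() consults. */
struct fake_state {
	unsigned int ipu_policy;	/* bitmask of 1 << F2FS_IPU_* */
	bool need_ssr;			/* result of need_SSR() */
	int utilization;		/* utilization(sbi), in percent */
	int min_ipu_util;		/* SM_I(sbi)->min_ipu_util */
	bool fdatasync_pending;		/* FI_NEED_IPU set during fdatasync */
};

/* Same structure as the rewritten need_inplace_update(): every enabled bit is
 * tested independently, so policies compose instead of being mutually exclusive. */
static bool decide_ipu(const struct fake_state *s)
{
	if (s->ipu_policy & (1 << F2FS_IPU_FORCE))
		return true;
	if ((s->ipu_policy & (1 << F2FS_IPU_SSR)) && s->need_ssr)
		return true;
	if ((s->ipu_policy & (1 << F2FS_IPU_UTIL)) &&
	    s->utilization > s->min_ipu_util)
		return true;
	if ((s->ipu_policy & (1 << F2FS_IPU_SSR_UTIL)) && s->need_ssr &&
	    s->utilization > s->min_ipu_util)
		return true;
	if ((s->ipu_policy & (1 << F2FS_IPU_FSYNC)) && s->fdatasync_pending)
		return true;
	return false;
}

int main(void)
{
	/* Enable the SSR and FSYNC triggers together, something the old
	 * single-valued policy could not express. */
	struct fake_state s = {
		.ipu_policy = (1 << F2FS_IPU_SSR) | (1 << F2FS_IPU_FSYNC),
		.need_ssr = false,
		.utilization = 40,
		.min_ipu_util = 70,
		.fdatasync_pending = true,
	};

	printf("in-place update? %s\n", decide_ipu(&s) ? "yes" : "no");
	return 0;
}
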
@@ -534,28 +554,21 @@ static inline unsigned short curseg_blkoff(struct f2fs_sb_info *sbi, int type)
#ifdef CONFIG_F2FS_CHECK_FS
static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
{
- unsigned int end_segno = SM_I(sbi)->segment_count - 1;
- BUG_ON(segno > end_segno);
+ BUG_ON(segno > TOTAL_SEGS(sbi) - 1);
}
static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
{
- struct f2fs_sm_info *sm_info = SM_I(sbi);
- block_t total_blks = sm_info->segment_count << sbi->log_blocks_per_seg;
- block_t start_addr = sm_info->seg0_blkaddr;
- block_t end_addr = start_addr + total_blks - 1;
- BUG_ON(blk_addr < start_addr);
- BUG_ON(blk_addr > end_addr);
+ BUG_ON(blk_addr < SEG0_BLKADDR(sbi));
+ BUG_ON(blk_addr >= MAX_BLKADDR(sbi));
}
/*
- * Summary block is always treated as invalid block
+ * Summary block is always treated as an invalid block
*/
static inline void check_block_count(struct f2fs_sb_info *sbi,
int segno, struct f2fs_sit_entry *raw_sit)
{
- struct f2fs_sm_info *sm_info = SM_I(sbi);
- unsigned int end_segno = sm_info->segment_count - 1;
bool is_valid = test_bit_le(0, raw_sit->valid_map) ? true : false;
int valid_blocks = 0;
int cur_pos = 0, next_pos;
@@ -564,7 +577,7 @@ static inline void check_block_count(struct f2fs_sb_info *sbi,
BUG_ON(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg);
/* check boundary of a given segment number */
- BUG_ON(segno > end_segno);
+ BUG_ON(segno > TOTAL_SEGS(sbi) - 1);
/* check bitmap with valid block count */
do {
@@ -583,16 +596,39 @@ static inline void check_block_count(struct f2fs_sb_info *sbi,
BUG_ON(GET_SIT_VBLOCKS(raw_sit) != valid_blocks);
}
#else
-#define check_seg_range(sbi, segno)
-#define verify_block_addr(sbi, blk_addr)
-#define check_block_count(sbi, segno, raw_sit)
+static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
+{
+ if (segno > TOTAL_SEGS(sbi) - 1)
+ sbi->need_fsck = true;
+}
+
+static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
+{
+ if (blk_addr < SEG0_BLKADDR(sbi) || blk_addr >= MAX_BLKADDR(sbi))
+ sbi->need_fsck = true;
+}
+
+/*
+ * Summary block is always treated as an invalid block
+ */
+static inline void check_block_count(struct f2fs_sb_info *sbi,
+ int segno, struct f2fs_sit_entry *raw_sit)
+{
+ /* check segment usage */
+ if (GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg)
+ sbi->need_fsck = true;
+
+ /* check boundary of a given segment number */
+ if (segno > TOTAL_SEGS(sbi) - 1)
+ sbi->need_fsck = true;
+}
#endif
static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
unsigned int start)
{
struct sit_info *sit_i = SIT_I(sbi);
- unsigned int offset = SIT_BLOCK_OFFSET(sit_i, start);
+ unsigned int offset = SIT_BLOCK_OFFSET(start);
block_t blk_addr = sit_i->sit_base_addr + offset;
check_seg_range(sbi, start);
@@ -619,7 +655,7 @@ static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi,
static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
{
- unsigned int block_off = SIT_BLOCK_OFFSET(sit_i, start);
+ unsigned int block_off = SIT_BLOCK_OFFSET(start);
if (f2fs_test_bit(block_off, sit_i->sit_bitmap))
f2fs_clear_bit(block_off, sit_i->sit_bitmap);
@@ -666,7 +702,7 @@ static inline unsigned int max_hw_blocks(struct f2fs_sb_info *sbi)
{
struct block_device *bdev = sbi->sb->s_bdev;
struct request_queue *q = bdev_get_queue(bdev);
- return SECTOR_TO_BLOCK(sbi, queue_max_sectors(q));
+ return SECTOR_TO_BLOCK(queue_max_sectors(q));
}
/*
@@ -683,7 +719,7 @@ static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
else if (type == NODE)
return 3 * sbi->blocks_per_seg;
else if (type == META)
- return MAX_BIO_BLOCKS(max_hw_blocks(sbi));
+ return MAX_BIO_BLOCKS(sbi);
else
return 0;
}
@@ -706,7 +742,7 @@ static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
else if (type == NODE)
desired = 3 * max_hw_blocks(sbi);
else
- desired = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
+ desired = MAX_BIO_BLOCKS(sbi);
wbc->nr_to_write = desired;
return desired - nr_to_write;
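
Aside (a sketch, not part of the diff): SECTOR_FROM_BLOCK() and SECTOR_TO_BLOCK() now shift by the constant F2FS_LOG_SECTORS_PER_BLOCK rather than a per-superblock field, so the macros no longer take sbi and max_hw_blocks() can convert queue_max_sectors() directly. A standalone example assuming the usual layout of 4 KiB blocks over 512-byte sectors (log2 = 3); that value and the sample sector count are assumptions for illustration only:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* Assumption: 4 KiB f2fs blocks over 512-byte sectors, i.e. 8 sectors per block. */
#define F2FS_LOG_SECTORS_PER_BLOCK	3

#define SECTOR_FROM_BLOCK(blk_addr)				\
	(((sector_t)(blk_addr)) << F2FS_LOG_SECTORS_PER_BLOCK)
#define SECTOR_TO_BLOCK(sectors)				\
	((sectors) >> F2FS_LOG_SECTORS_PER_BLOCK)

int main(void)
{
	/* e.g. what queue_max_sectors() might report for a 1 MiB request limit */
	sector_t max_sectors = 2048;

	printf("block 100 starts at sector %llu\n",
	       (unsigned long long)SECTOR_FROM_BLOCK(100));
	printf("%llu sectors == %llu blocks\n",
	       (unsigned long long)max_sectors,
	       (unsigned long long)SECTOR_TO_BLOCK(max_sectors));
	return 0;
}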