Diffstat (limited to 'drivers/staging/lustre/lustre/osc')
-rw-r--r--  drivers/staging/lustre/lustre/osc/lproc_osc.c        |  53
-rw-r--r--  drivers/staging/lustre/lustre/osc/osc_cache.c        | 327
-rw-r--r--  drivers/staging/lustre/lustre/osc/osc_cl_internal.h  |  41
-rw-r--r--  drivers/staging/lustre/lustre/osc/osc_dev.c          |  10
-rw-r--r--  drivers/staging/lustre/lustre/osc/osc_internal.h     |   8
-rw-r--r--  drivers/staging/lustre/lustre/osc/osc_io.c           |  32
-rw-r--r--  drivers/staging/lustre/lustre/osc/osc_lock.c         | 140
-rw-r--r--  drivers/staging/lustre/lustre/osc/osc_object.c       |   6
-rw-r--r--  drivers/staging/lustre/lustre/osc/osc_page.c         | 207
-rw-r--r--  drivers/staging/lustre/lustre/osc/osc_quota.c        |  39
-rw-r--r--  drivers/staging/lustre/lustre/osc/osc_request.c      | 339

11 files changed, 627 insertions(+), 575 deletions(-)
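
Much of the lproc_osc.c churn below replaces lprocfs_write_helper() with explicit kstrtoint()/kstrtouint() parsing in the sysfs store handlers. For reference, a minimal standalone sketch of that pattern (struct foo_device, foo_kobj_to_dev() and the fd_seconds field are hypothetical names, not the Lustre code) looks like this:

/*
 * Sketch only: parse a decimal sysfs write with kstrtoint() and
 * reject negative values, mirroring the contention_seconds_store()
 * hunk below.  struct foo_device and foo_kobj_to_dev() are
 * hypothetical.
 */
static ssize_t foo_seconds_store(struct kobject *kobj,
				 struct attribute *attr,
				 const char *buffer, size_t count)
{
	struct foo_device *fd = foo_kobj_to_dev(kobj);	/* hypothetical */
	int rc;
	int val;

	rc = kstrtoint(buffer, 10, &val);	/* 0 on success, -errno on error */
	if (rc)
		return rc;

	if (val < 0)
		return -EINVAL;

	fd->fd_seconds = val;
	return count;	/* a store handler returns the bytes it consumed */
}

Unlike lprocfs_write_helper(), kstrtoint() propagates a precise -errno to the writer, which is why the converted handlers above return rc directly.
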
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c index 1091536fc90d..57c43c506ef2 100644 --- a/drivers/staging/lustre/lustre/osc/lproc_osc.c +++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c @@ -381,7 +381,7 @@ static int osc_checksum_type_seq_show(struct seq_file *m, void *v) DECLARE_CKSUM_NAME; - if (obd == NULL) + if (!obd) return 0; for (i = 0; i < ARRAY_SIZE(cksum_name); i++) { @@ -397,8 +397,8 @@ static int osc_checksum_type_seq_show(struct seq_file *m, void *v) } static ssize_t osc_checksum_type_seq_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *off) + const char __user *buffer, + size_t count, loff_t *off) { struct obd_device *obd = ((struct seq_file *)file->private_data)->private; int i; @@ -406,7 +406,7 @@ static ssize_t osc_checksum_type_seq_write(struct file *file, DECLARE_CKSUM_NAME; char kernbuf[10]; - if (obd == NULL) + if (!obd) return 0; if (count > sizeof(kernbuf) - 1) @@ -422,8 +422,8 @@ static ssize_t osc_checksum_type_seq_write(struct file *file, if (((1 << i) & obd->u.cli.cl_supp_cksum_types) == 0) continue; if (!strcmp(kernbuf, cksum_name[i])) { - obd->u.cli.cl_cksum_type = 1 << i; - return count; + obd->u.cli.cl_cksum_type = 1 << i; + return count; } } return -EINVAL; @@ -480,9 +480,19 @@ static ssize_t contention_seconds_store(struct kobject *kobj, struct obd_device *obd = container_of(kobj, struct obd_device, obd_kobj); struct osc_device *od = obd2osc_dev(obd); + int rc; + int val; + + rc = kstrtoint(buffer, 10, &val); + if (rc) + return rc; + + if (val < 0) + return -EINVAL; + + od->od_contention_time = val; - return lprocfs_write_helper(buffer, count, &od->od_contention_time) ?: - count; + return count; } LUSTRE_RW_ATTR(contention_seconds); @@ -505,9 +515,16 @@ static ssize_t lockless_truncate_store(struct kobject *kobj, struct obd_device *obd = container_of(kobj, struct obd_device, obd_kobj); struct osc_device *od = obd2osc_dev(obd); + int rc; + unsigned int val; - return lprocfs_write_helper(buffer, count, &od->od_lockless_truncate) ?: - count; + rc = kstrtouint(buffer, 10, &val); + if (rc) + return rc; + + od->od_lockless_truncate = val; + + return count; } LUSTRE_RW_ATTR(lockless_truncate); @@ -635,10 +652,10 @@ static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v) read_cum += r; write_cum += w; seq_printf(seq, "%d:\t\t%10lu %3lu %3lu | %10lu %3lu %3lu\n", - 1 << i, r, pct(r, read_tot), - pct(read_cum, read_tot), w, - pct(w, write_tot), - pct(write_cum, write_tot)); + 1 << i, r, pct(r, read_tot), + pct(read_cum, read_tot), w, + pct(w, write_tot), + pct(write_cum, write_tot)); if (read_cum == read_tot && write_cum == write_tot) break; } @@ -659,10 +676,10 @@ static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v) read_cum += r; write_cum += w; seq_printf(seq, "%d:\t\t%10lu %3lu %3lu | %10lu %3lu %3lu\n", - i, r, pct(r, read_tot), - pct(read_cum, read_tot), w, - pct(w, write_tot), - pct(write_cum, write_tot)); + i, r, pct(r, read_tot), + pct(read_cum, read_tot), w, + pct(w, write_tot), + pct(write_cum, write_tot)); if (read_cum == read_tot && write_cum == write_tot) break; } diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c index 2229419b7184..63363111380c 100644 --- a/drivers/staging/lustre/lustre/osc/osc_cache.c +++ b/drivers/staging/lustre/lustre/osc/osc_cache.c @@ -140,7 +140,7 @@ static const char *oes_strings[] = { static inline struct osc_extent *rb_extent(struct rb_node *n) { - if (n == 
NULL) + if (!n) return NULL; return container_of(n, struct osc_extent, oe_node); @@ -148,7 +148,7 @@ static inline struct osc_extent *rb_extent(struct rb_node *n) static inline struct osc_extent *next_extent(struct osc_extent *ext) { - if (ext == NULL) + if (!ext) return NULL; LASSERT(ext->oe_intree); @@ -157,7 +157,7 @@ static inline struct osc_extent *next_extent(struct osc_extent *ext) static inline struct osc_extent *prev_extent(struct osc_extent *ext) { - if (ext == NULL) + if (!ext) return NULL; LASSERT(ext->oe_intree); @@ -240,7 +240,7 @@ static int osc_extent_sanity_check0(struct osc_extent *ext, goto out; } - if (ext->oe_osclock == NULL && ext->oe_grants > 0) { + if (!ext->oe_osclock && ext->oe_grants > 0) { rc = 90; goto out; } @@ -262,7 +262,8 @@ static int osc_extent_sanity_check0(struct osc_extent *ext, } /* Do not verify page list if extent is in RPC. This is because an - * in-RPC extent is supposed to be exclusively accessible w/o lock. */ + * in-RPC extent is supposed to be exclusively accessible w/o lock. + */ if (ext->oe_state > OES_CACHE) { rc = 0; goto out; @@ -319,7 +320,7 @@ static int osc_extent_is_overlapped(struct osc_object *obj, if (!extent_debug) return 0; - for (tmp = first_extent(obj); tmp != NULL; tmp = next_extent(tmp)) { + for (tmp = first_extent(obj); tmp; tmp = next_extent(tmp)) { if (tmp == ext) continue; if (tmp->oe_end >= ext->oe_start && @@ -346,8 +347,8 @@ static struct osc_extent *osc_extent_alloc(struct osc_object *obj) { struct osc_extent *ext; - ext = kmem_cache_alloc(osc_extent_kmem, GFP_NOFS | __GFP_ZERO); - if (ext == NULL) + ext = kmem_cache_zalloc(osc_extent_kmem, GFP_NOFS); + if (!ext) return NULL; RB_CLEAR_NODE(&ext->oe_node); @@ -415,7 +416,7 @@ static struct osc_extent *osc_extent_search(struct osc_object *obj, struct osc_extent *tmp, *p = NULL; LASSERT(osc_object_is_locked(obj)); - while (n != NULL) { + while (n) { tmp = rb_extent(n); if (index < tmp->oe_start) { n = n->rb_left; @@ -439,7 +440,7 @@ static struct osc_extent *osc_extent_lookup(struct osc_object *obj, struct osc_extent *ext; ext = osc_extent_search(obj, index); - if (ext != NULL && ext->oe_start <= index && index <= ext->oe_end) + if (ext && ext->oe_start <= index && index <= ext->oe_end) return osc_extent_get(ext); return NULL; } @@ -454,7 +455,7 @@ static void osc_extent_insert(struct osc_object *obj, struct osc_extent *ext) LASSERT(ext->oe_intree == 0); LASSERT(ext->oe_obj == obj); LASSERT(osc_object_is_locked(obj)); - while (*n != NULL) { + while (*n) { tmp = rb_extent(*n); parent = *n; @@ -463,7 +464,7 @@ static void osc_extent_insert(struct osc_object *obj, struct osc_extent *ext) else if (ext->oe_start > tmp->oe_end) n = &(*n)->rb_right; else - EASSERTF(0, tmp, EXTSTR, EXTPARA(ext)); + EASSERTF(0, tmp, EXTSTR"\n", EXTPARA(ext)); } rb_link_node(&ext->oe_node, parent, n); rb_insert_color(&ext->oe_node, &obj->oo_root); @@ -533,7 +534,7 @@ static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur, LASSERT(cur->oe_state == OES_CACHE); LASSERT(osc_object_is_locked(obj)); - if (victim == NULL) + if (!victim) return -EINVAL; if (victim->oe_state != OES_CACHE || victim->oe_fsync_wait) @@ -587,7 +588,8 @@ void osc_extent_release(const struct lu_env *env, struct osc_extent *ext) if (ext->oe_trunc_pending) { /* a truncate process is waiting for this extent. * This may happen due to a race, check - * osc_cache_truncate_start(). */ + * osc_cache_truncate_start(). 
+ */ osc_extent_state_set(ext, OES_TRUNC); ext->oe_trunc_pending = 0; } else { @@ -601,7 +603,7 @@ void osc_extent_release(const struct lu_env *env, struct osc_extent *ext) if (ext->oe_urgent) list_move_tail(&ext->oe_link, - &obj->oo_urgent_exts); + &obj->oo_urgent_exts); } osc_object_unlock(obj); @@ -639,11 +641,10 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env, int rc; cur = osc_extent_alloc(obj); - if (cur == NULL) + if (!cur) return ERR_PTR(-ENOMEM); lock = cl_lock_at_pgoff(env, osc2cl(obj), index, NULL, 1, 0); - LASSERT(lock != NULL); LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE); LASSERT(cli->cl_chunkbits >= PAGE_CACHE_SHIFT); @@ -673,14 +674,15 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env, /* grants has been allocated by caller */ LASSERTF(*grants >= chunksize + cli->cl_extent_tax, "%u/%u/%u.\n", *grants, chunksize, cli->cl_extent_tax); - LASSERTF((max_end - cur->oe_start) < max_pages, EXTSTR, EXTPARA(cur)); + LASSERTF((max_end - cur->oe_start) < max_pages, EXTSTR"\n", + EXTPARA(cur)); restart: osc_object_lock(obj); ext = osc_extent_search(obj, cur->oe_start); - if (ext == NULL) + if (!ext) ext = first_extent(obj); - while (ext != NULL) { + while (ext) { loff_t ext_chk_start = ext->oe_start >> ppc_bits; loff_t ext_chk_end = ext->oe_end >> ppc_bits; @@ -691,7 +693,7 @@ restart: /* if covering by different locks, no chance to match */ if (lock != ext->oe_osclock) { EASSERTF(!overlapped(ext, cur), ext, - EXTSTR, EXTPARA(cur)); + EXTSTR"\n", EXTPARA(cur)); ext = next_extent(ext); continue; @@ -705,18 +707,21 @@ restart: /* ok, from now on, ext and cur have these attrs: * 1. covered by the same lock - * 2. contiguous at chunk level or overlapping. */ + * 2. contiguous at chunk level or overlapping. + */ if (overlapped(ext, cur)) { /* cur is the minimum unit, so overlapping means - * full contain. */ + * full contain. + */ EASSERTF((ext->oe_start <= cur->oe_start && ext->oe_end >= cur->oe_end), - ext, EXTSTR, EXTPARA(cur)); + ext, EXTSTR"\n", EXTPARA(cur)); if (ext->oe_state > OES_CACHE || ext->oe_fsync_wait) { /* for simplicity, we wait for this extent to - * finish before going forward. */ + * finish before going forward. + */ conflict = osc_extent_get(ext); break; } @@ -729,17 +734,20 @@ restart: if (ext->oe_state != OES_CACHE || ext->oe_fsync_wait) { /* we can't do anything for a non OES_CACHE extent, or * if there is someone waiting for this extent to be - * flushed, try next one. */ + * flushed, try next one. + */ ext = next_extent(ext); continue; } /* check if they belong to the same rpc slot before trying to * merge. the extents are not overlapped and contiguous at - * chunk level to get here. */ + * chunk level to get here. + */ if (ext->oe_max_end != max_end) { /* if they don't belong to the same RPC slot or - * max_pages_per_rpc has ever changed, do not merge. */ + * max_pages_per_rpc has ever changed, do not merge. + */ ext = next_extent(ext); continue; } @@ -748,7 +756,8 @@ restart: * level so that we know the whole extent is covered by grant * (the pages in the extent are NOT required to be contiguous). * Otherwise, it will be too much difficult to know which - * chunks have grants allocated. */ + * chunks have grants allocated. 
+ */ /* try to do front merge - extend ext's start */ if (chunk + 1 == ext_chk_start) { @@ -768,28 +777,29 @@ restart: *grants -= chunksize; /* try to merge with the next one because we just fill - * in a gap */ + * in a gap + */ if (osc_extent_merge(env, ext, next_extent(ext)) == 0) /* we can save extent tax from next extent */ *grants += cli->cl_extent_tax; found = osc_extent_hold(ext); } - if (found != NULL) + if (found) break; ext = next_extent(ext); } osc_extent_tree_dump(D_CACHE, obj); - if (found != NULL) { - LASSERT(conflict == NULL); + if (found) { + LASSERT(!conflict); if (!IS_ERR(found)) { LASSERT(found->oe_osclock == cur->oe_osclock); OSC_EXTENT_DUMP(D_CACHE, found, "found caching ext for %lu.\n", index); } - } else if (conflict == NULL) { + } else if (!conflict) { /* create a new extent */ EASSERT(osc_extent_is_overlapped(obj, cur) == 0, cur); cur->oe_grants = chunksize + cli->cl_extent_tax; @@ -804,11 +814,12 @@ restart: } osc_object_unlock(obj); - if (conflict != NULL) { - LASSERT(found == NULL); + if (conflict) { + LASSERT(!found); /* waiting for IO to finish. Please notice that it's impossible - * to be an OES_TRUNC extent. */ + * to be an OES_TRUNC extent. + */ rc = osc_extent_wait(env, conflict, OES_INV); osc_extent_put(env, conflict); conflict = NULL; @@ -845,8 +856,7 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext, ext->oe_rc = rc ?: ext->oe_nr_pages; EASSERT(ergo(rc == 0, ext->oe_state == OES_RPC), ext); - list_for_each_entry_safe(oap, tmp, &ext->oe_pages, - oap_pending_item) { + list_for_each_entry_safe(oap, tmp, &ext->oe_pages, oap_pending_item) { list_del_init(&oap->oap_rpc_item); list_del_init(&oap->oap_pending_item); if (last_off <= oap->oap_obj_off) { @@ -865,7 +875,8 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext, last_count != PAGE_CACHE_SIZE) { /* For short writes we shouldn't count parts of pages that * span a whole chunk on the OST side, or our accounting goes - * wrong. Should match the code in filter_grant_check. */ + * wrong. Should match the code in filter_grant_check. + */ int offset = oap->oap_page_off & ~CFS_PAGE_MASK; int count = oap->oap_count + (offset & (blocksize - 1)); int end = (offset + oap->oap_count) & (blocksize - 1); @@ -909,7 +920,8 @@ static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext, osc_object_lock(obj); LASSERT(sanity_check_nolock(ext) == 0); /* `Kick' this extent only if the caller is waiting for it to be - * written out. */ + * written out. + */ if (state == OES_INV && !ext->oe_urgent && !ext->oe_hp && !ext->oe_trunc_pending) { if (ext->oe_state == OES_ACTIVE) { @@ -967,7 +979,8 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index, /* Request new lu_env. * We can't use that env from osc_cache_truncate_start() because - * it's from lov_io_sub and not fully initialized. */ + * it's from lov_io_sub and not fully initialized. 
+ */ env = cl_env_nested_get(&nest); io = &osc_env_info(env)->oti_io; io->ci_obj = cl_object_top(osc2cl(obj)); @@ -976,15 +989,15 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index, goto out; /* discard all pages with index greater then trunc_index */ - list_for_each_entry_safe(oap, tmp, &ext->oe_pages, - oap_pending_item) { + list_for_each_entry_safe(oap, tmp, &ext->oe_pages, oap_pending_item) { struct cl_page *sub = oap2cl_page(oap); struct cl_page *page = cl_page_top(sub); LASSERT(list_empty(&oap->oap_rpc_item)); /* only discard the pages with their index greater than - * trunc_index, and ... */ + * trunc_index, and ... + */ if (sub->cp_index < trunc_index || (sub->cp_index == trunc_index && partial)) { /* accounting how many pages remaining in the chunk @@ -1028,11 +1041,13 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index, pgoff_t last_index; /* if there is no pages in this chunk, we can also free grants - * for the last chunk */ + * for the last chunk + */ if (pages_in_chunk == 0) { /* if this is the 1st chunk and no pages in this chunk, * ext->oe_nr_pages must be zero, so we should be in - * the other if-clause. */ + * the other if-clause. + */ LASSERT(trunc_chunk > 0); --trunc_chunk; ++chunks; @@ -1074,13 +1089,13 @@ static int osc_extent_make_ready(const struct lu_env *env, LASSERT(sanity_check(ext) == 0); /* in locking state, any process should not touch this extent. */ EASSERT(ext->oe_state == OES_LOCKING, ext); - EASSERT(ext->oe_owner != NULL, ext); + EASSERT(ext->oe_owner, ext); OSC_EXTENT_DUMP(D_CACHE, ext, "make ready\n"); list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) { ++page_count; - if (last == NULL || last->oap_obj_off < oap->oap_obj_off) + if (!last || last->oap_obj_off < oap->oap_obj_off) last = oap; /* checking ASYNC_READY is race safe */ @@ -1103,9 +1118,10 @@ static int osc_extent_make_ready(const struct lu_env *env, } LASSERT(page_count == ext->oe_nr_pages); - LASSERT(last != NULL); + LASSERT(last); /* the last page is the only one we need to refresh its count by - * the size of file. */ + * the size of file. 
+ */ if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) { last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE); LASSERT(last->oap_count > 0); @@ -1114,7 +1130,8 @@ static int osc_extent_make_ready(const struct lu_env *env, } /* for the rest of pages, we don't need to call osf_refresh_count() - * because it's known they are not the last page */ + * because it's known they are not the last page + */ list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) { if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) { oap->oap_count = PAGE_CACHE_SIZE - oap->oap_page_off; @@ -1167,9 +1184,10 @@ static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, int *grants) end_index = min(ext->oe_max_end, ((chunk + 1) << ppc_bits) - 1); next = next_extent(ext); - if (next != NULL && next->oe_start <= end_index) { + if (next && next->oe_start <= end_index) { /* complex mode - overlapped with the next extent, - * this case will be handled by osc_extent_find() */ + * this case will be handled by osc_extent_find() + */ rc = -EAGAIN; goto out; } @@ -1197,7 +1215,7 @@ static void osc_extent_tree_dump0(int level, struct osc_object *obj, /* osc_object_lock(obj); */ cnt = 1; - for (ext = first_extent(obj); ext != NULL; ext = next_extent(ext)) + for (ext = first_extent(obj); ext; ext = next_extent(ext)) OSC_EXTENT_DUMP(level, ext, "in tree %d.\n", cnt++); cnt = 1; @@ -1262,7 +1280,6 @@ static int osc_refresh_count(const struct lu_env *env, /* readpage queues with _COUNT_STABLE, shouldn't get here. */ LASSERT(!(cmd & OBD_BRW_READ)); - LASSERT(opg != NULL); obj = opg->ops_cl.cpl_obj; cl_object_attr_lock(obj); @@ -1299,16 +1316,16 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap, * page->cp_req can be NULL if io submission failed before * cl_req was allocated. */ - if (page->cp_req != NULL) + if (page->cp_req) cl_req_page_done(env, page); - LASSERT(page->cp_req == NULL); + LASSERT(!page->cp_req); crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE; /* Clear opg->ops_transfer_pinned before VM lock is released. */ opg->ops_transfer_pinned = 0; spin_lock(&obj->oo_seatbelt); - LASSERT(opg->ops_submitter != NULL); + LASSERT(opg->ops_submitter); LASSERT(!list_empty(&opg->ops_inflight)); list_del_init(&opg->ops_inflight); opg->ops_submitter = NULL; @@ -1367,7 +1384,8 @@ static void osc_consume_write_grant(struct client_obd *cli, } /* the companion to osc_consume_write_grant, called when a brw has completed. - * must be called with the loi lock held. */ + * must be called with the loi lock held. + */ static void osc_release_write_grant(struct client_obd *cli, struct brw_page *pga) { @@ -1410,7 +1428,8 @@ static void __osc_unreserve_grant(struct client_obd *cli, /* it's quite normal for us to get more grant than reserved. * Thinking about a case that two extents merged by adding a new * chunk, we can save one extent tax. 
If extent tax is greater than - * one chunk, we can save more grant by adding a new chunk */ + * one chunk, we can save more grant by adding a new chunk + */ cli->cl_reserved_grant -= reserved; if (unused > reserved) { cli->cl_avail_grant += reserved; @@ -1454,7 +1473,8 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages, cli->cl_lost_grant += lost_grant; if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) { /* borrow some grant from truncate to avoid the case that - * truncate uses up all avail grant */ + * truncate uses up all avail grant + */ cli->cl_lost_grant -= grant; cli->cl_avail_grant += grant; } @@ -1539,7 +1559,8 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli, client_obd_list_lock(&cli->cl_loi_list_lock); /* force the caller to try sync io. this can jump the list - * of queued writes and create a discontiguous rpc stream */ + * of queued writes and create a discontiguous rpc stream + */ if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) || cli->cl_dirty_max < PAGE_CACHE_SIZE || cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync) { @@ -1558,7 +1579,8 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli, * Adding a cache waiter will trigger urgent write-out no matter what * RPC size will be. * The exiting condition is no avail grants and no dirty pages caching, - * that really means there is no space on the OST. */ + * that really means there is no space on the OST. + */ init_waitqueue_head(&ocw.ocw_waitq); ocw.ocw_oap = oap; ocw.ocw_grant = bytes; @@ -1640,7 +1662,8 @@ static int osc_max_rpc_in_flight(struct client_obd *cli, struct osc_object *osc) /* This maintains the lists of pending pages to read/write for a given object * (lop). This is used by osc_check_rpcs->osc_next_obj() and osc_list_maint() - * to quickly find objects that are ready to send an RPC. */ + * to quickly find objects that are ready to send an RPC. + */ static int osc_makes_rpc(struct client_obd *cli, struct osc_object *osc, int cmd) { @@ -1649,8 +1672,9 @@ static int osc_makes_rpc(struct client_obd *cli, struct osc_object *osc, /* if we have an invalid import we want to drain the queued pages * by forcing them through rpcs that immediately fail and complete * the pages. recovery relies on this to empty the queued pages - * before canceling the locks and evicting down the llite pages */ - if ((cli->cl_import == NULL || cli->cl_import->imp_invalid)) + * before canceling the locks and evicting down the llite pages + */ + if (!cli->cl_import || cli->cl_import->imp_invalid) invalid_import = 1; if (cmd & OBD_BRW_WRITE) { @@ -1670,7 +1694,8 @@ static int osc_makes_rpc(struct client_obd *cli, struct osc_object *osc, } /* trigger a write rpc stream as long as there are dirtiers * waiting for space. as they're waiting, they're not going to - * create more pages to coalesce with what's waiting.. */ + * create more pages to coalesce with what's waiting.. 
+ */ if (!list_empty(&cli->cl_cache_waiters)) { CDEBUG(D_CACHE, "cache waiters forcing RPC\n"); return 1; @@ -1723,7 +1748,8 @@ static void on_list(struct list_head *item, struct list_head *list, int should_b } /* maintain the osc's cli list membership invariants so that osc_send_oap_rpc - * can find pages to build into rpcs quickly */ + * can find pages to build into rpcs quickly + */ static int __osc_list_maint(struct client_obd *cli, struct osc_object *osc) { if (osc_makes_hprpc(osc)) { @@ -1761,7 +1787,8 @@ static int osc_list_maint(struct client_obd *cli, struct osc_object *osc) * application. As an async write fails we record the error code for later if * the app does an fsync. As long as errors persist we force future rpcs to be * sync so that the app can get a sync error and break the cycle of queueing - * pages for which writeback will fail. */ + * pages for which writeback will fail. + */ static void osc_process_ar(struct osc_async_rc *ar, __u64 xid, int rc) { @@ -1780,7 +1807,8 @@ static void osc_process_ar(struct osc_async_rc *ar, __u64 xid, } /* this must be called holding the loi list lock to give coverage to exit_cache, - * async_flag maintenance, and oap_request */ + * async_flag maintenance, and oap_request + */ static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli, struct osc_async_page *oap, int sent, int rc) { @@ -1788,7 +1816,7 @@ static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli, struct lov_oinfo *loi = osc->oo_oinfo; __u64 xid = 0; - if (oap->oap_request != NULL) { + if (oap->oap_request) { xid = ptlrpc_req_xid(oap->oap_request); ptlrpc_req_finished(oap->oap_request); oap->oap_request = NULL; @@ -1877,13 +1905,12 @@ static int get_write_extents(struct osc_object *obj, struct list_head *rpclist) { struct client_obd *cli = osc_cli(obj); struct osc_extent *ext; + struct osc_extent *temp; int page_count = 0; unsigned int max_pages = cli->cl_max_pages_per_rpc; LASSERT(osc_object_is_locked(obj)); - while (!list_empty(&obj->oo_hp_exts)) { - ext = list_entry(obj->oo_hp_exts.next, struct osc_extent, - oe_link); + list_for_each_entry_safe(ext, temp, &obj->oo_hp_exts, oe_link) { LASSERT(ext->oe_state == OES_CACHE); if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count, &max_pages)) @@ -1895,7 +1922,7 @@ static int get_write_extents(struct osc_object *obj, struct list_head *rpclist) while (!list_empty(&obj->oo_urgent_exts)) { ext = list_entry(obj->oo_urgent_exts.next, - struct osc_extent, oe_link); + struct osc_extent, oe_link); if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count, &max_pages)) return page_count; @@ -1906,7 +1933,7 @@ static int get_write_extents(struct osc_object *obj, struct list_head *rpclist) while ((ext = next_extent(ext)) != NULL) { if ((ext->oe_state != OES_CACHE) || (!list_empty(&ext->oe_link) && - ext->oe_owner != NULL)) + ext->oe_owner)) continue; if (!try_to_add_extent_for_io(cli, ext, rpclist, @@ -1918,10 +1945,10 @@ static int get_write_extents(struct osc_object *obj, struct list_head *rpclist) return page_count; ext = first_extent(obj); - while (ext != NULL) { + while (ext) { if ((ext->oe_state != OES_CACHE) || /* this extent may be already in current rpclist */ - (!list_empty(&ext->oe_link) && ext->oe_owner != NULL)) { + (!list_empty(&ext->oe_link) && ext->oe_owner)) { ext = next_extent(ext); continue; } @@ -1938,6 +1965,7 @@ static int get_write_extents(struct osc_object *obj, struct list_head *rpclist) static int osc_send_write_rpc(const struct lu_env *env, struct client_obd 
*cli, struct osc_object *osc) + __must_hold(osc) { LIST_HEAD(rpclist); struct osc_extent *ext; @@ -1967,7 +1995,8 @@ osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli, } /* we're going to grab page lock, so release object lock because - * lock order is page lock -> object lock. */ + * lock order is page lock -> object lock. + */ osc_object_unlock(osc); list_for_each_entry_safe(ext, tmp, &rpclist, oe_link) { @@ -1979,7 +2008,7 @@ osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli, continue; } } - if (first == NULL) { + if (!first) { first = ext; srvlock = ext->oe_srvlock; } else { @@ -2010,6 +2039,7 @@ osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli, static int osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli, struct osc_object *osc) + __must_hold(osc) { struct osc_extent *ext; struct osc_extent *next; @@ -2019,8 +2049,7 @@ osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli, int rc = 0; LASSERT(osc_object_is_locked(osc)); - list_for_each_entry_safe(ext, next, - &osc->oo_reading_exts, oe_link) { + list_for_each_entry_safe(ext, next, &osc->oo_reading_exts, oe_link) { EASSERT(ext->oe_state == OES_LOCK_DONE, ext); if (!try_to_add_extent_for_io(cli, ext, &rpclist, &page_count, &max_pages)) @@ -2051,12 +2080,14 @@ osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli, }) /* This is called by osc_check_rpcs() to find which objects have pages that - * we could be sending. These lists are maintained by osc_makes_rpc(). */ + * we could be sending. These lists are maintained by osc_makes_rpc(). + */ static struct osc_object *osc_next_obj(struct client_obd *cli) { /* First return objects that have blocked locks so that they * will be flushed quickly and other clients can get the lock, - * then objects which have pages ready to be stuffed into RPCs */ + * then objects which have pages ready to be stuffed into RPCs + */ if (!list_empty(&cli->cl_loi_hp_ready_list)) return list_to_obj(&cli->cl_loi_hp_ready_list, hp_ready_item); if (!list_empty(&cli->cl_loi_ready_list)) @@ -2065,14 +2096,16 @@ static struct osc_object *osc_next_obj(struct client_obd *cli) /* then if we have cache waiters, return all objects with queued * writes. This is especially important when many small files * have filled up the cache and not been fired into rpcs because - * they don't pass the nr_pending/object threshold */ + * they don't pass the nr_pending/object threshold + */ if (!list_empty(&cli->cl_cache_waiters) && !list_empty(&cli->cl_loi_write_list)) return list_to_obj(&cli->cl_loi_write_list, write_item); /* then return all queued objects when we have an invalid import - * so that they get flushed */ - if (cli->cl_import == NULL || cli->cl_import->imp_invalid) { + * so that they get flushed + */ + if (!cli->cl_import || cli->cl_import->imp_invalid) { if (!list_empty(&cli->cl_loi_write_list)) return list_to_obj(&cli->cl_loi_write_list, write_item); if (!list_empty(&cli->cl_loi_read_list)) @@ -2083,6 +2116,7 @@ static struct osc_object *osc_next_obj(struct client_obd *cli) /* called with the loi list lock held */ static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli) + __must_hold(&cli->cl_loi_list_lock) { struct osc_object *osc; int rc = 0; @@ -2108,7 +2142,8 @@ static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli) * would be redundant if we were getting read/write work items * instead of objects. 
we don't want send_oap_rpc to drain a * partial read pending queue when we're given this object to - * do io on writes while there are cache waiters */ + * do io on writes while there are cache waiters + */ osc_object_lock(osc); if (osc_makes_rpc(cli, osc, OBD_BRW_WRITE)) { rc = osc_send_write_rpc(env, cli, osc); @@ -2130,7 +2165,8 @@ static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli) * because it might be blocked at grabbing * the page lock as we mentioned. * - * Anyway, continue to drain pages. */ + * Anyway, continue to drain pages. + */ /* break; */ } } @@ -2155,12 +2191,13 @@ static int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli, { int rc = 0; - if (osc != NULL && osc_list_maint(cli, osc) == 0) + if (osc && osc_list_maint(cli, osc) == 0) return 0; if (!async) { /* disable osc_lru_shrink() temporarily to avoid - * potential stack overrun problem. LU-2859 */ + * potential stack overrun problem. LU-2859 + */ atomic_inc(&cli->cl_lru_shrinkers); client_obd_list_lock(&cli->cl_loi_list_lock); osc_check_rpcs(env, cli); @@ -2168,7 +2205,7 @@ static int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli, atomic_dec(&cli->cl_lru_shrinkers); } else { CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli); - LASSERT(cli->cl_writeback_work != NULL); + LASSERT(cli->cl_writeback_work); rc = ptlrpcd_queue_work(cli->cl_writeback_work); } return rc; @@ -2233,7 +2270,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io, if (oap->oap_magic != OAP_MAGIC) return -EINVAL; - if (cli->cl_import == NULL || cli->cl_import->imp_invalid) + if (!cli->cl_import || cli->cl_import->imp_invalid) return -EIO; if (!list_empty(&oap->oap_pending_item) || @@ -2284,12 +2321,14 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io, * 1. if there exists an active extent for this IO, mostly this page * can be added to the active extent and sometimes we need to * expand extent to accommodate this page; - * 2. otherwise, a new extent will be allocated. */ + * 2. otherwise, a new extent will be allocated. + */ ext = oio->oi_active; - if (ext != NULL && ext->oe_start <= index && ext->oe_max_end >= index) { + if (ext && ext->oe_start <= index && ext->oe_max_end >= index) { /* one chunk plus extent overhead must be enough to write this - * page */ + * page + */ grants = (1 << cli->cl_chunkbits) + cli->cl_extent_tax; if (ext->oe_end >= index) grants = 0; @@ -2316,7 +2355,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io, } } rc = 0; - } else if (ext != NULL) { + } else if (ext) { /* index is located outside of active extent */ need_release = 1; } @@ -2326,13 +2365,14 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io, ext = NULL; } - if (ext == NULL) { + if (!ext) { int tmp = (1 << cli->cl_chunkbits) + cli->cl_extent_tax; /* try to find new extent to cover this page */ - LASSERT(oio->oi_active == NULL); + LASSERT(!oio->oi_active); /* we may have allocated grant for this page if we failed - * to expand the previous active extent. */ + * to expand the previous active extent. 
+ */ LASSERT(ergo(grants > 0, grants >= tmp)); rc = 0; @@ -2359,8 +2399,8 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io, osc_unreserve_grant(cli, grants, tmp); } - LASSERT(ergo(rc == 0, ext != NULL)); - if (ext != NULL) { + LASSERT(ergo(rc == 0, ext)); + if (ext) { EASSERTF(ext->oe_end >= index && ext->oe_start <= index, ext, "index = %lu.\n", index); LASSERT((oap->oap_brw_flags & OBD_BRW_FROM_GRANT) != 0); @@ -2397,15 +2437,16 @@ int osc_teardown_async_page(const struct lu_env *env, ext = osc_extent_lookup(obj, oap2cl_page(oap)->cp_index); /* only truncated pages are allowed to be taken out. * See osc_extent_truncate() and osc_cache_truncate_start() - * for details. */ - if (ext != NULL && ext->oe_state != OES_TRUNC) { + * for details. + */ + if (ext && ext->oe_state != OES_TRUNC) { OSC_EXTENT_DUMP(D_ERROR, ext, "trunc at %lu.\n", oap2cl_page(oap)->cp_index); rc = -EBUSY; } } osc_object_unlock(obj); - if (ext != NULL) + if (ext) osc_extent_put(env, ext); return rc; } @@ -2430,7 +2471,7 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io, osc_object_lock(obj); ext = osc_extent_lookup(obj, index); - if (ext == NULL) { + if (!ext) { osc_extent_tree_dump(D_ERROR, obj); LASSERTF(0, "page index %lu is NOT covered.\n", index); } @@ -2448,7 +2489,8 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io, * exists a deadlock problem because other process can wait for * page writeback bit holding page lock; and meanwhile in * vvp_page_make_ready(), we need to grab page lock before - * really sending the RPC. */ + * really sending the RPC. + */ case OES_TRUNC: /* race with truncate, page will be redirtied */ case OES_ACTIVE: @@ -2456,7 +2498,8 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io, * re-dirty the page. If we continued on here, and we were the * one making the extent active, we could deadlock waiting for * the page writeback to clear but it won't because the extent - * is active and won't be written out. */ + * is active and won't be written out. + */ rc = -EAGAIN; goto out; default: @@ -2527,12 +2570,13 @@ int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops) if (ext->oe_start <= index && ext->oe_end >= index) { LASSERT(ext->oe_state == OES_LOCK_DONE); /* For OES_LOCK_DONE state extent, it has already held - * a refcount for RPC. */ + * a refcount for RPC. + */ found = osc_extent_get(ext); break; } } - if (found != NULL) { + if (found) { list_del_init(&found->oe_link); osc_update_pending(obj, cmd, -found->oe_nr_pages); osc_object_unlock(obj); @@ -2543,8 +2587,9 @@ int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops) } else { osc_object_unlock(obj); /* ok, it's been put in an rpc. 
only one oap gets a request - * reference */ - if (oap->oap_request != NULL) { + * reference + */ + if (oap->oap_request) { ptlrpc_mark_interrupted(oap->oap_request); ptlrpcd_wake(oap->oap_request); ptlrpc_req_finished(oap->oap_request); @@ -2579,7 +2624,7 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj, } ext = osc_extent_alloc(obj); - if (ext == NULL) { + if (!ext) { list_for_each_entry_safe(oap, tmp, list, oap_pending_item) { list_del_init(&oap->oap_pending_item); osc_ap_completion(env, cli, oap, 0, -ENOMEM); @@ -2621,6 +2666,7 @@ int osc_cache_truncate_start(const struct lu_env *env, struct osc_io *oio, { struct client_obd *cli = osc_cli(obj); struct osc_extent *ext; + struct osc_extent *temp; struct osc_extent *waiting = NULL; pgoff_t index; LIST_HEAD(list); @@ -2634,18 +2680,19 @@ int osc_cache_truncate_start(const struct lu_env *env, struct osc_io *oio, again: osc_object_lock(obj); ext = osc_extent_search(obj, index); - if (ext == NULL) + if (!ext) ext = first_extent(obj); else if (ext->oe_end < index) ext = next_extent(ext); - while (ext != NULL) { + while (ext) { EASSERT(ext->oe_state != OES_TRUNC, ext); if (ext->oe_state > OES_CACHE || ext->oe_urgent) { /* if ext is in urgent state, it means there must exist * a page already having been flushed by write_page(). * We have to wait for this extent because we can't - * truncate that page. */ + * truncate that page. + */ LASSERT(!ext->oe_hp); OSC_EXTENT_DUMP(D_CACHE, ext, "waiting for busy extent\n"); @@ -2660,7 +2707,8 @@ again: /* though we grab inode mutex for write path, but we * release it before releasing extent(in osc_io_end()), * so there is a race window that an extent is still - * in OES_ACTIVE when truncate starts. */ + * in OES_ACTIVE when truncate starts. + */ LASSERT(!ext->oe_trunc_pending); ext->oe_trunc_pending = 1; } else { @@ -2678,14 +2726,14 @@ again: osc_list_maint(cli, obj); - while (!list_empty(&list)) { + list_for_each_entry_safe(ext, temp, &list, oe_link) { int rc; - ext = list_entry(list.next, struct osc_extent, oe_link); list_del_init(&ext->oe_link); /* extent may be in OES_ACTIVE state because inode mutex - * is released before osc_io_end() in file write case */ + * is released before osc_io_end() in file write case + */ if (ext->oe_state != OES_TRUNC) osc_extent_wait(env, ext, OES_TRUNC); @@ -2710,19 +2758,21 @@ again: /* we need to hold this extent in OES_TRUNC state so * that no writeback will happen. This is to avoid - * BUG 17397. */ - LASSERT(oio->oi_trunc == NULL); + * BUG 17397. + */ + LASSERT(!oio->oi_trunc); oio->oi_trunc = osc_extent_get(ext); OSC_EXTENT_DUMP(D_CACHE, ext, "trunc at %llu\n", size); } osc_extent_put(env, ext); } - if (waiting != NULL) { + if (waiting) { int rc; /* ignore the result of osc_extent_wait the write initiator - * should take care of it. */ + * should take care of it. 
+ */ rc = osc_extent_wait(env, waiting, OES_INV); if (rc < 0) OSC_EXTENT_DUMP(D_CACHE, waiting, "error: %d.\n", rc); @@ -2743,7 +2793,7 @@ void osc_cache_truncate_end(const struct lu_env *env, struct osc_io *oio, struct osc_extent *ext = oio->oi_trunc; oio->oi_trunc = NULL; - if (ext != NULL) { + if (ext) { bool unplug = false; EASSERT(ext->oe_nr_pages > 0, ext); @@ -2786,11 +2836,11 @@ int osc_cache_wait_range(const struct lu_env *env, struct osc_object *obj, again: osc_object_lock(obj); ext = osc_extent_search(obj, index); - if (ext == NULL) + if (!ext) ext = first_extent(obj); else if (ext->oe_end < index) ext = next_extent(ext); - while (ext != NULL) { + while (ext) { int rc; if (ext->oe_start > end) @@ -2841,11 +2891,11 @@ int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj, osc_object_lock(obj); ext = osc_extent_search(obj, start); - if (ext == NULL) + if (!ext) ext = first_extent(obj); else if (ext->oe_end < start) ext = next_extent(ext); - while (ext != NULL) { + while (ext) { if (ext->oe_start > end) break; @@ -2864,18 +2914,18 @@ int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj, ext->oe_urgent = 1; list = &obj->oo_urgent_exts; } - if (list != NULL) + if (list) list_move_tail(&ext->oe_link, list); unplug = true; } else { /* the only discarder is lock cancelling, so - * [start, end] must contain this extent */ + * [start, end] must contain this extent + */ EASSERT(ext->oe_start >= start && ext->oe_max_end <= end, ext); osc_extent_state_set(ext, OES_LOCKING); ext->oe_owner = current; - list_move_tail(&ext->oe_link, - &discard_list); + list_move_tail(&ext->oe_link, &discard_list); osc_update_pending(obj, OBD_BRW_WRITE, -ext->oe_nr_pages); } @@ -2884,14 +2934,16 @@ int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj, /* It's pretty bad to wait for ACTIVE extents, because * we don't know how long we will wait for it to be * flushed since it may be blocked at awaiting more - * grants. We do this for the correctness of fsync. */ + * grants. We do this for the correctness of fsync. + */ LASSERT(hp == 0 && discard == 0); ext->oe_urgent = 1; break; case OES_TRUNC: /* this extent is being truncated, can't do anything * for it now. it will be set to urgent after truncate - * is finished in osc_cache_truncate_end(). */ + * is finished in osc_cache_truncate_end(). + */ default: break; } @@ -2910,7 +2962,8 @@ int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj, EASSERT(ext->oe_state == OES_LOCKING, ext); /* Discard caching pages. We don't actually write this - * extent out but we complete it as if we did. */ + * extent out but we complete it as if we did. + */ rc = osc_extent_make_ready(env, ext); if (unlikely(rc < 0)) { OSC_EXTENT_DUMP(D_ERROR, ext, diff --git a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h index 415c27e4ab66..d55d04d0428b 100644 --- a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h +++ b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h @@ -69,10 +69,12 @@ struct osc_io { /** true if this io is lockless. */ int oi_lockless; /** active extents, we know how many bytes is going to be written, - * so having an active extent will prevent it from being fragmented */ + * so having an active extent will prevent it from being fragmented + */ struct osc_extent *oi_active; /** partially truncated extent, we need to hold this extent to prevent - * page writeback from happening. */ + * page writeback from happening. 
+ */ struct osc_extent *oi_trunc; struct obd_info oi_info; @@ -154,7 +156,8 @@ struct osc_object { atomic_t oo_nr_writes; /** Protect extent tree. Will be used to protect - * oo_{read|write}_pages soon. */ + * oo_{read|write}_pages soon. + */ spinlock_t oo_lock; }; @@ -472,7 +475,7 @@ static inline struct osc_thread_info *osc_env_info(const struct lu_env *env) struct osc_thread_info *info; info = lu_context_key_get(&env->le_ctx, &osc_key); - LASSERT(info != NULL); + LASSERT(info); return info; } @@ -481,7 +484,7 @@ static inline struct osc_session *osc_env_session(const struct lu_env *env) struct osc_session *ses; ses = lu_context_key_get(env->le_ses, &osc_session_key); - LASSERT(ses != NULL); + LASSERT(ses); return ses; } @@ -522,7 +525,7 @@ static inline struct cl_object *osc2cl(const struct osc_object *obj) return (struct cl_object *)&obj->oo_cl; } -static inline ldlm_mode_t osc_cl_lock2ldlm(enum cl_lock_mode mode) +static inline enum ldlm_mode osc_cl_lock2ldlm(enum cl_lock_mode mode) { LASSERT(mode == CLM_READ || mode == CLM_WRITE || mode == CLM_GROUP); if (mode == CLM_READ) @@ -533,7 +536,7 @@ static inline ldlm_mode_t osc_cl_lock2ldlm(enum cl_lock_mode mode) return LCK_GROUP; } -static inline enum cl_lock_mode osc_ldlm2cl_lock(ldlm_mode_t mode) +static inline enum cl_lock_mode osc_ldlm2cl_lock(enum ldlm_mode mode) { LASSERT(mode == LCK_PR || mode == LCK_PW || mode == LCK_GROUP); if (mode == LCK_PR) @@ -627,22 +630,26 @@ struct osc_extent { oe_srvlock:1, oe_memalloc:1, /** an ACTIVE extent is going to be truncated, so when this extent - * is released, it will turn into TRUNC state instead of CACHE. */ + * is released, it will turn into TRUNC state instead of CACHE. + */ oe_trunc_pending:1, /** this extent should be written asap and someone may wait for the * write to finish. This bit is usually set along with urgent if * the extent was CACHE state. * fsync_wait extent can't be merged because new extent region may - * exceed fsync range. */ + * exceed fsync range. + */ oe_fsync_wait:1, /** covering lock is being canceled */ oe_hp:1, /** this extent should be written back asap. set if one of pages is - * called by page WB daemon, or sync write or reading requests. */ + * called by page WB daemon, or sync write or reading requests. + */ oe_urgent:1; /** how many grants allocated for this extent. * Grant allocated for this extent. There is no grant allocated - * for reading extents and sync write extents. */ + * for reading extents and sync write extents. + */ unsigned int oe_grants; /** # of dirty pages in this extent */ unsigned int oe_nr_pages; @@ -655,21 +662,25 @@ struct osc_extent { struct osc_page *oe_next_page; /** start and end index of this extent, include start and end * themselves. Page offset here is the page index of osc_pages. - * oe_start is used as keyword for red-black tree. */ + * oe_start is used as keyword for red-black tree. + */ pgoff_t oe_start; pgoff_t oe_end; /** maximum ending index of this extent, this is limited by - * max_pages_per_rpc, lock extent and chunk size. */ + * max_pages_per_rpc, lock extent and chunk size. + */ pgoff_t oe_max_end; /** waitqueue - for those who want to be notified if this extent's - * state has changed. */ + * state has changed. + */ wait_queue_head_t oe_waitq; /** lock covering this extent */ struct cl_lock *oe_osclock; /** terminator of this extent. Must be true if this extent is in IO. */ struct task_struct *oe_owner; /** return value of writeback. 
If somebody is waiting for this extent, - * this value can be known by outside world. */ + * this value can be known by outside world. + */ int oe_rc; /** max pages per rpc when this extent was created */ unsigned int oe_mppr; diff --git a/drivers/staging/lustre/lustre/osc/osc_dev.c b/drivers/staging/lustre/lustre/osc/osc_dev.c index 7078cc57d8b9..d4fe507f165f 100644 --- a/drivers/staging/lustre/lustre/osc/osc_dev.c +++ b/drivers/staging/lustre/lustre/osc/osc_dev.c @@ -122,8 +122,8 @@ static void *osc_key_init(const struct lu_context *ctx, { struct osc_thread_info *info; - info = kmem_cache_alloc(osc_thread_kmem, GFP_NOFS | __GFP_ZERO); - if (info == NULL) + info = kmem_cache_zalloc(osc_thread_kmem, GFP_NOFS); + if (!info) info = ERR_PTR(-ENOMEM); return info; } @@ -147,8 +147,8 @@ static void *osc_session_init(const struct lu_context *ctx, { struct osc_session *info; - info = kmem_cache_alloc(osc_session_kmem, GFP_NOFS | __GFP_ZERO); - if (info == NULL) + info = kmem_cache_zalloc(osc_session_kmem, GFP_NOFS); + if (!info) info = ERR_PTR(-ENOMEM); return info; } @@ -228,7 +228,7 @@ static struct lu_device *osc_device_alloc(const struct lu_env *env, /* Setup OSC OBD */ obd = class_name2obd(lustre_cfg_string(cfg, 0)); - LASSERT(obd != NULL); + LASSERT(obd); rc = osc_setup(obd, cfg); if (rc) { osc_device_free(env, d); diff --git a/drivers/staging/lustre/lustre/osc/osc_internal.h b/drivers/staging/lustre/lustre/osc/osc_internal.h index a4c61463b1c7..ea695c2099ee 100644 --- a/drivers/staging/lustre/lustre/osc/osc_internal.h +++ b/drivers/staging/lustre/lustre/osc/osc_internal.h @@ -47,11 +47,13 @@ struct lu_env; enum async_flags { ASYNC_READY = 0x1, /* ap_make_ready will not be called before this - page is added to an rpc */ + * page is added to an rpc + */ ASYNC_URGENT = 0x2, /* page must be put into an RPC before return */ ASYNC_COUNT_STABLE = 0x4, /* ap_refresh_count will not be called - to give the caller a chance to update - or cancel the size of the io */ + * to give the caller a chance to update + * or cancel the size of the io + */ ASYNC_HP = 0x10, }; diff --git a/drivers/staging/lustre/lustre/osc/osc_io.c b/drivers/staging/lustre/lustre/osc/osc_io.c index abd0beb483fe..6bd0a45d8b06 100644 --- a/drivers/staging/lustre/lustre/osc/osc_io.c +++ b/drivers/staging/lustre/lustre/osc/osc_io.c @@ -73,7 +73,7 @@ static struct osc_page *osc_cl_page_osc(struct cl_page *page) const struct cl_page_slice *slice; slice = cl_page_at(page, &osc_device_type); - LASSERT(slice != NULL); + LASSERT(slice); return cl2osc_page(slice); } @@ -135,7 +135,7 @@ static int osc_io_submit(const struct lu_env *env, /* Top level IO. */ io = page->cp_owner; - LASSERT(io != NULL); + LASSERT(io); opg = osc_cl_page_osc(page); oap = &opg->ops_oap; @@ -266,13 +266,14 @@ static int osc_io_prepare_write(const struct lu_env *env, * This implements OBD_BRW_CHECK logic from old client. */ - if (imp == NULL || imp->imp_invalid) + if (!imp || imp->imp_invalid) result = -EIO; if (result == 0 && oio->oi_lockless) /* this page contains `invalid' data, but who cares? * nobody can access the invalid data. * in osc_io_commit_write(), we're going to write exact - * [from, to) bytes of this page to OST. -jay */ + * [from, to) bytes of this page to OST. 
-jay + */ cl_page_export(env, slice->cpl_page, 1); return result; @@ -349,14 +350,14 @@ static int trunc_check_cb(const struct lu_env *env, struct cl_io *io, __u64 start = *(__u64 *)cbdata; slice = cl_page_at(page, &osc_device_type); - LASSERT(slice != NULL); + LASSERT(slice); ops = cl2osc_page(slice); oap = &ops->ops_oap; if (oap->oap_cmd & OBD_BRW_WRITE && !list_empty(&oap->oap_pending_item)) CL_PAGE_DEBUG(D_ERROR, env, page, "exists %llu/%s.\n", - start, current->comm); + start, current->comm); { struct page *vmpage = cl_page_vmpage(env, page); @@ -500,7 +501,7 @@ static void osc_io_setattr_end(const struct lu_env *env, __u64 size = io->u.ci_setattr.sa_attr.lvb_size; osc_trunc_check(env, io, oio, size); - if (oio->oi_trunc != NULL) { + if (oio->oi_trunc) { osc_cache_truncate_end(env, oio, cl2osc(obj)); oio->oi_trunc = NULL; } @@ -596,7 +597,8 @@ static int osc_io_fsync_start(const struct lu_env *env, * send OST_SYNC RPC. This is bad because it causes extents * to be written osc by osc. However, we usually start * writeback before CL_FSYNC_ALL so this won't have any real - * problem. */ + * problem. + */ rc = osc_cache_wait_range(env, osc, start, end); if (result == 0) result = rc; @@ -754,13 +756,12 @@ static void osc_req_attr_set(const struct lu_env *env, opg = osc_cl_page_osc(apage); apage = opg->ops_cl.cpl_page; /* now apage is a sub-page */ lock = cl_lock_at_page(env, apage->cp_obj, apage, NULL, 1, 1); - if (lock == NULL) { + if (!lock) { struct cl_object_header *head; struct cl_lock *scan; head = cl_object_header(apage->cp_obj); - list_for_each_entry(scan, &head->coh_locks, - cll_linkage) + list_for_each_entry(scan, &head->coh_locks, cll_linkage) CL_LOCK_DEBUG(D_ERROR, env, scan, "no cover page!\n"); CL_PAGE_DEBUG(D_ERROR, env, apage, @@ -770,10 +771,9 @@ static void osc_req_attr_set(const struct lu_env *env, } olck = osc_lock_at(lock); - LASSERT(olck != NULL); - LASSERT(ergo(opg->ops_srvlock, olck->ols_lock == NULL)); + LASSERT(ergo(opg->ops_srvlock, !olck->ols_lock)); /* check for lockless io. 
*/ - if (olck->ols_lock != NULL) { + if (olck->ols_lock) { oa->o_handle = olck->ols_lock->l_remote_handle; oa->o_valid |= OBD_MD_FLHANDLE; } @@ -803,8 +803,8 @@ int osc_req_init(const struct lu_env *env, struct cl_device *dev, struct osc_req *or; int result; - or = kmem_cache_alloc(osc_req_kmem, GFP_NOFS | __GFP_ZERO); - if (or != NULL) { + or = kmem_cache_zalloc(osc_req_kmem, GFP_NOFS); + if (or) { cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops); result = 0; } else diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c index 71f2810d18b9..013df9787f3e 100644 --- a/drivers/staging/lustre/lustre/osc/osc_lock.c +++ b/drivers/staging/lustre/lustre/osc/osc_lock.c @@ -79,7 +79,7 @@ static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle) struct ldlm_lock *lock; lock = ldlm_handle2lock(handle); - if (lock != NULL) + if (lock) LDLM_LOCK_PUT(lock); return lock; } @@ -94,42 +94,40 @@ static int osc_lock_invariant(struct osc_lock *ols) int handle_used = lustre_handle_is_used(&ols->ols_handle); if (ergo(osc_lock_is_lockless(ols), - ols->ols_locklessable && ols->ols_lock == NULL)) + ols->ols_locklessable && !ols->ols_lock)) return 1; /* * If all the following "ergo"s are true, return 1, otherwise 0 */ - if (!ergo(olock != NULL, handle_used)) + if (!ergo(olock, handle_used)) return 0; - if (!ergo(olock != NULL, - olock->l_handle.h_cookie == ols->ols_handle.cookie)) + if (!ergo(olock, olock->l_handle.h_cookie == ols->ols_handle.cookie)) return 0; if (!ergo(handle_used, - ergo(lock != NULL && olock != NULL, lock == olock) && - ergo(lock == NULL, olock == NULL))) + ergo(lock && olock, lock == olock) && + ergo(!lock, !olock))) return 0; /* * Check that ->ols_handle and ->ols_lock are consistent, but * take into account that they are set at the different time. */ if (!ergo(ols->ols_state == OLS_CANCELLED, - olock == NULL && !handle_used)) + !olock && !handle_used)) return 0; /* * DLM lock is destroyed only after we have seen cancellation * ast. */ - if (!ergo(olock != NULL && ols->ols_state < OLS_CANCELLED, - ((olock->l_flags & LDLM_FL_DESTROYED) == 0))) + if (!ergo(olock && ols->ols_state < OLS_CANCELLED, + ((olock->l_flags & LDLM_FL_DESTROYED) == 0))) return 0; if (!ergo(ols->ols_state == OLS_GRANTED, - olock != NULL && - olock->l_req_mode == olock->l_granted_mode && - ols->ols_hold)) + olock && olock->l_req_mode == olock->l_granted_mode && + ols->ols_hold)) return 0; return 1; } @@ -149,14 +147,15 @@ static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck) spin_lock(&osc_ast_guard); dlmlock = olck->ols_lock; - if (dlmlock == NULL) { + if (!dlmlock) { spin_unlock(&osc_ast_guard); return; } olck->ols_lock = NULL; /* wb(); --- for all who checks (ols->ols_lock != NULL) before - * call to osc_lock_detach() */ + * call to osc_lock_detach() + */ dlmlock->l_ast_data = NULL; olck->ols_handle.cookie = 0ULL; spin_unlock(&osc_ast_guard); @@ -171,7 +170,8 @@ static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck) /* Must get the value under the lock to avoid possible races. */ old_kms = cl2osc(obj)->oo_oinfo->loi_kms; /* Update the kms. Need to loop all granted locks. 
- * Not a problem for the client */ + * Not a problem for the client + */ attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms); cl_object_attr_set(env, obj, attr, CAT_KMS); @@ -223,8 +223,7 @@ static int osc_lock_unuse(const struct lu_env *env, /* * Move lock into OLS_RELEASED state before calling * osc_cancel_base() so that possible synchronous cancellation - * (that always happens e.g., for liblustre) sees that lock is - * released. + * sees that lock is released. */ ols->ols_state = OLS_RELEASED; return osc_lock_unhold(ols); @@ -247,7 +246,7 @@ static void osc_lock_fini(const struct lu_env *env, * lock is destroyed immediately after upcall. */ osc_lock_unhold(ols); - LASSERT(ols->ols_lock == NULL); + LASSERT(!ols->ols_lock); LASSERT(atomic_read(&ols->ols_pageref) == 0 || atomic_read(&ols->ols_pageref) == _PAGEREF_MAGIC); @@ -292,7 +291,7 @@ static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock) lock_res_and_lock(dlm_lock); spin_lock(&osc_ast_guard); olck = dlm_lock->l_ast_data; - if (olck != NULL) { + if (olck) { struct cl_lock *lock = olck->ols_cl.cls_lock; /* * If osc_lock holds a reference on ldlm lock, return it even @@ -359,13 +358,13 @@ static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck, __u64 size; dlmlock = olck->ols_lock; - LASSERT(dlmlock != NULL); /* re-grab LVB from a dlm lock under DLM spin-locks. */ *lvb = *(struct ost_lvb *)dlmlock->l_lvb_data; size = lvb->lvb_size; /* Extend KMS up to the end of this lock and no further - * A lock on [x,y] means a KMS of up to y + 1 bytes! */ + * A lock on [x,y] means a KMS of up to y + 1 bytes! + */ if (size > dlmlock->l_policy_data.l_extent.end) size = dlmlock->l_policy_data.l_extent.end + 1; if (size >= oinfo->loi_kms) { @@ -429,7 +428,8 @@ static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck, * to take a semaphore on a parent lock. This is safe, because * spin-locks are needed to protect consistency of * dlmlock->l_*_mode and LVB, and we have finished processing - * them. */ + * them. + */ unlock_res_and_lock(dlmlock); cl_lock_modify(env, lock, descr); cl_lock_signal(env, lock); @@ -444,12 +444,12 @@ static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck) struct ldlm_lock *dlmlock; dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0); - LASSERT(dlmlock != NULL); + LASSERT(dlmlock); lock_res_and_lock(dlmlock); spin_lock(&osc_ast_guard); LASSERT(dlmlock->l_ast_data == olck); - LASSERT(olck->ols_lock == NULL); + LASSERT(!olck->ols_lock); olck->ols_lock = dlmlock; spin_unlock(&osc_ast_guard); @@ -470,7 +470,8 @@ static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck) olck->ols_hold = 1; /* lock reference taken by ldlm_handle2lock_long() is owned by - * osc_lock and released in osc_lock_detach() */ + * osc_lock and released in osc_lock_detach() + */ lu_ref_add(&dlmlock->l_reference, "osc_lock", olck); olck->ols_has_ref = 1; } @@ -508,10 +509,10 @@ static int osc_lock_upcall(void *cookie, int errcode) struct ldlm_lock *dlmlock; dlmlock = ldlm_handle2lock(&olck->ols_handle); - if (dlmlock != NULL) { + if (dlmlock) { lock_res_and_lock(dlmlock); spin_lock(&osc_ast_guard); - LASSERT(olck->ols_lock == NULL); + LASSERT(!olck->ols_lock); dlmlock->l_ast_data = NULL; olck->ols_handle.cookie = 0ULL; spin_unlock(&osc_ast_guard); @@ -548,7 +549,8 @@ static int osc_lock_upcall(void *cookie, int errcode) /* For AGL case, the RPC sponsor may exits the cl_lock * processing without wait() called before related OSC * lock upcall(). 
So update the lock status according - * to the enqueue result inside AGL upcall(). */ + * to the enqueue result inside AGL upcall(). + */ if (olck->ols_agl) { lock->cll_flags |= CLF_FROM_UPCALL; cl_wait_try(env, lock); @@ -571,7 +573,8 @@ static int osc_lock_upcall(void *cookie, int errcode) lu_ref_del(&lock->cll_reference, "upcall", lock); /* This maybe the last reference, so must be called after - * cl_lock_mutex_put(). */ + * cl_lock_mutex_put(). + */ cl_lock_put(env, lock); cl_env_nested_put(&nest, env); @@ -634,7 +637,7 @@ static int osc_dlm_blocking_ast0(const struct lu_env *env, cancel = 0; olck = osc_ast_data_get(dlmlock); - if (olck != NULL) { + if (olck) { lock = olck->ols_cl.cls_lock; cl_lock_mutex_get(env, lock); LINVRNT(osc_lock_invariant(olck)); @@ -786,17 +789,17 @@ static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock, env = cl_env_nested_get(&nest); if (!IS_ERR(env)) { olck = osc_ast_data_get(dlmlock); - if (olck != NULL) { + if (olck) { lock = olck->ols_cl.cls_lock; cl_lock_mutex_get(env, lock); /* * ldlm_handle_cp_callback() copied LVB from request * to lock->l_lvb_data, store it in osc_lock. */ - LASSERT(dlmlock->l_lvb_data != NULL); + LASSERT(dlmlock->l_lvb_data); lock_res_and_lock(dlmlock); olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data; - if (olck->ols_lock == NULL) { + if (!olck->ols_lock) { /* * upcall (osc_lock_upcall()) hasn't yet been * called. Do nothing now, upcall will bind @@ -850,14 +853,15 @@ static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data) * environment. */ olck = osc_ast_data_get(dlmlock); - if (olck != NULL) { + if (olck) { lock = olck->ols_cl.cls_lock; /* Do not grab the mutex of cl_lock for glimpse. * See LU-1274 for details. * BTW, it's okay for cl_lock to be cancelled during * this period because server can handle this race. * See ldlm_server_glimpse_ast() for details. - * cl_lock_mutex_get(env, lock); */ + * cl_lock_mutex_get(env, lock); + */ cap = &req->rq_pill; req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK); req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER, @@ -1017,7 +1021,8 @@ static int osc_lock_enqueue_wait(const struct lu_env *env, LASSERT(cl_lock_is_mutexed(lock)); /* make it enqueue anyway for glimpse lock, because we actually - * don't need to cancel any conflicting locks. */ + * don't need to cancel any conflicting locks. + */ if (olck->ols_glimpse) return 0; @@ -1051,7 +1056,8 @@ static int osc_lock_enqueue_wait(const struct lu_env *env, * imagine that client has PR lock on [0, 1000], and thread T0 * is doing lockless IO in [500, 1500] region. Concurrent * thread T1 can see lockless data in [500, 1000], which is - * wrong, because these data are possibly stale. */ + * wrong, because these data are possibly stale. 
+ */ if (!lockless && osc_lock_compatible(olck, scan_ols)) continue; @@ -1074,7 +1080,7 @@ static int osc_lock_enqueue_wait(const struct lu_env *env, } else { CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, will wait\n", lock, conflict); - LASSERT(lock->cll_conflict == NULL); + LASSERT(!lock->cll_conflict); lu_ref_add(&conflict->cll_reference, "cancel-wait", lock); lock->cll_conflict = conflict; @@ -1111,7 +1117,7 @@ static int osc_lock_enqueue(const struct lu_env *env, "Impossible state: %d\n", ols->ols_state); LASSERTF(ergo(ols->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ), - "lock = %p, ols = %p\n", lock, ols); + "lock = %p, ols = %p\n", lock, ols); result = osc_lock_enqueue_wait(env, ols); if (result == 0) { @@ -1123,7 +1129,8 @@ static int osc_lock_enqueue(const struct lu_env *env, struct ldlm_enqueue_info *einfo = &ols->ols_einfo; /* lock will be passed as upcall cookie, - * hold ref to prevent to be released. */ + * hold ref to prevent to be released. + */ cl_lock_hold_add(env, lock, "upcall", lock); /* a user for lock also */ cl_lock_user_add(env, lock); @@ -1137,12 +1144,12 @@ static int osc_lock_enqueue(const struct lu_env *env, ostid_build_res_name(&obj->oo_oinfo->loi_oi, resname); osc_lock_build_policy(env, lock, policy); result = osc_enqueue_base(osc_export(obj), resname, - &ols->ols_flags, policy, - &ols->ols_lvb, - obj->oo_oinfo->loi_kms_valid, - osc_lock_upcall, - ols, einfo, &ols->ols_handle, - PTLRPCD_SET, 1, ols->ols_agl); + &ols->ols_flags, policy, + &ols->ols_lvb, + obj->oo_oinfo->loi_kms_valid, + osc_lock_upcall, + ols, einfo, &ols->ols_handle, + PTLRPCD_SET, 1, ols->ols_agl); if (result != 0) { cl_lock_user_del(env, lock); cl_lock_unhold(env, lock, "upcall", lock); @@ -1174,7 +1181,8 @@ static int osc_lock_wait(const struct lu_env *env, } else if (olck->ols_agl) { if (lock->cll_flags & CLF_FROM_UPCALL) /* It is from enqueue RPC reply upcall for - * updating state. Do not re-enqueue. */ + * updating state. Do not re-enqueue. + */ return -ENAVAIL; olck->ols_state = OLS_NEW; } else { @@ -1197,7 +1205,7 @@ static int osc_lock_wait(const struct lu_env *env, } LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED && - lock->cll_error == 0, olck->ols_lock != NULL)); + lock->cll_error == 0, olck->ols_lock)); return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT; } @@ -1235,7 +1243,8 @@ static int osc_lock_use(const struct lu_env *env, LASSERT(lock->cll_state == CLS_INTRANSIT); LASSERT(lock->cll_users > 0); /* set a flag for osc_dlm_blocking_ast0() to signal the - * lock.*/ + * lock. + */ olck->ols_ast_wait = 1; rc = CLO_WAIT; } @@ -1257,11 +1266,12 @@ static int osc_lock_flush(struct osc_lock *ols, int discard) if (descr->cld_mode >= CLM_WRITE) { result = osc_cache_writeback_range(env, obj, - descr->cld_start, descr->cld_end, - 1, discard); + descr->cld_start, + descr->cld_end, + 1, discard); LDLM_DEBUG(ols->ols_lock, - "lock %p: %d pages were %s.\n", lock, result, - discard ? "discarded" : "written"); + "lock %p: %d pages were %s.\n", lock, result, + discard ? 
"discarded" : "written"); if (result > 0) result = 0; } @@ -1306,7 +1316,7 @@ static void osc_lock_cancel(const struct lu_env *env, LASSERT(cl_lock_is_mutexed(lock)); LINVRNT(osc_lock_invariant(olck)); - if (dlmlock != NULL) { + if (dlmlock) { int do_cancel; discard = !!(dlmlock->l_flags & LDLM_FL_DISCARD_DATA); @@ -1318,7 +1328,8 @@ static void osc_lock_cancel(const struct lu_env *env, /* Now that we're the only user of dlm read/write reference, * mostly the ->l_readers + ->l_writers should be zero. * However, there is a corner case. - * See bug 18829 for details.*/ + * See bug 18829 for details. + */ do_cancel = (dlmlock->l_readers == 0 && dlmlock->l_writers == 0); dlmlock->l_flags |= LDLM_FL_CBPENDING; @@ -1382,7 +1393,7 @@ static void osc_lock_state(const struct lu_env *env, if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) { struct osc_io *oio = osc_env_io(env); - LASSERT(lock->ols_owner == NULL); + LASSERT(!lock->ols_owner); lock->ols_owner = oio; } else if (state != CLS_HELD) lock->ols_owner = NULL; @@ -1517,7 +1528,8 @@ static void osc_lock_lockless_state(const struct lu_env *env, lock->ols_owner = oio; /* set the io to be lockless if this lock is for io's - * host object */ + * host object + */ if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj)) oio->oi_lockless = 1; } @@ -1555,8 +1567,8 @@ int osc_lock_init(const struct lu_env *env, struct osc_lock *clk; int result; - clk = kmem_cache_alloc(osc_lock_kmem, GFP_NOFS | __GFP_ZERO); - if (clk != NULL) { + clk = kmem_cache_zalloc(osc_lock_kmem, GFP_NOFS); + if (clk) { __u32 enqflags = lock->cll_descr.cld_enq_flags; osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo); @@ -1578,8 +1590,8 @@ int osc_lock_init(const struct lu_env *env, if (clk->ols_locklessable && !(enqflags & CEF_DISCARD_DATA)) clk->ols_flags |= LDLM_FL_DENY_ON_CONTENTION; - LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx\n", - lock, clk, clk->ols_flags); + LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx", + lock, clk, clk->ols_flags); result = 0; } else @@ -1599,9 +1611,9 @@ int osc_dlm_lock_pageref(struct ldlm_lock *dlm) * doesn't matter because in the worst case we don't cancel a lock * which we actually can, that's no harm. 
*/ - if (olock != NULL && + if (olock && atomic_add_return(_PAGEREF_MAGIC, - &olock->ols_pageref) != _PAGEREF_MAGIC) { + &olock->ols_pageref) != _PAGEREF_MAGIC) { atomic_sub(_PAGEREF_MAGIC, &olock->ols_pageref); rc = 1; } diff --git a/drivers/staging/lustre/lustre/osc/osc_object.c b/drivers/staging/lustre/lustre/osc/osc_object.c index fdd6219aacf6..9d474fcdd9a7 100644 --- a/drivers/staging/lustre/lustre/osc/osc_object.c +++ b/drivers/staging/lustre/lustre/osc/osc_object.c @@ -113,7 +113,7 @@ static void osc_object_free(const struct lu_env *env, struct lu_object *obj) LASSERT(list_empty(&osc->oo_write_item)); LASSERT(list_empty(&osc->oo_read_item)); - LASSERT(osc->oo_root.rb_node == NULL); + LASSERT(!osc->oo_root.rb_node); LASSERT(list_empty(&osc->oo_hp_exts)); LASSERT(list_empty(&osc->oo_urgent_exts)); LASSERT(list_empty(&osc->oo_rpc_exts)); @@ -255,8 +255,8 @@ struct lu_object *osc_object_alloc(const struct lu_env *env, struct osc_object *osc; struct lu_object *obj; - osc = kmem_cache_alloc(osc_object_kmem, GFP_NOFS | __GFP_ZERO); - if (osc != NULL) { + osc = kmem_cache_zalloc(osc_object_kmem, GFP_NOFS); + if (osc) { obj = osc2lu(osc); lu_object_init(obj, NULL, dev); osc->oo_cl.co_ops = &osc_ops; diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c index 2439d804fe75..d720b1a1c18c 100644 --- a/drivers/staging/lustre/lustre/osc/osc_page.c +++ b/drivers/staging/lustre/lustre/osc/osc_page.c @@ -51,111 +51,12 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj, * @{ */ -/* - * Comment out osc_page_protected because it may sleep inside the - * the client_obd_list_lock. - * client_obd_list_lock -> osc_ap_completion -> osc_completion -> - * -> osc_page_protected -> osc_page_is_dlocked -> osc_match_base - * -> ldlm_lock_match -> sptlrpc_import_check_ctx -> sleep. - */ -#if 0 -static int osc_page_is_dlocked(const struct lu_env *env, - const struct osc_page *opg, - enum cl_lock_mode mode, int pending, int unref) -{ - struct cl_page *page; - struct osc_object *obj; - struct osc_thread_info *info; - struct ldlm_res_id *resname; - struct lustre_handle *lockh; - ldlm_policy_data_t *policy; - ldlm_mode_t dlmmode; - __u64 flags; - - might_sleep(); - - info = osc_env_info(env); - resname = &info->oti_resname; - policy = &info->oti_policy; - lockh = &info->oti_handle; - page = opg->ops_cl.cpl_page; - obj = cl2osc(opg->ops_cl.cpl_obj); - - flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED; - if (pending) - flags |= LDLM_FL_CBPENDING; - - dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW; - osc_lock_build_res(env, obj, resname); - osc_index2policy(policy, page->cp_obj, page->cp_index, page->cp_index); - return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy, - dlmmode, &flags, NULL, lockh, unref); -} - -/** - * Checks an invariant that a page in the cache is covered by a lock, as - * needed. - */ -static int osc_page_protected(const struct lu_env *env, - const struct osc_page *opg, - enum cl_lock_mode mode, int unref) -{ - struct cl_object_header *hdr; - struct cl_lock *scan; - struct cl_page *page; - struct cl_lock_descr *descr; - int result; - - LINVRNT(!opg->ops_temp); - - page = opg->ops_cl.cpl_page; - if (page->cp_owner != NULL && - cl_io_top(page->cp_owner)->ci_lockreq == CILR_NEVER) - /* - * If IO is done without locks (liblustre, or lloop), lock is - * not required. 
- */ - result = 1; - else - /* otherwise check for a DLM lock */ - result = osc_page_is_dlocked(env, opg, mode, 1, unref); - if (result == 0) { - /* maybe this page is a part of a lockless io? */ - hdr = cl_object_header(opg->ops_cl.cpl_obj); - descr = &osc_env_info(env)->oti_descr; - descr->cld_mode = mode; - descr->cld_start = page->cp_index; - descr->cld_end = page->cp_index; - spin_lock(&hdr->coh_lock_guard); - list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) { - /* - * Lock-less sub-lock has to be either in HELD state - * (when io is actively going on), or in CACHED state, - * when top-lock is being unlocked: - * cl_io_unlock()->cl_unuse()->...->lov_lock_unuse(). - */ - if ((scan->cll_state == CLS_HELD || - scan->cll_state == CLS_CACHED) && - cl_lock_ext_match(&scan->cll_descr, descr)) { - struct osc_lock *olck; - - olck = osc_lock_at(scan); - result = osc_lock_is_lockless(olck); - break; - } - } - spin_unlock(&hdr->coh_lock_guard); - } - return result; -} -#else static int osc_page_protected(const struct lu_env *env, const struct osc_page *opg, enum cl_lock_mode mode, int unref) { return 1; } -#endif /***************************************************************************** * @@ -168,7 +69,7 @@ static void osc_page_fini(const struct lu_env *env, struct osc_page *opg = cl2osc_page(slice); CDEBUG(D_TRACE, "%p\n", opg); - LASSERT(opg->ops_lock == NULL); + LASSERT(!opg->ops_lock); } static void osc_page_transfer_get(struct osc_page *opg, const char *label) @@ -204,7 +105,8 @@ static void osc_page_transfer_add(const struct lu_env *env, struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj); /* ops_lru and ops_inflight share the same field, so take it from LRU - * first and then use it as inflight. */ + * first and then use it as inflight. + */ osc_lru_del(osc_cli(obj), opg, false); spin_lock(&obj->oo_seatbelt); @@ -232,9 +134,10 @@ static int osc_page_cache_add(const struct lu_env *env, /* for sync write, kernel will wait for this page to be flushed before * osc_io_end() is called, so release it earlier. - * for mkwrite(), it's known there is no further pages. */ + * for mkwrite(), it's known there is no further pages. 
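[Review note] osc_page_transfer_add() above depends on ops_lru and ops_inflight being one and the same storage: a page sits either on the client LRU while idle or on a submitter's in-flight list while under RPC, never both. One way to express that exclusivity; this layout is illustrative only, and the real struct osc_page need not use an anonymous union (C11):

	#include <stdio.h>

	struct list_head {
		struct list_head *next, *prev;
	};

	struct demo_page {
		union {
			struct list_head ops_lru;	/* cached and idle */
			struct list_head ops_inflight;	/* under RPC       */
		};
	};

	int main(void)
	{
		printf("one field, two roles: %zu bytes\n",
		       sizeof(struct demo_page));
		return 0;
	}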
+ */ if (cl_io_is_sync_write(io) || cl_io_is_mkwrite(io)) { - if (oio->oi_active != NULL) { + if (oio->oi_active) { osc_extent_release(env, oio->oi_active); oio->oi_active = NULL; } @@ -258,7 +161,7 @@ static int osc_page_addref_lock(const struct lu_env *env, struct osc_lock *olock; int rc; - LASSERT(opg->ops_lock == NULL); + LASSERT(!opg->ops_lock); olock = osc_lock_at(lock); if (atomic_inc_return(&olock->ols_pageref) <= 0) { @@ -278,7 +181,7 @@ static void osc_page_putref_lock(const struct lu_env *env, struct cl_lock *lock = opg->ops_lock; struct osc_lock *olock; - LASSERT(lock != NULL); + LASSERT(lock); olock = osc_lock_at(lock); atomic_dec(&olock->ols_pageref); @@ -296,7 +199,7 @@ static int osc_page_is_under_lock(const struct lu_env *env, lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page, NULL, 1, 0); - if (lock != NULL) { + if (lock) { if (osc_page_addref_lock(env, cl2osc_page(slice), lock) == 0) result = -EBUSY; cl_lock_put(env, lock); @@ -424,7 +327,7 @@ static void osc_page_delete(const struct lu_env *env, } spin_lock(&obj->oo_seatbelt); - if (opg->ops_submitter != NULL) { + if (opg->ops_submitter) { LASSERT(!list_empty(&opg->ops_inflight)); list_del_init(&opg->ops_inflight); opg->ops_submitter = NULL; @@ -434,8 +337,8 @@ static void osc_page_delete(const struct lu_env *env, osc_lru_del(osc_cli(obj), opg, true); } -void osc_page_clip(const struct lu_env *env, const struct cl_page_slice *slice, - int from, int to) +static void osc_page_clip(const struct lu_env *env, + const struct cl_page_slice *slice, int from, int to) { struct osc_page *opg = cl2osc_page(slice); struct osc_async_page *oap = &opg->ops_oap; @@ -458,7 +361,8 @@ static int osc_page_cancel(const struct lu_env *env, LINVRNT(osc_page_protected(env, opg, CLM_READ, 0)); /* Check if the transferring against this page - * is completed, or not even queued. */ + * is completed, or not even queued. + */ if (opg->ops_transfer_pinned) /* FIXME: may not be interrupted.. */ rc = osc_cancel_async_page(env, opg); @@ -499,7 +403,7 @@ static const struct cl_page_operations osc_page_ops = { }; int osc_page_init(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, struct page *vmpage) + struct cl_page *page, struct page *vmpage) { struct osc_object *osc = cl2osc(obj); struct osc_page *opg = cl_object_page_slice(obj, page); @@ -509,20 +413,20 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj, opg->ops_to = PAGE_CACHE_SIZE; result = osc_prep_async_page(osc, opg, vmpage, - cl_offset(obj, page->cp_index)); + cl_offset(obj, page->cp_index)); if (result == 0) { struct osc_io *oio = osc_env_io(env); opg->ops_srvlock = osc_io_srvlock(oio); - cl_page_slice_add(page, &opg->ops_cl, obj, - &osc_page_ops); + cl_page_slice_add(page, &opg->ops_cl, obj, &osc_page_ops); } /* * Cannot assert osc_page_protected() here as read-ahead * creates temporary pages outside of a lock. 
*/ /* ops_inflight and ops_lru are the same field, but it doesn't - * hurt to initialize it twice :-) */ + * hurt to initialize it twice :-) + */ INIT_LIST_HEAD(&opg->ops_inflight); INIT_LIST_HEAD(&opg->ops_lru); @@ -557,7 +461,7 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg, oap->oap_brw_flags = brw_flags | OBD_BRW_SYNC; if (!client_is_remote(osc_export(obj)) && - capable(CFS_CAP_SYS_RESOURCE)) { + capable(CFS_CAP_SYS_RESOURCE)) { oap->oap_brw_flags |= OBD_BRW_NOQUOTA; oap->oap_cmd |= OBD_BRW_NOQUOTA; } @@ -581,7 +485,8 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg, static DECLARE_WAIT_QUEUE_HEAD(osc_lru_waitq); static atomic_t osc_lru_waiters = ATOMIC_INIT(0); /* LRU pages are freed in batch mode. OSC should at least free this - * number of pages to avoid running out of LRU budget, and.. */ + * number of pages to avoid running out of LRU budget, and.. + */ static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT); /* 2M */ /* free this number at most otherwise it will take too long time to finish. */ static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */ @@ -590,7 +495,8 @@ static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */ * we should free slots aggressively. In this way, slots are freed in a steady * step to maintain fairness among OSCs. * - * Return how many LRU pages should be freed. */ + * Return how many LRU pages should be freed. + */ static int osc_cache_too_much(struct client_obd *cli) { struct cl_client_cache *cache = cli->cl_cache; @@ -602,7 +508,8 @@ static int osc_cache_too_much(struct client_obd *cli) return min(pages, lru_shrink_max); /* if it's going to run out LRU slots, we should free some, but not - * too much to maintain fairness among OSCs. */ + * too much to maintain fairness among OSCs. + */ if (atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) { unsigned long tmp; @@ -630,7 +537,8 @@ static int discard_pagevec(const struct lu_env *env, struct cl_io *io, /* free LRU page only if nobody is using it. * This check is necessary to avoid freeing the pages * having already been removed from LRU and pinned - * for IO. */ + * for IO. + */ if (!cl_page_in_use(page)) { cl_page_unmap(env, io, page); cl_page_discard(env, io, page); @@ -655,6 +563,7 @@ int osc_lru_shrink(struct client_obd *cli, int target) struct cl_object *clobj = NULL; struct cl_page **pvec; struct osc_page *opg; + struct osc_page *temp; int maxscan = 0; int count = 0; int index = 0; @@ -674,28 +583,26 @@ int osc_lru_shrink(struct client_obd *cli, int target) client_obd_list_lock(&cli->cl_lru_list_lock); atomic_inc(&cli->cl_lru_shrinkers); maxscan = min(target << 1, atomic_read(&cli->cl_lru_in_list)); - while (!list_empty(&cli->cl_lru_list)) { + list_for_each_entry_safe(opg, temp, &cli->cl_lru_list, ops_lru) { struct cl_page *page; if (--maxscan < 0) break; - opg = list_entry(cli->cl_lru_list.next, struct osc_page, - ops_lru); page = cl_page_top(opg->ops_cl.cpl_page); if (cl_page_in_use_noref(page)) { list_move_tail(&opg->ops_lru, &cli->cl_lru_list); continue; } - LASSERT(page->cp_obj != NULL); + LASSERT(page->cp_obj); if (clobj != page->cp_obj) { struct cl_object *tmp = page->cp_obj; cl_object_get(tmp); client_obd_list_unlock(&cli->cl_lru_list_lock); - if (clobj != NULL) { + if (clobj) { count -= discard_pagevec(env, io, pvec, index); index = 0; @@ -720,11 +627,13 @@ int osc_lru_shrink(struct client_obd *cli, int target) /* move this page to the end of list as it will be discarded * soon. 
The page will be finally removed from LRU list in - * osc_page_delete(). */ + * osc_page_delete(). + */ list_move_tail(&opg->ops_lru, &cli->cl_lru_list); /* it's okay to grab a refcount here w/o holding lock because - * it has to grab cl_lru_list_lock to delete the page. */ + * it has to grab cl_lru_list_lock to delete the page. + */ cl_page_get(page); pvec[index++] = page; if (++count >= target) @@ -740,7 +649,7 @@ int osc_lru_shrink(struct client_obd *cli, int target) } client_obd_list_unlock(&cli->cl_lru_list_lock); - if (clobj != NULL) { + if (clobj) { count -= discard_pagevec(env, io, pvec, index); cl_io_fini(env, io); @@ -775,7 +684,8 @@ static void osc_lru_add(struct client_obd *cli, struct osc_page *opg) } /* delete page from LRUlist. The page can be deleted from LRUlist for two - * reasons: redirtied or deleted from page cache. */ + * reasons: redirtied or deleted from page cache. + */ static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del) { if (opg->ops_in_lru) { @@ -797,7 +707,8 @@ static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del) * this osc occupies too many LRU pages and kernel is * stealing one of them. * cl_lru_shrinkers is to avoid recursive call in case - * we're already in the context of osc_lru_shrink(). */ + * we're already in the context of osc_lru_shrink(). + */ if (atomic_read(&cli->cl_lru_shrinkers) == 0 && !memory_pressure_get()) osc_lru_shrink(cli, osc_cache_too_much(cli)); @@ -819,22 +730,23 @@ static int osc_lru_reclaim(struct client_obd *cli) int max_scans; int rc; - LASSERT(cache != NULL); + LASSERT(cache); rc = osc_lru_shrink(cli, lru_shrink_min); if (rc != 0) { CDEBUG(D_CACHE, "%s: Free %d pages from own LRU: %p.\n", - cli->cl_import->imp_obd->obd_name, rc, cli); + cli->cl_import->imp_obd->obd_name, rc, cli); return rc; } CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %d, busy: %d.\n", - cli->cl_import->imp_obd->obd_name, cli, - atomic_read(&cli->cl_lru_in_list), - atomic_read(&cli->cl_lru_busy)); + cli->cl_import->imp_obd->obd_name, cli, + atomic_read(&cli->cl_lru_in_list), + atomic_read(&cli->cl_lru_busy)); /* Reclaim LRU slots from other client_obd as it can't free enough - * from its own. This should rarely happen. */ + * from its own. This should rarely happen. 
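[Review note] the osc_lru_shrink() rewrite above replaces an open-coded list_entry(cl_lru_list.next, ...) loop with list_for_each_entry_safe(). The _safe variant matters because the body moves the current entry with list_move_tail() and may drop cl_lru_list_lock, so the iterator has to cache the next element before the body runs. Kernel-style fragment of the pattern; page_is_busy() is a hypothetical stand-in for the real checks:

	struct osc_page *opg, *temp;

	list_for_each_entry_safe(opg, temp, &cli->cl_lru_list, ops_lru) {
		if (page_is_busy(opg))	/* hypothetical predicate */
			continue;
		/* safe: `temp` already points at the next entry */
		list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
	}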
+ */ spin_lock(&cache->ccc_lru_lock); LASSERT(!list_empty(&cache->ccc_lru)); @@ -844,12 +756,12 @@ static int osc_lru_reclaim(struct client_obd *cli) max_scans = atomic_read(&cache->ccc_users); while (--max_scans > 0 && !list_empty(&cache->ccc_lru)) { cli = list_entry(cache->ccc_lru.next, struct client_obd, - cl_lru_osc); + cl_lru_osc); CDEBUG(D_CACHE, "%s: cli %p LRU pages: %d, busy: %d.\n", - cli->cl_import->imp_obd->obd_name, cli, - atomic_read(&cli->cl_lru_in_list), - atomic_read(&cli->cl_lru_busy)); + cli->cl_import->imp_obd->obd_name, cli, + atomic_read(&cli->cl_lru_in_list), + atomic_read(&cli->cl_lru_busy)); list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru); if (atomic_read(&cli->cl_lru_in_list) > 0) { @@ -864,7 +776,7 @@ static int osc_lru_reclaim(struct client_obd *cli) spin_unlock(&cache->ccc_lru_lock); CDEBUG(D_CACHE, "%s: cli %p freed %d pages.\n", - cli->cl_import->imp_obd->obd_name, cli, rc); + cli->cl_import->imp_obd->obd_name, cli, rc); return rc; } @@ -875,7 +787,7 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj, struct client_obd *cli = osc_cli(obj); int rc = 0; - if (cli->cl_cache == NULL) /* shall not be in LRU */ + if (!cli->cl_cache) /* shall not be in LRU */ return 0; LASSERT(atomic_read(cli->cl_lru_left) >= 0); @@ -892,15 +804,16 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj, cond_resched(); /* slowest case, all of caching pages are busy, notifying - * other OSCs that we're lack of LRU slots. */ + * other OSCs that we're lack of LRU slots. + */ atomic_inc(&osc_lru_waiters); gen = atomic_read(&cli->cl_lru_in_list); rc = l_wait_event(osc_lru_waitq, - atomic_read(cli->cl_lru_left) > 0 || - (atomic_read(&cli->cl_lru_in_list) > 0 && - gen != atomic_read(&cli->cl_lru_in_list)), - &lwi); + atomic_read(cli->cl_lru_left) > 0 || + (atomic_read(&cli->cl_lru_in_list) > 0 && + gen != atomic_read(&cli->cl_lru_in_list)), + &lwi); atomic_dec(&osc_lru_waiters); if (rc < 0) diff --git a/drivers/staging/lustre/lustre/osc/osc_quota.c b/drivers/staging/lustre/lustre/osc/osc_quota.c index e70e7961d763..194d8ede40a2 100644 --- a/drivers/staging/lustre/lustre/osc/osc_quota.c +++ b/drivers/staging/lustre/lustre/osc/osc_quota.c @@ -13,11 +13,6 @@ * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code). * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 021110-1307, USA - * * GPL HEADER END */ /* @@ -35,8 +30,8 @@ static inline struct osc_quota_info *osc_oqi_alloc(u32 id) { struct osc_quota_info *oqi; - oqi = kmem_cache_alloc(osc_quota_kmem, GFP_NOFS | __GFP_ZERO); - if (oqi != NULL) + oqi = kmem_cache_zalloc(osc_quota_kmem, GFP_NOFS); + if (oqi) oqi->oqi_id = id; return oqi; @@ -52,10 +47,12 @@ int osc_quota_chkdq(struct client_obd *cli, const unsigned int qid[]) oqi = cfs_hash_lookup(cli->cl_quota_hash[type], &qid[type]); if (oqi) { /* do not try to access oqi here, it could have been - * freed by osc_quota_setdq() */ + * freed by osc_quota_setdq() + */ /* the slot is busy, the user is about to run out of - * quota space on this OST */ + * quota space on this OST + */ CDEBUG(D_QUOTA, "chkdq found noquota for %s %d\n", type == USRQUOTA ? 
"user" : "grout", qid[type]); return NO_QUOTA; @@ -89,12 +86,13 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[], oqi = cfs_hash_lookup(cli->cl_quota_hash[type], &qid[type]); if ((flags & FL_QUOTA_FLAG(type)) != 0) { /* This ID is getting close to its quota limit, let's - * switch to sync I/O */ - if (oqi != NULL) + * switch to sync I/O + */ + if (oqi) continue; oqi = osc_oqi_alloc(qid[type]); - if (oqi == NULL) { + if (!oqi) { rc = -ENOMEM; break; } @@ -113,8 +111,9 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[], qid[type], rc); } else { /* This ID is now off the hook, let's remove it from - * the hash table */ - if (oqi == NULL) + * the hash table + */ + if (!oqi) continue; oqi = cfs_hash_del_key(cli->cl_quota_hash[type], @@ -147,7 +146,7 @@ oqi_keycmp(const void *key, struct hlist_node *hnode) struct osc_quota_info *oqi; u32 uid; - LASSERT(key != NULL); + LASSERT(key); uid = *((u32 *)key); oqi = hlist_entry(hnode, struct osc_quota_info, oqi_hash); @@ -218,7 +217,7 @@ int osc_quota_setup(struct obd_device *obd) CFS_HASH_MAX_THETA, "a_hash_ops, CFS_HASH_DEFAULT); - if (cli->cl_quota_hash[type] == NULL) + if (!cli->cl_quota_hash[type]) break; } @@ -252,7 +251,7 @@ int osc_quotactl(struct obd_device *unused, struct obd_export *exp, req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_OST_QUOTACTL, LUSTRE_OST_VERSION, OST_QUOTACTL); - if (req == NULL) + if (!req) return -ENOMEM; oqc = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL); @@ -294,7 +293,7 @@ int osc_quotacheck(struct obd_device *unused, struct obd_export *exp, req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_OST_QUOTACHECK, LUSTRE_OST_VERSION, OST_QUOTACHECK); - if (req == NULL) + if (!req) return -ENOMEM; body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL); @@ -302,8 +301,8 @@ int osc_quotacheck(struct obd_device *unused, struct obd_export *exp, ptlrpc_request_set_replen(req); - /* the next poll will find -ENODATA, that means quotacheck is - * going on */ + /* the next poll will find -ENODATA, that means quotacheck is going on + */ cli->cl_qchk_stat = -ENODATA; rc = ptlrpc_queue_wait(req); if (rc) diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c index 7034f0a942c5..74805f1ae888 100644 --- a/drivers/staging/lustre/lustre/osc/osc_request.c +++ b/drivers/staging/lustre/lustre/osc/osc_request.c @@ -104,7 +104,6 @@ struct osc_enqueue_args { static void osc_release_ppga(struct brw_page **ppga, u32 count); static int brw_interpret(const struct lu_env *env, struct ptlrpc_request *req, void *data, int rc); -static int osc_cleanup(struct obd_device *obd); /* Pack OSC object metadata for disk storage (LE byte order). 
*/ static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp, @@ -113,18 +112,18 @@ static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp, int lmm_size; lmm_size = sizeof(**lmmp); - if (lmmp == NULL) + if (!lmmp) return lmm_size; - if (*lmmp != NULL && lsm == NULL) { + if (*lmmp && !lsm) { kfree(*lmmp); *lmmp = NULL; return 0; - } else if (unlikely(lsm != NULL && ostid_id(&lsm->lsm_oi) == 0)) { + } else if (unlikely(lsm && ostid_id(&lsm->lsm_oi) == 0)) { return -EBADF; } - if (*lmmp == NULL) { + if (!*lmmp) { *lmmp = kzalloc(lmm_size, GFP_NOFS); if (!*lmmp) return -ENOMEM; @@ -143,7 +142,7 @@ static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, int lsm_size; struct obd_import *imp = class_exp2cliimp(exp); - if (lmm != NULL) { + if (lmm) { if (lmm_bytes < sizeof(*lmm)) { CERROR("%s: lov_mds_md too small: %d, need %d\n", exp->exp_obd->obd_name, lmm_bytes, @@ -160,23 +159,23 @@ static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, } lsm_size = lov_stripe_md_size(1); - if (lsmp == NULL) + if (!lsmp) return lsm_size; - if (*lsmp != NULL && lmm == NULL) { + if (*lsmp && !lmm) { kfree((*lsmp)->lsm_oinfo[0]); kfree(*lsmp); *lsmp = NULL; return 0; } - if (*lsmp == NULL) { + if (!*lsmp) { *lsmp = kzalloc(lsm_size, GFP_NOFS); - if (unlikely(*lsmp == NULL)) + if (unlikely(!*lsmp)) return -ENOMEM; (*lsmp)->lsm_oinfo[0] = kzalloc(sizeof(struct lov_oinfo), GFP_NOFS); - if (unlikely((*lsmp)->lsm_oinfo[0] == NULL)) { + if (unlikely(!(*lsmp)->lsm_oinfo[0])) { kfree(*lsmp); return -ENOMEM; } @@ -185,11 +184,11 @@ static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, return -EBADF; } - if (lmm != NULL) + if (lmm) /* XXX zero *lsmp? */ ostid_le_to_cpu(&lmm->lmm_oi, &(*lsmp)->lsm_oi); - if (imp != NULL && + if (imp && (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES)) (*lsmp)->lsm_maxbytes = imp->imp_connect_data.ocd_maxbytes; else @@ -246,7 +245,7 @@ static int osc_getattr_async(struct obd_export *exp, struct obd_info *oinfo, int rc; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR); @@ -276,7 +275,7 @@ static int osc_getattr(const struct lu_env *env, struct obd_export *exp, int rc; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR); @@ -294,7 +293,7 @@ static int osc_getattr(const struct lu_env *env, struct obd_export *exp, goto out; body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (body == NULL) { + if (!body) { rc = -EPROTO; goto out; } @@ -321,7 +320,7 @@ static int osc_setattr(const struct lu_env *env, struct obd_export *exp, LASSERT(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP); req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR); @@ -339,7 +338,7 @@ static int osc_setattr(const struct lu_env *env, struct obd_export *exp, goto out; body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (body == NULL) { + if (!body) { rc = -EPROTO; goto out; } @@ -362,7 +361,7 @@ static int osc_setattr_interpret(const struct lu_env *env, goto out; body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (body == NULL) { + if (!body) { rc = -EPROTO; goto out; } @@ -384,7 +383,7 @@ int 
osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo, int rc; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR); @@ -451,7 +450,7 @@ static int osc_real_create(struct obd_export *exp, struct obdo *oa, } req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto out; } @@ -482,7 +481,7 @@ static int osc_real_create(struct obd_export *exp, struct obdo *oa, goto out_req; body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (body == NULL) { + if (!body) { rc = -EPROTO; goto out_req; } @@ -500,7 +499,7 @@ static int osc_real_create(struct obd_export *exp, struct obdo *oa, lsm->lsm_oi = oa->o_oi; *ea = lsm; - if (oti != NULL) { + if (oti) { oti->oti_transno = lustre_msg_get_transno(req->rq_repmsg); if (oa->o_valid & OBD_MD_FLCOOKIE) { @@ -530,7 +529,7 @@ int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo, int rc; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_PUNCH); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH); @@ -573,7 +572,7 @@ static int osc_sync_interpret(const struct lu_env *env, goto out; body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (body == NULL) { + if (!body) { CERROR("can't unpack ost_body\n"); rc = -EPROTO; goto out; @@ -595,7 +594,7 @@ int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo, int rc; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC); @@ -629,10 +628,11 @@ int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo, /* Find and cancel locally locks matched by @mode in the resource found by * @objid. Found locks are added into @cancel list. Returns the amount of - * locks added to @cancels list. */ + * locks added to @cancels list. + */ static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa, struct list_head *cancels, - ldlm_mode_t mode, __u64 lock_flags) + enum ldlm_mode mode, __u64 lock_flags) { struct ldlm_namespace *ns = exp->exp_obd->obd_namespace; struct ldlm_res_id res_id; @@ -644,13 +644,14 @@ static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa, * * This distinguishes from a case when ELC is not supported originally, * when we still want to cancel locks in advance and just cancel them - * locally, without sending any RPC. */ + * locally, without sending any RPC. + */ if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns)) return 0; ostid_build_res_name(&oa->o_oi, &res_id); res = ldlm_resource_get(ns, NULL, &res_id, 0, 0); - if (res == NULL) + if (!res) return 0; LDLM_RESOURCE_ADDREF(res); @@ -723,7 +724,8 @@ static int osc_create(const struct lu_env *env, struct obd_export *exp, * If the client dies, or the OST is down when the object should be destroyed, * the records are not cancelled, and when the OST reconnects to the MDS next, * it will retrieve the llog unlink logs and then sends the log cancellation - * cookies to the MDS after committing destroy transactions. */ + * cookies to the MDS after committing destroy transactions. 
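[Review note] for the grant-shrinking hunks just below: osc_shrink_grant() keeps enough grant for a full set of in-flight RPCs plus one. A worked standalone example with invented but typical values, where 256 pages per RPC on 4 KiB pages makes 1 MiB RPCs, so (8 + 1) * 1 MiB = 9 MiB:

	#include <stdio.h>

	int main(void)
	{
		unsigned long max_rpcs_in_flight = 8;	/* example value */
		unsigned long max_pages_per_rpc = 256;	/* example value */
		unsigned int page_shift = 12;		/* 4 KiB pages   */
		unsigned long long target_bytes;

		target_bytes = (unsigned long long)(max_rpcs_in_flight + 1) *
			       ((unsigned long long)max_pages_per_rpc << page_shift);

		printf("%llu\n", target_bytes);	/* 9437184 bytes = 9 MiB */
		return 0;
	}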
+	 */
 static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
 		       struct obdo *oa, struct lov_stripe_md *ea,
 		       struct obd_trans_info *oti, struct obd_export *md_export)
@@ -743,7 +745,7 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
 					LDLM_FL_DISCARD_DATA);
 
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
-	if (req == NULL) {
+	if (!req) {
 		ldlm_lock_list_put(&cancels, l_bl_ast, count);
 		return -ENOMEM;
 	}
@@ -758,7 +760,7 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
 	req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
 	ptlrpc_at_set_req_timeout(req);
 
-	if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE)
+	if (oti && oa->o_valid & OBD_MD_FLCOOKIE)
 		oa->o_lcookie = *oti->oti_logcookies;
 	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
 	LASSERT(body);
@@ -769,7 +771,8 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
 	/* If osc_destroy is for destroying the unlink orphan,
 	 * sent from MDT to OST, which should not be blocked here,
 	 * because the process might be triggered by ptlrpcd, and
-	 * it is not good to block ptlrpcd thread (b=16006)*/
+	 * it is not good to block ptlrpcd thread (b=16006)
+	 */
 	if (!(oa->o_flags & OBD_FL_DELORPHAN)) {
 		req->rq_interpret_reply = osc_destroy_interpret;
 		if (!osc_can_send_destroy(cli)) {
@@ -810,7 +813,8 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
 		     (long)(obd_max_dirty_pages + 1))) {
 		/* The atomic_read() allowing the atomic_inc() are
 		 * not covered by a lock thus they may safely race and trip
-		 * this CERROR() unless we add in a small fudge factor (+1). */
+		 * this CERROR() unless we add in a small fudge factor (+1).
+		 */
 		CERROR("dirty %d - %d > system dirty_max %d\n",
 		       atomic_read(&obd_dirty_pages),
 		       atomic_read(&obd_dirty_transit_pages),
@@ -839,7 +843,7 @@ void osc_update_next_shrink(struct client_obd *cli)
 {
 	cli->cl_next_shrink_grant =
 		cfs_time_shift(cli->cl_grant_shrink_interval);
-	CDEBUG(D_CACHE, "next time %ld to shrink grant \n",
+	CDEBUG(D_CACHE, "next time %ld to shrink grant\n",
 	       cli->cl_next_shrink_grant);
 }
@@ -900,7 +904,8 @@ static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
 /* Shrink the current grant, either from some large amount to enough for a
  * full set of in-flight RPCs, or if we have already shrunk to that limit
  * then to enough for a single RPC. This avoids keeping more grant than
- * needed, and avoids shrinking the grant piecemeal. */
+ * needed, and avoids shrinking the grant piecemeal.
+ */
 static int osc_shrink_grant(struct client_obd *cli)
 {
 	__u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
@@ -922,7 +927,8 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
 	client_obd_list_lock(&cli->cl_loi_list_lock);
 	/* Don't shrink if we are already above or below the desired limit
 	 * We don't want to shrink below a single RPC, as that will negatively
-	 * impact block allocation and long-term performance. */
+	 * impact block allocation and long-term performance.
+	 */
 	if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT)
 		target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
 
@@ -970,7 +976,8 @@ static int osc_should_shrink_grant(struct client_obd *client)
 	if (cfs_time_aftereq(time, next_shrink - 5 * CFS_TICK)) {
 		/* Get the current RPC size directly, instead of going via:
 		 * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
-		 * Keep comment here so that it can be found by searching. 
*/ + * Keep comment here so that it can be found by searching. + */ int brw_size = client->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; if (client->cl_import->imp_state == LUSTRE_IMP_FULL && @@ -986,8 +993,7 @@ static int osc_grant_shrink_grant_cb(struct timeout_item *item, void *data) { struct client_obd *client; - list_for_each_entry(client, &item->ti_obd_list, - cl_grant_shrink_list) { + list_for_each_entry(client, &item->ti_obd_list, cl_grant_shrink_list) { if (osc_should_shrink_grant(client)) osc_shrink_grant(client); } @@ -1004,10 +1010,10 @@ static int osc_add_shrink_grant(struct client_obd *client) &client->cl_grant_shrink_list); if (rc) { CERROR("add grant client %s error %d\n", - client->cl_import->imp_obd->obd_name, rc); + client->cl_import->imp_obd->obd_name, rc); return rc; } - CDEBUG(D_CACHE, "add grant client %s \n", + CDEBUG(D_CACHE, "add grant client %s\n", client->cl_import->imp_obd->obd_name); osc_update_next_shrink(client); return 0; @@ -1040,7 +1046,8 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd) cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant, ocd->ocd_grant, cli->cl_dirty); /* workaround for servers which do not have the patch from - * LU-2679 */ + * LU-2679 + */ cli->cl_avail_grant = ocd->ocd_grant; } @@ -1060,7 +1067,8 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd) /* We assume that the reason this OSC got a short read is because it read * beyond the end of a stripe file; i.e. lustre is reading a sparse file * via the LOV, and it _knows_ it's reading inside the file, it's just that - * this stripe never got written at or beyond this stripe offset yet. */ + * this stripe never got written at or beyond this stripe offset yet. + */ static void handle_short_read(int nob_read, u32 page_count, struct brw_page **pga) { @@ -1106,7 +1114,7 @@ static int check_write_rcs(struct ptlrpc_request *req, remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS, sizeof(*remote_rcs) * niocount); - if (remote_rcs == NULL) { + if (!remote_rcs) { CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n"); return -EPROTO; } @@ -1118,7 +1126,7 @@ static int check_write_rcs(struct ptlrpc_request *req, if (remote_rcs[i] != 0) { CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n", - i, remote_rcs[i], req); + i, remote_rcs[i], req); return -EPROTO; } } @@ -1139,7 +1147,8 @@ static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2) OBD_BRW_SYNC | OBD_BRW_ASYNC|OBD_BRW_NOQUOTA); /* warn if we try to combine flags that we don't know to be - * safe to combine */ + * safe to combine + */ if (unlikely((p1->flag & mask) != (p2->flag & mask))) { CWARN("Saw flags 0x%x and 0x%x in the same brw, please report this at http://bugs.whamcloud.com/\n", p1->flag, p2->flag); @@ -1152,7 +1161,7 @@ static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2) static u32 osc_checksum_bulk(int nob, u32 pg_count, struct brw_page **pga, int opc, - cksum_type_t cksum_type) + enum cksum_type cksum_type) { __u32 cksum; int i = 0; @@ -1174,7 +1183,8 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count, int count = pga[i]->count > nob ? 
nob : pga[i]->count; /* corrupt the data before we compute the checksum, to - * simulate an OST->client data error */ + * simulate an OST->client data error + */ if (i == 0 && opc == OST_READ && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) { unsigned char *ptr = kmap(pga[i]->pg); @@ -1184,7 +1194,7 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count, kunmap(pga[i]->pg); } cfs_crypto_hash_update_page(hdesc, pga[i]->pg, - pga[i]->off & ~CFS_PAGE_MASK, + pga[i]->off & ~CFS_PAGE_MASK, count); CDEBUG(D_PAGE, "page %p map %p index %lu flags %lx count %u priv %0lx: off %d\n", @@ -1205,7 +1215,8 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count, cfs_crypto_hash_final(hdesc, NULL, NULL); /* For sending we only compute the wrong checksum instead - * of corrupting the data so it is still correct on a redo */ + * of corrupting the data so it is still correct on a redo + */ if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND)) cksum++; @@ -1244,7 +1255,7 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, opc = OST_READ; req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ); } - if (req == NULL) + if (!req) return -ENOMEM; for (niocount = i = 1; i < page_count; i++) { @@ -1266,7 +1277,8 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */ ptlrpc_at_set_req_timeout(req); /* ask ptlrpc not to resend on EINPROGRESS since BRWs have their own - * retry logic */ + * retry logic + */ req->rq_no_retry_einprogress = 1; desc = ptlrpc_prep_bulk_imp(req, page_count, @@ -1274,7 +1286,7 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, opc == OST_WRITE ? BULK_GET_SOURCE : BULK_PUT_SINK, OST_BULK_PORTAL); - if (desc == NULL) { + if (!desc) { rc = -ENOMEM; goto out; } @@ -1283,7 +1295,7 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, body = req_capsule_client_get(pill, &RMF_OST_BODY); ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ); niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE); - LASSERT(body != NULL && ioobj != NULL && niobuf != NULL); + LASSERT(body && ioobj && niobuf); lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa); @@ -1293,7 +1305,8 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, * that might be send for this request. The actual number is decided * when the RPC is finally sent in ptlrpc_register_bulk(). It sends * "max - 1" for old client compatibility sending "0", and also so the - * the actual maximum is a power-of-two number, not one less. LU-1431 */ + * the actual maximum is a power-of-two number, not one less. LU-1431 + */ ioobj_max_brw_set(ioobj, desc->bd_md_max_brw); LASSERT(page_count > 0); pg_prev = pga[0]; @@ -1355,8 +1368,9 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, if (cli->cl_checksum && !sptlrpc_flavor_has_bulk(&req->rq_flvr)) { /* store cl_cksum_type in a local variable since - * it can be changed via lprocfs */ - cksum_type_t cksum_type = cli->cl_cksum_type; + * it can be changed via lprocfs + */ + enum cksum_type cksum_type = cli->cl_cksum_type; if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) { oa->o_flags &= OBD_FL_LOCAL_MASK; @@ -1375,7 +1389,8 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, oa->o_flags |= cksum_type_pack(cksum_type); } else { /* clear out the checksum flag, in case this is a - * resend but cl_checksum is no longer set. b=11238 */ + * resend but cl_checksum is no longer set. 
b=11238 + */ oa->o_valid &= ~OBD_MD_FLCKSUM; } oa->o_cksum = body->oa.o_cksum; @@ -1415,11 +1430,11 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer, __u32 client_cksum, __u32 server_cksum, int nob, u32 page_count, struct brw_page **pga, - cksum_type_t client_cksum_type) + enum cksum_type client_cksum_type) { __u32 new_cksum; char *msg; - cksum_type_t cksum_type; + enum cksum_type cksum_type; if (server_cksum == client_cksum) { CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum); @@ -1472,9 +1487,9 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc) return rc; } - LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc); + LASSERTF(req->rq_repmsg, "rc = %d\n", rc); body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (body == NULL) { + if (!body) { DEBUG_REQ(D_INFO, req, "Can't unpack body\n"); return -EPROTO; } @@ -1538,7 +1553,7 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc) if (rc != req->rq_bulk->bd_nob_transferred) { CERROR("Unexpected rc %d (%d transferred)\n", - rc, req->rq_bulk->bd_nob_transferred); + rc, req->rq_bulk->bd_nob_transferred); return -EPROTO; } @@ -1550,7 +1565,7 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc) __u32 server_cksum = body->oa.o_cksum; char *via = ""; char *router = ""; - cksum_type_t cksum_type; + enum cksum_type cksum_type; cksum_type = cksum_type_unpack(body->oa.o_valid&OBD_MD_FLFLAGS ? body->oa.o_flags : 0); @@ -1627,7 +1642,7 @@ static int osc_brw_redo_request(struct ptlrpc_request *request, return rc; list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) { - if (oap->oap_request != NULL) { + if (oap->oap_request) { LASSERTF(request == oap->oap_request, "request %p != oap_request %p\n", request, oap->oap_request); @@ -1638,12 +1653,14 @@ static int osc_brw_redo_request(struct ptlrpc_request *request, } } /* New request takes over pga and oaps from old request. - * Note that copying a list_head doesn't work, need to move it... */ + * Note that copying a list_head doesn't work, need to move it... + */ aa->aa_resends++; new_req->rq_interpret_reply = request->rq_interpret_reply; new_req->rq_async_args = request->rq_async_args; /* cap resend delay to the current request timeout, this is similar to - * what ptlrpc does (see after_reply()) */ + * what ptlrpc does (see after_reply()) + */ if (aa->aa_resends > new_req->rq_timeout) new_req->rq_sent = ktime_get_real_seconds() + new_req->rq_timeout; else @@ -1669,7 +1686,8 @@ static int osc_brw_redo_request(struct ptlrpc_request *request, /* XXX: This code will run into problem if we're going to support * to add a series of BRW RPCs into a self-defined ptlrpc_request_set * and wait for all of them to be finished. We should inherit request - * set from old request. */ + * set from old request. + */ ptlrpcd_add_req(new_req); DEBUG_REQ(D_INFO, new_req, "new request"); @@ -1709,7 +1727,7 @@ static void sort_brw_pages(struct brw_page **array, int num) static void osc_release_ppga(struct brw_page **ppga, u32 count) { - LASSERT(ppga != NULL); + LASSERT(ppga); kfree(ppga); } @@ -1725,7 +1743,8 @@ static int brw_interpret(const struct lu_env *env, rc = osc_brw_fini_request(req, rc); CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc); /* When server return -EINPROGRESS, client should always retry - * regardless of the number of times the bulk was resent already. */ + * regardless of the number of times the bulk was resent already. 
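[Review note] the resend scheduling in osc_brw_redo_request() above is a linear backoff: the new request is delayed by one second per resend so far, capped at the request timeout, much as ptlrpc's after_reply() does. Standalone sketch of that cap with invented numbers:

	#include <stdio.h>
	#include <time.h>

	static time_t next_send_time(time_t now, int resends, int timeout)
	{
		int delay = resends > timeout ? timeout : resends;

		return now + delay;
	}

	int main(void)
	{
		time_t now = time(NULL);

		printf("%ld\n", (long)(next_send_time(now, 3, 40) - now));	/* 3  */
		printf("%ld\n", (long)(next_send_time(now, 90, 40) - now));	/* 40 */
		return 0;
	}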
+ */ if (osc_recoverable_error(rc)) { if (req->rq_import_generation != req->rq_import->imp_generation) { @@ -1748,7 +1767,7 @@ static int brw_interpret(const struct lu_env *env, } list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) { - if (obj == NULL && rc == 0) { + if (!obj && rc == 0) { obj = osc2cl(ext->oe_obj); cl_object_get(obj); } @@ -1759,7 +1778,7 @@ static int brw_interpret(const struct lu_env *env, LASSERT(list_empty(&aa->aa_exts)); LASSERT(list_empty(&aa->aa_oaps)); - if (obj != NULL) { + if (obj) { struct obdo *oa = aa->aa_oa; struct cl_attr *attr = &osc_env_info(env)->oti_attr; unsigned long valid = 0; @@ -1798,7 +1817,8 @@ static int brw_interpret(const struct lu_env *env, client_obd_list_lock(&cli->cl_loi_list_lock); /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters * is called so we know whether to go to sync BRWs or wait for more - * RPCs to complete */ + * RPCs to complete + */ if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) cli->cl_w_in_flight--; else @@ -1871,13 +1891,13 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, } pga = kcalloc(page_count, sizeof(*pga), GFP_NOFS); - if (pga == NULL) { + if (!pga) { rc = -ENOMEM; goto out; } - oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO); - if (oa == NULL) { + oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); + if (!oa) { rc = -ENOMEM; goto out; } @@ -1886,7 +1906,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, list_for_each_entry(oap, &rpc_list, oap_rpc_item) { struct cl_page *page = oap2cl_page(oap); - if (clerq == NULL) { + if (!clerq) { clerq = cl_req_alloc(env, page, crt, 1 /* only 1-object rpcs for now */); if (IS_ERR(clerq)) { @@ -1907,7 +1927,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, } /* always get the data for the obdo for the rpc */ - LASSERT(clerq != NULL); + LASSERT(clerq); crattr->cra_oa = oa; cl_req_attr_set(env, clerq, crattr, ~0ULL); if (lock) { @@ -1923,7 +1943,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, sort_brw_pages(pga, page_count); rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count, - pga, &req, 1, 0); + pga, &req, 1, 0); if (rc != 0) { CERROR("prep_req failed: %d\n", rc); goto out; @@ -1938,7 +1958,8 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, * we race with setattr (locally or in queue at OST). If OST gets * later setattr before earlier BRW (as determined by the request xid), * the OST will not use BRW timestamps. Sadly, there is no obvious - * way to do this in a single call. bug 10150 */ + * way to do this in a single call. 
bug 10150 + */ body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); crattr->cra_oa = &body->oa; cl_req_attr_set(env, clerq, crattr, @@ -1955,19 +1976,20 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, aa->aa_clerq = clerq; /* queued sync pages can be torn down while the pages - * were between the pending list and the rpc */ + * were between the pending list and the rpc + */ tmp = NULL; list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) { /* only one oap gets a request reference */ - if (tmp == NULL) + if (!tmp) tmp = oap; if (oap->oap_interrupted && !req->rq_intr) { CDEBUG(D_INODE, "oap %p in req %p interrupted\n", - oap, req); + oap, req); ptlrpc_mark_interrupted(req); } } - if (tmp != NULL) + if (tmp) tmp->oap_request = ptlrpc_request_addref(req); client_obd_list_lock(&cli->cl_loi_list_lock); @@ -2001,16 +2023,17 @@ out: kfree(crattr); if (rc != 0) { - LASSERT(req == NULL); + LASSERT(!req); if (oa) kmem_cache_free(obdo_cachep, oa); kfree(pga); /* this should happen rarely and is pretty bad, it makes the - * pending list not follow the dirty order */ + * pending list not follow the dirty order + */ while (!list_empty(ext_list)) { ext = list_entry(ext_list->next, struct osc_extent, - oe_link); + oe_link); list_del_init(&ext->oe_link); osc_extent_finish(env, ext, 0, rc); } @@ -2026,7 +2049,6 @@ static int osc_set_lock_data_with_check(struct ldlm_lock *lock, void *data = einfo->ei_cbdata; int set = 0; - LASSERT(lock != NULL); LASSERT(lock->l_blocking_ast == einfo->ei_cb_bl); LASSERT(lock->l_resource->lr_type == einfo->ei_type); LASSERT(lock->l_completion_ast == einfo->ei_cb_cp); @@ -2035,7 +2057,7 @@ static int osc_set_lock_data_with_check(struct ldlm_lock *lock, lock_res_and_lock(lock); spin_lock(&osc_ast_guard); - if (lock->l_ast_data == NULL) + if (!lock->l_ast_data) lock->l_ast_data = data; if (lock->l_ast_data == data) set = 1; @@ -2052,7 +2074,7 @@ static int osc_set_data_with_check(struct lustre_handle *lockh, struct ldlm_lock *lock = ldlm_handle2lock(lockh); int set = 0; - if (lock != NULL) { + if (lock) { set = osc_set_lock_data_with_check(lock, einfo); LDLM_LOCK_PUT(lock); } else @@ -2064,7 +2086,8 @@ static int osc_set_data_with_check(struct lustre_handle *lockh, /* find any ldlm lock of the inode in osc * return 0 not find * 1 find one - * < 0 error */ + * < 0 error + */ static int osc_find_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm, ldlm_iterator_t replace, void *data) { @@ -2095,7 +2118,6 @@ static int osc_enqueue_fini(struct ptlrpc_request *req, struct ost_lvb *lvb, rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP); - LASSERT(rep != NULL); rep->lock_policy_res1 = ptlrpc_status_ntoh(rep->lock_policy_res1); if (rep->lock_policy_res1) @@ -2127,18 +2149,21 @@ static int osc_enqueue_interpret(const struct lu_env *env, __u64 *flags = aa->oa_flags; /* Make a local copy of a lock handle and a mode, because aa->oa_* - * might be freed anytime after lock upcall has been called. */ + * might be freed anytime after lock upcall has been called. + */ lustre_handle_copy(&handle, aa->oa_lockh); mode = aa->oa_ei->ei_mode; /* ldlm_cli_enqueue is holding a reference on the lock, so it must - * be valid. */ + * be valid. + */ lock = ldlm_handle2lock(&handle); /* Take an additional reference so that a blocking AST that * ldlm_cli_enqueue_fini() might post for a failed lock, is guaranteed * to arrive after an upcall has been executed by - * osc_enqueue_fini(). */ + * osc_enqueue_fini(). 
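[Review note] osc_enqueue_interpret() above copies the lock handle and mode to locals before anything else runs, because the async-args area is embedded in the request and can be freed once the upcall fires. The same copy-before-callback discipline in a standalone toy; all names here are invented:

	#include <stdio.h>
	#include <stdlib.h>

	struct async_args {
		unsigned long long lock_cookie;
	};

	static void upcall(unsigned long long cookie)
	{
		printf("upcall for %llx\n", cookie);
	}

	int main(void)
	{
		struct async_args *aa = malloc(sizeof(*aa));
		unsigned long long cookie;

		if (!aa)
			return 1;
		aa->lock_cookie = 0xdeadbeefULL;
		cookie = aa->lock_cookie;	/* local copy first ...          */
		free(aa);			/* ... the args may die any time */
		upcall(cookie);			/* still safe via the copy       */
		return 0;
	}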
+ */ ldlm_lock_addref(&handle, mode); /* Let CP AST to grant the lock first. */ @@ -2170,7 +2195,7 @@ static int osc_enqueue_interpret(const struct lu_env *env, */ ldlm_lock_decref(&handle, mode); - LASSERTF(lock != NULL, "lockh %p, req %p, aa %p - client evicted?\n", + LASSERTF(lock, "lockh %p, req %p, aa %p - client evicted?\n", aa->oa_lockh, req, aa); ldlm_lock_decref(&handle, mode); LDLM_LOCK_PUT(lock); @@ -2185,7 +2210,8 @@ struct ptlrpc_request_set *PTLRPCD_SET = (void *)1; * others may take a considerable amount of time in a case of ost failure; and * when other sync requests do not get released lock from a client, the client * is excluded from the cluster -- such scenarious make the life difficult, so - * release locks just after they are obtained. */ + * release locks just after they are obtained. + */ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, __u64 *flags, ldlm_policy_data_t *policy, struct ost_lvb *lvb, int kms_valid, @@ -2198,11 +2224,12 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, struct ptlrpc_request *req = NULL; int intent = *flags & LDLM_FL_HAS_INTENT; __u64 match_lvb = (agl != 0 ? 0 : LDLM_FL_LVB_READY); - ldlm_mode_t mode; + enum ldlm_mode mode; int rc; /* Filesystem lock extents are extended to page boundaries so that - * dealing with the page cache is a little smoother. */ + * dealing with the page cache is a little smoother. + */ policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK; policy->l_extent.end |= ~CFS_PAGE_MASK; @@ -2226,7 +2253,8 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, * * At some point we should cancel the read lock instead of making them * send us a blocking callback, but there are problems with canceling - * locks out from other users right now, too. */ + * locks out from other users right now, too. + */ mode = einfo->ei_mode; if (einfo->ei_mode == LCK_PR) mode |= LCK_PW; @@ -2238,7 +2266,8 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, if ((agl != 0) && !(matched->l_flags & LDLM_FL_LVB_READY)) { /* For AGL, if enqueue RPC is sent but the lock is not * granted, then skip to process this strpe. - * Return -ECANCELED to tell the caller. */ + * Return -ECANCELED to tell the caller. + */ ldlm_lock_decref(lockh, mode); LDLM_LOCK_PUT(matched); return -ECANCELED; @@ -2247,19 +2276,22 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, if (osc_set_lock_data_with_check(matched, einfo)) { *flags |= LDLM_FL_LVB_READY; /* addref the lock only if not async requests and PW - * lock is matched whereas we asked for PR. */ + * lock is matched whereas we asked for PR. + */ if (!rqset && einfo->ei_mode != mode) ldlm_lock_addref(lockh, LCK_PR); if (intent) { /* I would like to be able to ASSERT here that * rss <= kms, but I can't, for reasons which - * are explained in lov_enqueue() */ + * are explained in lov_enqueue() + */ } /* We already have a lock, and it's referenced. * * At this point, the cl_lock::cll_state is CLS_QUEUING, - * AGL upcall may change it to CLS_HELD directly. */ + * AGL upcall may change it to CLS_HELD directly. 
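[Review note] osc_enqueue_base() above (and osc_match_base() below) widen lock extents to whole pages before talking to the DLM. Assuming CFS_PAGE_MASK is the usual ~(PAGE_SIZE - 1) in a 64-bit type, the two lines round start down and end up; a worked standalone example with 4 KiB pages:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long page_size = 4096;		/* example only  */
		unsigned long long mask = ~(page_size - 1);	/* CFS_PAGE_MASK */
		unsigned long long start = 5000, end = 9000;

		start -= start & ~mask;	/* round down: 5000 -> 4096  */
		end |= ~mask;		/* round up:   9000 -> 12287 */

		printf("[%llu, %llu]\n", start, end);
		return 0;
	}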
@@ -2281,7 +2313,7 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
 
 		req = ptlrpc_request_alloc(class_exp2cliimp(exp),
 					   &RQF_LDLM_ENQUEUE_LVB);
-		if (req == NULL)
+		if (!req)
 			return -ENOMEM;
 
 		rc = ldlm_prep_enqueue_req(exp, req, &cancels, 0);
@@ -2341,27 +2373,29 @@ int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
 {
 	struct obd_device *obd = exp->exp_obd;
 	__u64 lflags = *flags;
-	ldlm_mode_t rc;
+	enum ldlm_mode rc;
 
 	if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
 		return -EIO;
 
 	/* Filesystem lock extents are extended to page boundaries so that
-	 * dealing with the page cache is a little smoother */
+	 * dealing with the page cache is a little smoother
+	 */
 	policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
 	policy->l_extent.end |= ~CFS_PAGE_MASK;
 
 	/* Next, search for already existing extent locks that will cover us */
 	/* If we're trying to read, we also search for an existing PW lock.  The
 	 * VFS and page cache already protect us locally, so lots of readers/
-	 * writers can share a single PW lock. */
+	 * writers can share a single PW lock.
+	 */
 	rc = mode;
 	if (mode == LCK_PR)
 		rc |= LCK_PW;
 	rc = ldlm_lock_match(obd->obd_namespace, lflags,
 			     res_id, type, policy, rc, lockh, unref);
 	if (rc) {
-		if (data != NULL) {
+		if (data) {
 			if (!osc_set_data_with_check(lockh, data)) {
 				if (!(lflags & LDLM_FL_TEST_LOCK))
 					ldlm_lock_decref(lockh, rc);
@@ -2398,8 +2432,9 @@ static int osc_statfs_interpret(const struct lu_env *env,
 		 * due to issues at a higher level (LOV).
 		 * Exit immediately since the caller is
 		 * aware of the problem and takes care
-		 * of the clean up */
-		return rc;
+		 * of the clean up
+		 */
+		return rc;
 
 	if ((rc == -ENOTCONN || rc == -EAGAIN) &&
 	    (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY)) {
@@ -2411,7 +2446,7 @@ static int osc_statfs_interpret(const struct lu_env *env,
 		goto out;
 
 	msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
-	if (msfs == NULL) {
+	if (!msfs) {
 		rc = -EPROTO;
 		goto out;
 	}
@@ -2436,9 +2471,10 @@ static int osc_statfs_async(struct obd_export *exp,
 	 * extra calls into the filesystem if that isn't necessary (e.g.
 	 * during mount that would help a bit).  Having relative timestamps
 	 * is not so great if request processing is slow, while absolute
-	 * timestamps are not ideal because they need time synchronization. */
+	 * timestamps are not ideal because they need time synchronization.
+	 */
 	req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
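The "rc = mode; if (mode == LCK_PR) rc |= LCK_PW;" step in osc_match_base() works because ldlm lock modes are distinct single bits, so a bitwise OR of modes can describe a set of acceptable matches for ldlm_lock_match(). A toy model of that test; the one-bit encodings below mirror enum ldlm_mode but are assumptions of this sketch, not taken from the patch:

#include <stdio.h>

/* Assumed one-bit mode encodings; only the bit-set property matters. */
enum demo_mode {
	DEMO_LCK_EX = 1,
	DEMO_LCK_PW = 2,
	DEMO_LCK_PR = 4,
};

/* Stand-in for the mode test inside a lock-match walk: a granted lock
 * satisfies the request if its mode bit is in the wanted set.
 */
static int demo_mode_matches(enum demo_mode granted, unsigned int wanted)
{
	return (granted & wanted) != 0;
}

int main(void)
{
	unsigned int wanted = DEMO_LCK_PR;

	if (wanted == DEMO_LCK_PR)
		wanted |= DEMO_LCK_PW;	/* a cached PW lock also covers reads */

	printf("PW satisfies a PR request: %d\n",
	       demo_mode_matches(DEMO_LCK_PW, wanted));	/* prints 1 */
	printf("EX satisfies a PR request: %d\n",
	       demo_mode_matches(DEMO_LCK_EX, wanted));	/* prints 0 */
	return 0;
}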
@@ -2474,8 +2510,9 @@ static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
 	struct obd_import *imp = NULL;
 	int rc;
 
-	/*Since the request might also come from lprocfs, so we need
-	 *sync this with client_disconnect_export Bug15684*/
+	/* Since the request might also come from lprocfs, so we need
+	 * sync this with client_disconnect_export Bug15684
+	 */
 	down_read(&obd->u.cli.cl_sem);
 	if (obd->u.cli.cl_import)
 		imp = class_import_get(obd->u.cli.cl_import);
@@ -2488,12 +2525,13 @@ static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
 	 * extra calls into the filesystem if that isn't necessary (e.g.
 	 * during mount that would help a bit).  Having relative timestamps
 	 * is not so great if request processing is slow, while absolute
-	 * timestamps are not ideal because they need time synchronization. */
+	 * timestamps are not ideal because they need time synchronization.
+	 */
 	req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
 
 	class_import_put(imp);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
@@ -2516,7 +2554,7 @@ static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
 		goto out;
 
 	msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
-	if (msfs == NULL) {
+	if (!msfs) {
 		rc = -EPROTO;
 		goto out;
 	}
@@ -2534,7 +2572,8 @@ static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
  * the maximum number of OST indices which will fit in the user buffer.
  * lmm_magic must be LOV_MAGIC (we only use 1 slot here).
 */
-static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
+static int osc_getstripe(struct lov_stripe_md *lsm,
+			 struct lov_user_md __user *lump)
 {
 	/* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
 	struct lov_user_md_v3 lum, *lumk;
@@ -2545,7 +2584,8 @@ static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
 		return -ENODATA;
 
 	/* we only need the header part from user space to get lmm_magic and
-	 * lmm_stripe_count, (the header part is common to v1 and v3) */
+	 * lmm_stripe_count, (the header part is common to v1 and v3)
+	 */
 	lum_size = sizeof(struct lov_user_md_v1);
 	if (copy_from_user(&lum, lump, lum_size))
 		return -EFAULT;
@@ -2560,7 +2600,8 @@ static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
 	LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lumk->lmm_objects[0]));
 
 	/* we can use lov_mds_md_size() to compute lum_size
-	 * because lov_user_md_vX and lov_mds_md_vX have the same size */
+	 * because lov_user_md_vX and lov_mds_md_vX have the same size
+	 */
 	if (lum.lmm_stripe_count > 0) {
 		lum_size = lov_mds_md_size(lum.lmm_stripe_count, lum.lmm_magic);
 		lumk = kzalloc(lum_size, GFP_NOFS);
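osc_getstripe() above shows a standard ioctl pattern behind the new __user annotation: copy in only the fixed header, read the element count from it, then allocate and fill a buffer of the right size. A condensed kernel-style sketch of the same two-step pattern; demo_hdr and demo_get_from_user are hypothetical, and unlike this sketch the real function also validates lmm_magic and derives the size via lov_mds_md_size():

#include <linux/slab.h>
#include <linux/uaccess.h>

/* Hypothetical header mirroring the lov_user_md layout trick: a fixed
 * part that is common to all versions, followed by a variable array.
 */
struct demo_hdr {
	__u32 dh_magic;
	__u32 dh_count;		/* number of trailing elements */
	__u64 dh_elem[0];
};

static int demo_get_from_user(struct demo_hdr __user *uptr)
{
	struct demo_hdr hdr, *buf;
	size_t size;
	int rc = 0;

	/* step 1: fetch only the fixed header to learn the element count */
	if (copy_from_user(&hdr, uptr, sizeof(hdr)))
		return -EFAULT;

	/* a real implementation must sanity-check dh_magic and bound
	 * dh_count here, before doing the size arithmetic
	 */
	size = sizeof(hdr) + hdr.dh_count * sizeof(buf->dh_elem[0]);

	/* step 2: allocate the full object, fill it, copy it back out */
	buf = kzalloc(size, GFP_NOFS);
	if (!buf)
		return -ENOMEM;
	buf->dh_magic = hdr.dh_magic;
	buf->dh_count = hdr.dh_count;
	/* ... fill buf->dh_elem[] from internal state ... */

	if (copy_to_user(uptr, buf, size))
		rc = -EFAULT;
	kfree(buf);
	return rc;
}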
@@ -2591,14 +2632,15 @@ static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
 }
 
 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
-			 void *karg, void *uarg)
+			 void *karg, void __user *uarg)
 {
 	struct obd_device *obd = exp->exp_obd;
 	struct obd_ioctl_data *data = karg;
 	int err = 0;
 
 	if (!try_module_get(THIS_MODULE)) {
-		CERROR("Can't get module. Is it alive?");
+		CERROR("%s: cannot get module '%s'\n", obd->obd_name,
+		       module_name(THIS_MODULE));
 		return -EINVAL;
 	}
 	switch (cmd) {
@@ -2700,7 +2742,7 @@ static int osc_get_info(const struct lu_env *env, struct obd_export *exp,
 
 		req = ptlrpc_request_alloc(class_exp2cliimp(exp),
 					   &RQF_OST_GET_INFO_LAST_ID);
-		if (req == NULL)
+		if (!req)
 			return -ENOMEM;
 
 		req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
@@ -2721,7 +2763,7 @@ static int osc_get_info(const struct lu_env *env, struct obd_export *exp,
 			goto out;
 
 		reply = req_capsule_server_get(&req->rq_pill, &RMF_OBD_ID);
-		if (reply == NULL) {
+		if (!reply) {
 			rc = -EPROTO;
 			goto out;
 		}
@@ -2735,7 +2777,7 @@ out:
 		struct ldlm_res_id res_id;
 		ldlm_policy_data_t policy;
 		struct lustre_handle lockh;
-		ldlm_mode_t mode = 0;
+		enum ldlm_mode mode = 0;
 		struct ptlrpc_request *req;
 		struct ll_user_fiemap *reply;
 		char *tmp;
@@ -2774,7 +2816,7 @@ out:
 skip_locking:
 		req = ptlrpc_request_alloc(class_exp2cliimp(exp),
 					   &RQF_OST_GET_INFO_FIEMAP);
-		if (req == NULL) {
+		if (!req) {
 			rc = -ENOMEM;
 			goto drop_lock;
 		}
@@ -2803,7 +2845,7 @@ skip_locking:
 			goto fini_req;
 
 		reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
-		if (reply == NULL) {
+		if (!reply) {
 			rc = -EPROTO;
 			goto fini_req;
 		}
@@ -2852,7 +2894,7 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
 	if (KEY_IS(KEY_CACHE_SET)) {
 		struct client_obd *cli = &obd->u.cli;
 
-		LASSERT(cli->cl_cache == NULL); /* only once */
+		LASSERT(!cli->cl_cache); /* only once */
 		cli->cl_cache = val;
 		atomic_inc(&cli->cl_cache->ccc_users);
 		cli->cl_lru_left = &cli->cl_cache->ccc_lru_left;
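Both branches of osc_get_info() above follow the same ptlrpc round trip: allocate a request from a format, size the variable fields in the request capsule, pack, send, then pull typed fields out of the reply capsule. A stripped-down sketch of the KEY_LAST_ID shape; it reuses only identifiers visible in this file, but the exact error handling and the use of ptlrpc_queue_wait() are assumptions of the sketch rather than a quote of the real function:

/* Kernel-context sketch of the "ask the OST for one value" RPC shape. */
static int demo_get_last_id(struct obd_export *exp, u32 keylen, void *key,
			    __u64 *last_id)
{
	struct ptlrpc_request *req;
	__u64 *reply;
	char *tmp;
	int rc;

	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_OST_GET_INFO_LAST_ID);
	if (!req)
		return -ENOMEM;

	/* reserve room for the key name in the request capsule, then pack */
	req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
			     RCL_CLIENT, keylen);
	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
	if (rc) {
		ptlrpc_req_finished(req);
		return rc;
	}

	tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
	memcpy(tmp, key, keylen);

	ptlrpc_request_set_replen(req);
	rc = ptlrpc_queue_wait(req);
	if (rc)
		goto out;

	reply = req_capsule_server_get(&req->rq_pill, &RMF_OBD_ID);
	if (!reply) {
		rc = -EPROTO;
		goto out;
	}
	*last_id = *reply;
out:
	ptlrpc_req_finished(req);
	return rc;
}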
@@ -2880,16 +2922,17 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
 		return -EINVAL;
 
 	/* We pass all other commands directly to OST. Since nobody calls osc
-	   methods directly and everybody is supposed to go through LOV, we
-	   assume lov checked invalid values for us.
-	   The only recognised values so far are evict_by_nid and mds_conn.
-	   Even if something bad goes through, we'd get a -EINVAL from OST
-	   anyway. */
+	 * methods directly and everybody is supposed to go through LOV, we
+	 * assume lov checked invalid values for us.
+	 * The only recognised values so far are evict_by_nid and mds_conn.
+	 * Even if something bad goes through, we'd get a -EINVAL from OST
+	 * anyway.
+	 */
 
 	req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ?
 				   &RQF_OST_SET_GRANT_INFO :
 				   &RQF_OBD_SET_INFO);
-	if (req == NULL)
+	if (!req)
 		return -ENOMEM;
 
 	req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
@@ -2916,7 +2959,7 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
 		CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
 		aa = ptlrpc_req_async_args(req);
 
-		oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO);
+		oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
 		if (!oa) {
 			ptlrpc_req_finished(req);
 			return -ENOMEM;
 		}
@@ -2928,7 +2971,7 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
 	ptlrpc_request_set_replen(req);
 	if (!KEY_IS(KEY_GRANT_SHRINK)) {
-		LASSERT(set != NULL);
+		LASSERT(set);
 		ptlrpc_set_add_req(set, req);
 		ptlrpc_check_set(NULL, set);
 	} else {
@@ -2946,7 +2989,7 @@ static int osc_reconnect(const struct lu_env *env,
 {
 	struct client_obd *cli = &obd->u.cli;
 
-	if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
+	if (data && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
 		long lost_grant;
 
 		client_obd_list_lock(&cli->cl_loi_list_lock);
@@ -2987,7 +3030,7 @@ static int osc_disconnect(struct obd_export *exp)
 	 * So the osc should be disconnected from the shrink list, after we
 	 * are sure the import has been destroyed. BUG18662
 	 */
-	if (obd->u.cli.cl_import == NULL)
+	if (!obd->u.cli.cl_import)
 		osc_del_shrink_grant(&obd->u.cli);
 	return rc;
 }
@@ -3024,7 +3067,8 @@ static int osc_import_event(struct obd_device *obd,
 		/* Reset grants */
 		cli = &obd->u.cli;
 		/* all pages go to failing rpcs due to the invalid
-		 * import */
+		 * import
+		 */
 		osc_io_unplug(env, cli, NULL);
 		ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
@@ -3206,13 +3250,13 @@ static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
 	return 0;
 }
 
-int osc_cleanup(struct obd_device *obd)
+static int osc_cleanup(struct obd_device *obd)
 {
 	struct client_obd *cli = &obd->u.cli;
 	int rc;
 
 	/* lru cleanup */
-	if (cli->cl_cache != NULL) {
+	if (cli->cl_cache) {
 		LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0);
 		spin_lock(&cli->cl_cache->ccc_lru_lock);
 		list_del_init(&cli->cl_lru_osc);
@@ -3255,7 +3299,7 @@ static int osc_process_config(struct obd_device *obd, u32 len, void *buf)
 	return osc_process_config_base(obd, buf);
 }
 
-struct obd_ops osc_obd_ops = {
+static struct obd_ops osc_obd_ops = {
 	.owner          = THIS_MODULE,
 	.setup          = osc_setup,
 	.precleanup     = osc_precleanup,
@@ -3298,7 +3342,8 @@ static int __init osc_init(void)
 
 	/* print an address of _any_ initialized kernel symbol from this
 	 * module, to allow debugging with gdb that doesn't support data
-	 * symbols from modules.*/
+	 * symbols from modules.
+	 */
 	CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches);
 
 	rc = lu_kmem_init(osc_caches);
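One substitution in osc_set_info_async() above deserves a note: kmem_cache_zalloc(cache, flags) is by definition kmem_cache_alloc(cache, flags | __GFP_ZERO), so the change is purely idiomatic: it names the zeroing intent instead of threading __GFP_ZERO through the gfp mask. A minimal sketch (demo_alloc_obdo is a hypothetical wrapper; obdo_cachep is the slab cache this file already uses):

#include <linux/slab.h>

/* Hypothetical wrapper showing the equivalence; obdo_cachep and
 * struct obdo come from the surrounding Lustre code.
 */
static struct obdo *demo_alloc_obdo(void)
{
	/* same semantics as kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO) */
	return kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
}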