Diffstat:
 drivers/staging/lustre/lustre/ldlm/ldlm_pool.c | 63 ++++++++++++-----------
 1 file changed, 33 insertions(+), 30 deletions(-)
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index 6054eee848d3..4c838f615a64 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -54,10 +54,10 @@
* calculated as the number of locks in LRU * lock live time in seconds. If
* CLV > SLV - lock is canceled.
*
- * Client has LVF, that is, lock volume factor which regulates how much sensitive
- * client should be about last SLV from server. The higher LVF is the more locks
- * will be canceled on client. Default value for it is 1. Setting LVF to 2 means
- * that client will cancel locks 2 times faster.
+ * Client has LVF, that is, lock volume factor which regulates how much
+ * sensitive client should be about last SLV from server. The higher LVF is the
+ * more locks will be canceled on client. Default value for it is 1. Setting LVF
+ * to 2 means that client will cancel locks 2 times faster.
*
* Locks on a client will be canceled more intensively in these cases:
* (1) if SLV is smaller, that is, load is higher on the server;
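The comment being re-wrapped above describes the whole pools algorithm: CLV is derived from the LRU length and the lock live time, LVF scales how sensitive the client is to the server's SLV, and a lock is canceled once CLV exceeds SLV. A minimal standalone sketch of that decision, with hypothetical demo_* names standing in for the real ldlm structures:

/*
 * Hypothetical sketch of the cancel decision described above, not the
 * actual ldlm implementation: CLV = locks-in-LRU * lock-live-time,
 * scaled by LVF, canceled once it exceeds the last SLV from the server.
 */
#include <stdint.h>
#include <stdbool.h>

struct demo_pool {
	uint64_t slv;	/* last server lock volume seen by the client */
	unsigned lvf;	/* lock volume factor, default 1 */
};

/* CLV as the comment defines it: LRU length times live time in seconds */
static uint64_t demo_clv(unsigned nr_lru, unsigned live_time_sec)
{
	return (uint64_t)nr_lru * live_time_sec;
}

/*
 * A higher LVF inflates CLV, so the comparison trips earlier and locks
 * are canceled more aggressively; LVF == 2 cancels "2 times faster".
 */
static bool demo_should_cancel(const struct demo_pool *pl,
			       unsigned nr_lru, unsigned live_time_sec)
{
	return demo_clv(nr_lru, live_time_sec) * pl->lvf > pl->slv;
}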
@@ -70,11 +70,12 @@
* if flow is getting thinner, more and more particles become outside of it and
* as particles are locks, they should be canceled.
*
- * General idea of this belongs to Vitaly Fertman (vitaly@clusterfs.com). Andreas
- * Dilger (adilger@clusterfs.com) proposed few nice ideas like using LVF and many
- * cleanups. Flow definition to allow more easy understanding of the logic belongs
- * to Nikita Danilov (nikita@clusterfs.com) as well as many cleanups and fixes.
- * And design and implementation are done by Yury Umanets (umka@clusterfs.com).
+ * General idea of this belongs to Vitaly Fertman (vitaly@clusterfs.com).
+ * Andreas Dilger (adilger@clusterfs.com) proposed few nice ideas like using
+ * LVF and many cleanups. Flow definition to allow more easy understanding of
+ * the logic belongs to Nikita Danilov (nikita@clusterfs.com) as well as many
+ * cleanups and fixes. And design and implementation are done by Yury Umanets
+ * (umka@clusterfs.com).
*
* Glossary for terms used:
*
@@ -265,16 +266,15 @@ static void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
* SLV. And the opposite, the more grant plan is over-consumed
* (load time) the faster drops SLV.
*/
- slv_factor = (grant_usage << LDLM_POOL_SLV_SHIFT);
+ slv_factor = grant_usage << LDLM_POOL_SLV_SHIFT;
do_div(slv_factor, limit);
slv = slv * slv_factor;
slv = dru(slv, LDLM_POOL_SLV_SHIFT, round_up);
- if (slv > ldlm_pool_slv_max(limit)) {
+ if (slv > ldlm_pool_slv_max(limit))
slv = ldlm_pool_slv_max(limit);
- } else if (slv < ldlm_pool_slv_min(limit)) {
+ else if (slv < ldlm_pool_slv_min(limit))
slv = ldlm_pool_slv_min(limit);
- }
pl->pl_server_lock_volume = slv;
}
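The hunk above is pure checkpatch cleanup (redundant parentheses, braces around single-statement branches), but the arithmetic it touches is worth spelling out: grant_usage/limit is computed in fixed point, scaled up by LDLM_POOL_SLV_SHIFT bits so the ratio survives integer division, multiplied into SLV, then scaled back down with an optional round-up. A userspace sketch of the same pattern, with demo_* stand-ins for do_div() and dru() and an assumed shift value:

/* Userspace sketch of the fixed-point scaling in ldlm_pool_recalc_slv();
 * DEMO_SLV_SHIFT stands in for LDLM_POOL_SLV_SHIFT, value assumed. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_SLV_SHIFT 10

/* divide by 2^shift, optionally rounding up: a stand-in for dru() */
static uint64_t demo_dru(uint64_t val, unsigned shift, int round_up)
{
	return (val + (round_up ? (1ULL << shift) - 1 : 0)) >> shift;
}

static uint64_t demo_recalc_slv(uint64_t slv, uint64_t grant_usage,
				uint64_t limit, int round_up)
{
	/* ratio scaled up so integer division keeps fractional precision */
	uint64_t slv_factor = grant_usage << DEMO_SLV_SHIFT;

	slv_factor /= limit;		/* do_div() in the kernel */
	slv *= slv_factor;		/* scale SLV by the ratio  */
	return demo_dru(slv, DEMO_SLV_SHIFT, round_up);	/* scale back */
}

int main(void)
{
	/* 75% grant usage shrinks an SLV of 1000 down to 750 */
	printf("%llu\n",
	       (unsigned long long)demo_recalc_slv(1000, 750, 1000, 1));
	return 0;
}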
@@ -614,8 +614,8 @@ int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
lprocfs_counter_add(pl->pl_stats,
LDLM_POOL_SHRINK_FREED_STAT,
cancel);
- CDEBUG(D_DLMTRACE, "%s: request to shrink %d locks, "
- "shrunk %d\n", pl->pl_name, nr, cancel);
+ CDEBUG(D_DLMTRACE, "%s: request to shrink %d locks, shrunk %d\n",
+ pl->pl_name, nr, cancel);
}
}
return cancel;
@@ -636,7 +636,7 @@ int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
}
EXPORT_SYMBOL(ldlm_pool_setup);
-#if defined (CONFIG_PROC_FS)
+#if defined(CONFIG_PROC_FS)
static int lprocfs_pool_state_seq_show(struct seq_file *m, void *unused)
{
int granted, grant_rate, cancel_rate, grant_step;
@@ -696,8 +696,9 @@ LPROC_SEQ_FOPS_RO(lprocfs_grant_plan);
LDLM_POOL_PROC_READER_SEQ_SHOW(recalc_period, int);
LDLM_POOL_PROC_WRITER(recalc_period, int);
-static ssize_t lprocfs_recalc_period_seq_write(struct file *file, const char *buf,
- size_t len, loff_t *off)
+static ssize_t lprocfs_recalc_period_seq_write(struct file *file,
+ const char *buf, size_t len,
+ loff_t *off)
{
struct seq_file *seq = file->private_data;
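The re-wrapped write handler above follows the standard seq_file convention: file->private_data is the seq_file created at open time, and its ->private field leads back to the object the proc entry controls. A generic skeleton of that convention; demo_obj and its value field are invented for illustration:

/* Hypothetical seq_file write-handler skeleton following the same
 * convention as lprocfs_recalc_period_seq_write() above. */
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>

struct demo_obj {
	int value;
};

static ssize_t demo_seq_write(struct file *file, const char __user *buf,
			      size_t len, loff_t *off)
{
	struct seq_file *seq = file->private_data;
	struct demo_obj *obj = seq->private;
	char kbuf[16];

	if (len >= sizeof(kbuf))
		return -EINVAL;
	if (copy_from_user(kbuf, buf, len))
		return -EFAULT;
	kbuf[len] = '\0';

	/* on success return len, as proc write handlers usually do */
	return kstrtoint(kbuf, 10, &obj->value) ?: len;
}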
@@ -943,6 +944,7 @@ EXPORT_SYMBOL(ldlm_pool_del);
__u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
{
__u64 slv;
+
spin_lock(&pl->pl_lock);
slv = pl->pl_server_lock_volume;
spin_unlock(&pl->pl_lock);
@@ -971,6 +973,7 @@ EXPORT_SYMBOL(ldlm_pool_set_slv);
__u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
{
__u64 slv;
+
spin_lock(&pl->pl_lock);
slv = pl->pl_client_lock_volume;
spin_unlock(&pl->pl_lock);
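Both getter hunks only add the blank line checkpatch requires after local declarations. The surrounding pattern is the usual locked 64-bit accessor: holding pl_lock guarantees the __u64 volume is read without tearing on 32-bit hosts. A hypothetical userspace equivalent using pthread spinlocks:

/* Userspace sketch of the locked 64-bit getter pattern used by
 * ldlm_pool_get_slv()/ldlm_pool_get_clv(); names are hypothetical. */
#include <pthread.h>
#include <stdint.h>

struct demo_pool {
	pthread_spinlock_t pl_lock;
	uint64_t pl_server_lock_volume;
};

static uint64_t demo_pool_get_slv(struct demo_pool *pl)
{
	uint64_t slv;

	/* hold the lock so the 64-bit read cannot tear on 32-bit hosts */
	pthread_spin_lock(&pl->pl_lock);
	slv = pl->pl_server_lock_volume;
	pthread_spin_unlock(&pl->pl_lock);
	return slv;
}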
@@ -1132,23 +1135,27 @@ static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr, gfp_t gfp_mask)
return (client == LDLM_NAMESPACE_SERVER) ? SHRINK_STOP : freed;
}
-static unsigned long ldlm_pools_srv_count(struct shrinker *s, struct shrink_control *sc)
+static unsigned long ldlm_pools_srv_count(struct shrinker *s,
+ struct shrink_control *sc)
{
return ldlm_pools_count(LDLM_NAMESPACE_SERVER, sc->gfp_mask);
}
-static unsigned long ldlm_pools_srv_scan(struct shrinker *s, struct shrink_control *sc)
+static unsigned long ldlm_pools_srv_scan(struct shrinker *s,
+ struct shrink_control *sc)
{
return ldlm_pools_scan(LDLM_NAMESPACE_SERVER, sc->nr_to_scan,
sc->gfp_mask);
}
-static unsigned long ldlm_pools_cli_count(struct shrinker *s, struct shrink_control *sc)
+static unsigned long ldlm_pools_cli_count(struct shrinker *s,
+ struct shrink_control *sc)
{
return ldlm_pools_count(LDLM_NAMESPACE_CLIENT, sc->gfp_mask);
}
-static unsigned long ldlm_pools_cli_scan(struct shrinker *s, struct shrink_control *sc)
+static unsigned long ldlm_pools_cli_scan(struct shrinker *s,
+ struct shrink_control *sc)
{
return ldlm_pools_scan(LDLM_NAMESPACE_CLIENT, sc->nr_to_scan,
sc->gfp_mask);
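These four wrappers are the count/scan halves of the client and server shrinkers; the hunk only re-wraps their over-long signatures. For context, a sketch of how such a pair is typically wired into a struct shrinker and registered — the struct and init function below are assumptions for illustration, not part of this patch:

/* Sketch only: demo_cli_shrinker and demo_shrinker_init() are
 * hypothetical; the real registration lives elsewhere in this file. */
#include <linux/init.h>
#include <linux/shrinker.h>

static struct shrinker demo_cli_shrinker = {
	.count_objects	= ldlm_pools_cli_count,
	.scan_objects	= ldlm_pools_cli_scan,
	.seeks		= DEFAULT_SEEKS,
};

static int __init demo_shrinker_init(void)
{
	/* 0 on success; may fail with -ENOMEM on NUMA-aware kernels */
	return register_shrinker(&demo_cli_shrinker);
}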
@@ -1194,10 +1201,8 @@ int ldlm_pools_recalc(ldlm_side_t client)
* of limit.
*/
if (nr_l >= 2 * (LDLM_POOL_HOST_L / 3)) {
- CWARN("\"Modest\" pools eat out 2/3 of server locks "
- "limit (%d of %lu). This means that you have too "
- "many clients for this amount of server RAM. "
- "Upgrade server!\n", nr_l, LDLM_POOL_HOST_L);
+ CWARN("\"Modest\" pools eat out 2/3 of server locks limit (%d of %lu). This means that you have too many clients for this amount of server RAM. Upgrade server!\n",
+ nr_l, LDLM_POOL_HOST_L);
equal = 1;
}
@@ -1205,8 +1210,7 @@ int ldlm_pools_recalc(ldlm_side_t client)
* The rest is given to greedy namespaces.
*/
list_for_each_entry(ns, ldlm_namespace_list(client),
- ns_list_chain)
- {
+ ns_list_chain) {
if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY)
continue;
@@ -1383,9 +1387,8 @@ static int ldlm_pools_thread_start(void)
static void ldlm_pools_thread_stop(void)
{
- if (ldlm_pools_thread == NULL) {
+ if (ldlm_pools_thread == NULL)
return;
- }
thread_set_flags(ldlm_pools_thread, SVC_STOPPING);
wake_up(&ldlm_pools_thread->t_ctl_waitq);