Diffstat (limited to 'drivers/staging/lustre/lustre/osc/osc_page.c')
-rw-r--r--  drivers/staging/lustre/lustre/osc/osc_page.c | 278
1 file changed, 210 insertions(+), 68 deletions(-)
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index 355f496a2093..2a7a70aa9e80 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -323,32 +323,6 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
return result;
}
-int osc_over_unstable_soft_limit(struct client_obd *cli)
-{
- long obd_upages, obd_dpages, osc_upages;
-
- /* Can't check cli->cl_unstable_count, therefore, no soft limit */
- if (!cli)
- return 0;
-
- obd_upages = atomic_read(&obd_unstable_pages);
- obd_dpages = atomic_read(&obd_dirty_pages);
-
- osc_upages = atomic_read(&cli->cl_unstable_count);
-
- /*
- * obd_max_dirty_pages is the max number of (dirty + unstable)
- * pages allowed at any given time. To simulate an unstable page
- * only limit, we subtract the current number of dirty pages
- * from this max. This difference is roughly the amount of pages
- * currently available for unstable pages. Thus, the soft limit
- * is half of that difference. Check osc_upages to ensure we don't
- * set SOFT_SYNC for OSCs without any outstanding unstable pages.
- */
- return osc_upages &&
- obd_upages >= (obd_max_dirty_pages - obd_dpages) / 2;
-}
-
/**
* Helper function called by osc_io_submit() for every page in an immediate
* transfer (i.e., transferred synchronously).
@@ -368,9 +342,6 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
oap->oap_count = opg->ops_to - opg->ops_from;
oap->oap_brw_flags = brw_flags | OBD_BRW_SYNC;
- if (osc_over_unstable_soft_limit(oap->oap_cli))
- oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
-
if (capable(CFS_CAP_SYS_RESOURCE)) {
oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
oap->oap_cmd |= OBD_BRW_NOQUOTA;
@@ -409,7 +380,7 @@ static const int lru_shrink_max = 8 << (20 - PAGE_SHIFT); /* 8M */
static int osc_cache_too_much(struct client_obd *cli)
{
struct cl_client_cache *cache = cli->cl_cache;
- int pages = atomic_read(&cli->cl_lru_in_list);
+ long pages = atomic_long_read(&cli->cl_lru_in_list);
unsigned long budget;
budget = cache->ccc_lru_max / (atomic_read(&cache->ccc_users) - 2);
@@ -417,7 +388,7 @@ static int osc_cache_too_much(struct client_obd *cli)
/* if it's going to run out LRU slots, we should free some, but not
* too much to maintain fairness among OSCs.
*/
- if (atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
+ if (atomic_long_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
if (pages >= budget)
return lru_shrink_max;
else if (pages >= budget / 2)
@@ -444,7 +415,7 @@ void osc_lru_add_batch(struct client_obd *cli, struct list_head *plist)
{
LIST_HEAD(lru);
struct osc_async_page *oap;
- int npages = 0;
+ long npages = 0;
list_for_each_entry(oap, plist, oap_pending_item) {
struct osc_page *opg = oap2osc_page(oap);
@@ -460,8 +431,8 @@ void osc_lru_add_batch(struct client_obd *cli, struct list_head *plist)
if (npages > 0) {
spin_lock(&cli->cl_lru_list_lock);
list_splice_tail(&lru, &cli->cl_lru_list);
- atomic_sub(npages, &cli->cl_lru_busy);
- atomic_add(npages, &cli->cl_lru_in_list);
+ atomic_long_sub(npages, &cli->cl_lru_busy);
+ atomic_long_add(npages, &cli->cl_lru_in_list);
spin_unlock(&cli->cl_lru_list_lock);
/* XXX: May set force to be true for better performance */
@@ -472,9 +443,9 @@ void osc_lru_add_batch(struct client_obd *cli, struct list_head *plist)
static void __osc_lru_del(struct client_obd *cli, struct osc_page *opg)
{
- LASSERT(atomic_read(&cli->cl_lru_in_list) > 0);
+ LASSERT(atomic_long_read(&cli->cl_lru_in_list) > 0);
list_del_init(&opg->ops_lru);
- atomic_dec(&cli->cl_lru_in_list);
+ atomic_long_dec(&cli->cl_lru_in_list);
}
/**
@@ -488,12 +459,12 @@ static void osc_lru_del(struct client_obd *cli, struct osc_page *opg)
if (!list_empty(&opg->ops_lru)) {
__osc_lru_del(cli, opg);
} else {
- LASSERT(atomic_read(&cli->cl_lru_busy) > 0);
- atomic_dec(&cli->cl_lru_busy);
+ LASSERT(atomic_long_read(&cli->cl_lru_busy) > 0);
+ atomic_long_dec(&cli->cl_lru_busy);
}
spin_unlock(&cli->cl_lru_list_lock);
- atomic_inc(cli->cl_lru_left);
+ atomic_long_inc(cli->cl_lru_left);
/* this is a great place to release more LRU pages if
* this osc occupies too many LRU pages and kernel is
* stealing one of them.
@@ -518,7 +489,7 @@ static void osc_lru_use(struct client_obd *cli, struct osc_page *opg)
spin_lock(&cli->cl_lru_list_lock);
__osc_lru_del(cli, opg);
spin_unlock(&cli->cl_lru_list_lock);
- atomic_inc(&cli->cl_lru_busy);
+ atomic_long_inc(&cli->cl_lru_busy);
}
}
@@ -540,10 +511,32 @@ static void discard_pagevec(const struct lu_env *env, struct cl_io *io,
}
/**
+ * Check if a cl_page can be released, i.e., it's not being used.
+ *
+ * If unstable accounting is turned on, a bulk transfer may hold one extra
+ * refcount for recovery, so we need to check the vmpage refcount as well;
+ * otherwise, even though the cl_page can be destroyed, the corresponding
+ * vmpage can't be reused.
+ */
+static inline bool lru_page_busy(struct client_obd *cli, struct cl_page *page)
+{
+ if (cl_page_in_use_noref(page))
+ return true;
+
+ if (cli->cl_cache->ccc_unstable_check) {
+ struct page *vmpage = cl_page_vmpage(page);
+
+ /* vmpage has two known users: cl_page and the VM page cache */
+ if (page_count(vmpage) - page_mapcount(vmpage) > 2)
+ return true;
+ }
+ return false;
+}
+
+/**
* Drop @target of pages from LRU at most.
*/
-int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
- int target, bool force)
+long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
+ long target, bool force)
{
struct cl_io *io;
struct cl_object *clobj = NULL;
@@ -551,12 +544,12 @@ int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
struct osc_page *opg;
struct osc_page *temp;
int maxscan = 0;
- int count = 0;
+ long count = 0;
int index = 0;
int rc = 0;
- LASSERT(atomic_read(&cli->cl_lru_in_list) >= 0);
- if (atomic_read(&cli->cl_lru_in_list) == 0 || target <= 0)
+ LASSERT(atomic_long_read(&cli->cl_lru_in_list) >= 0);
+ if (atomic_long_read(&cli->cl_lru_in_list) == 0 || target <= 0)
return 0;
if (!force) {
@@ -575,7 +568,7 @@ int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
io = &osc_env_info(env)->oti_io;
spin_lock(&cli->cl_lru_list_lock);
- maxscan = min(target << 1, atomic_read(&cli->cl_lru_in_list));
+ maxscan = min(target << 1, atomic_long_read(&cli->cl_lru_in_list));
list_for_each_entry_safe(opg, temp, &cli->cl_lru_list, ops_lru) {
struct cl_page *page;
bool will_free = false;
@@ -584,7 +577,7 @@ int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
break;
page = opg->ops_cl.cpl_page;
- if (cl_page_in_use_noref(page)) {
+ if (lru_page_busy(cli, page)) {
list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
continue;
}
@@ -620,7 +613,7 @@ int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
}
if (cl_page_own_try(env, io, page) == 0) {
- if (!cl_page_in_use_noref(page)) {
+ if (!lru_page_busy(cli, page)) {
/* remove it from lru list earlier to avoid
* lock contention
*/
@@ -663,24 +656,19 @@ int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
atomic_dec(&cli->cl_lru_shrinkers);
if (count > 0) {
- atomic_add(count, cli->cl_lru_left);
+ atomic_long_add(count, cli->cl_lru_left);
wake_up_all(&osc_lru_waitq);
}
return count > 0 ? count : rc;
}
-static inline int max_to_shrink(struct client_obd *cli)
-{
- return min(atomic_read(&cli->cl_lru_in_list) >> 1, lru_shrink_max);
-}
-
-int osc_lru_reclaim(struct client_obd *cli)
+long osc_lru_reclaim(struct client_obd *cli)
{
struct cl_env_nest nest;
struct lu_env *env;
struct cl_client_cache *cache = cli->cl_cache;
int max_scans;
- int rc = 0;
+ long rc = 0;
LASSERT(cache);
@@ -693,15 +681,15 @@ int osc_lru_reclaim(struct client_obd *cli)
if (rc == -EBUSY)
rc = 0;
- CDEBUG(D_CACHE, "%s: Free %d pages from own LRU: %p.\n",
+ CDEBUG(D_CACHE, "%s: Free %ld pages from own LRU: %p.\n",
cli->cl_import->imp_obd->obd_name, rc, cli);
goto out;
}
- CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %d, busy: %d.\n",
+ CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %ld, busy: %ld.\n",
cli->cl_import->imp_obd->obd_name, cli,
- atomic_read(&cli->cl_lru_in_list),
- atomic_read(&cli->cl_lru_busy));
+ atomic_long_read(&cli->cl_lru_in_list),
+ atomic_long_read(&cli->cl_lru_busy));
/* Reclaim LRU slots from other client_obd as it can't free enough
* from its own. This should rarely happen.
@@ -717,10 +705,10 @@ int osc_lru_reclaim(struct client_obd *cli)
cli = list_entry(cache->ccc_lru.next, struct client_obd,
cl_lru_osc);
- CDEBUG(D_CACHE, "%s: cli %p LRU pages: %d, busy: %d.\n",
+ CDEBUG(D_CACHE, "%s: cli %p LRU pages: %ld, busy: %ld.\n",
cli->cl_import->imp_obd->obd_name, cli,
- atomic_read(&cli->cl_lru_in_list),
- atomic_read(&cli->cl_lru_busy));
+ atomic_long_read(&cli->cl_lru_in_list),
+ atomic_long_read(&cli->cl_lru_busy));
list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
if (osc_cache_too_much(cli) > 0) {
@@ -737,11 +725,18 @@ int osc_lru_reclaim(struct client_obd *cli)
out:
cl_env_nested_put(&nest, env);
- CDEBUG(D_CACHE, "%s: cli %p freed %d pages.\n",
+ CDEBUG(D_CACHE, "%s: cli %p freed %ld pages.\n",
cli->cl_import->imp_obd->obd_name, cli, rc);
return rc;
}
+/**
+ * osc_lru_reserve() is called to reserve an LRU slot for a cl_page.
+ *
+ * Usually the LRU slots are reserved in osc_io_iter_rw_init(), which
+ * should have reserved enough slots for the whole IO; only when the LRU
+ * slots are in extreme shortage does this function have to reserve a
+ * slot itself.
+ */
static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
struct osc_page *opg)
{
@@ -758,8 +753,8 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
goto out;
}
- LASSERT(atomic_read(cli->cl_lru_left) >= 0);
- while (!atomic_add_unless(cli->cl_lru_left, -1, 0)) {
+ LASSERT(atomic_long_read(cli->cl_lru_left) >= 0);
+ while (!atomic_long_add_unless(cli->cl_lru_left, -1, 0)) {
/* run out of LRU spaces, try to drop some by itself */
rc = osc_lru_reclaim(cli);
if (rc < 0)
@@ -770,7 +765,7 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
cond_resched();
rc = l_wait_event(osc_lru_waitq,
- atomic_read(cli->cl_lru_left) > 0,
+ atomic_long_read(cli->cl_lru_left) > 0,
&lwi);
if (rc < 0)
@@ -779,7 +774,7 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
out:
if (rc >= 0) {
- atomic_inc(&cli->cl_lru_busy);
+ atomic_long_inc(&cli->cl_lru_busy);
opg->ops_in_lru = 1;
rc = 0;
}
@@ -787,4 +782,151 @@ out:
return rc;
}
+/**
+ * Atomic operations are expensive. We batch the accounting updates for
+ * pages that live in the same pgdat to get better performance.
+ * In practice this works well because the pages in one RPC are likely
+ * to come from the same NUMA node.
+ */
+static inline void unstable_page_accounting(struct ptlrpc_bulk_desc *desc,
+ int factor)
+{
+ int page_count = desc->bd_iov_count;
+ pg_data_t *last = NULL;
+ int count = 0;
+ int i;
+
+ for (i = 0; i < page_count; i++) {
+ pg_data_t *pgdat = page_pgdat(desc->bd_iov[i].bv_page);
+
+ if (likely(pgdat == last)) {
+ ++count;
+ continue;
+ }
+
+ if (count > 0) {
+ mod_node_page_state(last, NR_UNSTABLE_NFS,
+ factor * count);
+ count = 0;
+ }
+ last = pgdat;
+ ++count;
+ }
+ if (count > 0)
+ mod_node_page_state(last, NR_UNSTABLE_NFS, factor * count);
+}
+
+static inline void add_unstable_page_accounting(struct ptlrpc_bulk_desc *desc)
+{
+ unstable_page_accounting(desc, 1);
+}
+
+static inline void dec_unstable_page_accounting(struct ptlrpc_bulk_desc *desc)
+{
+ unstable_page_accounting(desc, -1);
+}
+
+/**
+ * Performs "unstable" page accounting. This function balances the
+ * increment operations performed in osc_inc_unstable_pages. It is
+ * registered as the RPC request callback, and is executed when the
+ * bulk RPC is committed on the server. Thus at this point, the pages
+ * involved in the bulk transfer are no longer considered unstable.
+ *
+ * If this function is called, the request has either been committed or
+ * req::rq_unstable has been set; in both cases the unstable page
+ * statistics have already been added.
+ */
+void osc_dec_unstable_pages(struct ptlrpc_request *req)
+{
+ struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+ struct ptlrpc_bulk_desc *desc = req->rq_bulk;
+ int page_count = desc->bd_iov_count;
+ long unstable_count;
+
+ LASSERT(page_count >= 0);
+ dec_unstable_page_accounting(desc);
+
+ unstable_count = atomic_long_sub_return(page_count,
+ &cli->cl_unstable_count);
+ LASSERT(unstable_count >= 0);
+
+ unstable_count = atomic_long_sub_return(page_count,
+ &cli->cl_cache->ccc_unstable_nr);
+ LASSERT(unstable_count >= 0);
+ if (!unstable_count)
+ wake_up_all(&cli->cl_cache->ccc_unstable_waitq);
+
+ if (osc_cache_too_much(cli))
+ (void)ptlrpcd_queue_work(cli->cl_lru_work);
+}
+
+/**
+ * "unstable" page accounting. See: osc_dec_unstable_pages.
+ */
+void osc_inc_unstable_pages(struct ptlrpc_request *req)
+{
+ struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+ struct ptlrpc_bulk_desc *desc = req->rq_bulk;
+ long page_count = desc->bd_iov_count;
+
+ /* No unstable page tracking */
+ if (!cli->cl_cache || !cli->cl_cache->ccc_unstable_check)
+ return;
+
+ add_unstable_page_accounting(desc);
+ atomic_long_add(page_count, &cli->cl_unstable_count);
+ atomic_long_add(page_count, &cli->cl_cache->ccc_unstable_nr);
+
+ /*
+ * If the request has already been committed (i.e. brw_commit
+ * called via rq_commit_cb), we need to undo the unstable page
+ * increments we just performed because rq_commit_cb won't be
+ * called again.
+ */
+ spin_lock(&req->rq_lock);
+ if (unlikely(req->rq_committed)) {
+ spin_unlock(&req->rq_lock);
+
+ osc_dec_unstable_pages(req);
+ } else {
+ req->rq_unstable = 1;
+ spin_unlock(&req->rq_lock);
+ }
+}
+
+/**
+ * Check whether this OSC should piggyback a SOFT_SYNC flag to the OST.
+ * This function is called for every BRW RPC, so it is critical to keep
+ * it fast.
+ */
+bool osc_over_unstable_soft_limit(struct client_obd *cli)
+{
+ long unstable_nr, osc_unstable_count;
+
+ /* no unstable page tracking, therefore no soft limit */
+ if (!cli->cl_cache || !cli->cl_cache->ccc_unstable_check)
+ return false;
+
+ osc_unstable_count = atomic_long_read(&cli->cl_unstable_count);
+ unstable_nr = atomic_long_read(&cli->cl_cache->ccc_unstable_nr);
+
+ CDEBUG(D_CACHE,
+ "%s: cli: %p unstable pages: %lu, osc unstable pages: %lu\n",
+ cli->cl_import->imp_obd->obd_name, cli,
+ unstable_nr, osc_unstable_count);
+
+ /*
+ * If unstable pages already consume more than 25% of the LRU slots
+ * (ccc_lru_max >> 2) AND this OSC holds a full RPC window of unstable
+ * pages, it's a good time to piggyback a SOFT_SYNC flag.
+ * Note that the OST won't respond to the SOFT_SYNC request immediately,
+ * so active OSCs will have more chances to carry the flag; this is
+ * reasonable.
+ */
+ return unstable_nr > cli->cl_cache->ccc_lru_max >> 2 &&
+ osc_unstable_count > cli->cl_max_pages_per_rpc *
+ cli->cl_max_rpcs_in_flight;
+}
+
/** @} osc */
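
For reference, here is a minimal standalone sketch of the run-length batching idea used by unstable_page_accounting() above. It is not part of the patch: node_of[], flush_node() and stats[] are hypothetical userspace stand-ins for page_pgdat() and mod_node_page_state(..., NR_UNSTABLE_NFS, ...). The point is that a run of same-node pages costs one counter update instead of one update per page.

/* batching_sketch.c: illustrative only, not kernel code */
#include <stdio.h>

#define MAX_NODES 4

static long stats[MAX_NODES];	/* per-node "unstable page" counters */

static void flush_node(int node, int delta)
{
	/* one counter update covers a whole run of same-node pages */
	stats[node] += delta;
}

static void batched_accounting(const int *node_of, int page_count, int factor)
{
	int last = -1;		/* node of the run being accumulated */
	int count = 0;
	int i;

	for (i = 0; i < page_count; i++) {
		if (node_of[i] == last) {
			++count;
			continue;
		}
		if (count > 0)
			flush_node(last, factor * count);
		last = node_of[i];
		count = 1;
	}
	if (count > 0)
		flush_node(last, factor * count);
}

int main(void)
{
	/* 6 pages spread over 3 runs -> only 3 flushes instead of 6 */
	int node_of[] = { 0, 0, 0, 1, 1, 2 };
	int i;

	batched_accounting(node_of, 6, 1);	/* "inc": pages become unstable */
	batched_accounting(node_of, 6, -1);	/* "dec": bulk RPC committed */

	for (i = 0; i < MAX_NODES; i++)
		printf("node %d: %ld\n", i, stats[i]);
	return 0;
}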