--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -308,6 +308,7 @@ struct rds_ib_statistics {
uint64_t s_ib_rdma_mr_1m_pool_flush;
uint64_t s_ib_rdma_mr_1m_pool_wait;
uint64_t s_ib_rdma_mr_1m_pool_depleted;
+ uint64_t s_ib_rdma_flush_mr_pool_avoided;
uint64_t s_ib_rdma_mr_8k_reused;
uint64_t s_ib_rdma_mr_1m_reused;
uint64_t s_ib_atomic_cswp;
--- a/net/rds/ib_mr.h
+++ b/net/rds/ib_mr.h
@@ -105,6 +105,8 @@ struct rds_ib_mr_pool {
unsigned long max_items_soft;
unsigned long max_free_pinned;
unsigned int max_pages;
+
+ bool flush_ongoing; /* To avoid redundant flushes */
};
extern struct workqueue_struct *rds_ib_mr_wq;
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -393,6 +393,8 @@ int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
*/
dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
+ WRITE_ONCE(pool->flush_ongoing, true);
+ smp_wmb();
if (free_all) {
unsigned long flags;
@@ -430,6 +432,8 @@ int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
atomic_sub(nfreed, &pool->item_count);
out:
+ WRITE_ONCE(pool->flush_ongoing, false);
+ smp_wmb();
mutex_unlock(&pool->flush_lock);
if (waitqueue_active(&pool->flush_wait))
wake_up(&pool->flush_wait);
@@ -507,8 +511,18 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
/* If we've pinned too many pages, request a flush */
if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
- atomic_read(&pool->dirty_count) >= pool->max_items / 5)
- queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
+	    atomic_read(&pool->dirty_count) >= pool->max_items / 5) {
+		smp_rmb();
+		if (!READ_ONCE(pool->flush_ongoing)) {
+			queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
+		} else {
+			/* This counter indicates the number of redundant
+			 * flush calls avoided, and provides an indication
+			 * of the load pattern imposed on the kernel.
+			 */
+			rds_ib_stats_inc(s_ib_rdma_flush_mr_pool_avoided);
+		}
+	}
if (invalidate) {
if (likely(!in_interrupt())) {
@@ -670,6 +683,7 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
pool->max_free_pinned = pool->max_items * pool->max_pages / 4;
pool->max_items_soft = rds_ibdev->max_mrs * 3 / 4;
+ pool->flush_ongoing = false;
return pool;
}
--- a/net/rds/ib_stats.c
+++ b/net/rds/ib_stats.c
@@ -75,6 +75,7 @@
"ib_rdma_mr_1m_pool_flush",
"ib_rdma_mr_1m_pool_wait",
"ib_rdma_mr_1m_pool_depleted",
+ "ib_rdma_flush_mr_pool_avoided",
"ib_rdma_mr_8k_reused",
"ib_rdma_mr_1m_reused",
"ib_atomic_cswp",
This patch reduces the number of asynchronous workers spawned to execute
rds_ib_flush_mr_pool() during high I/O situations. Synchronous call paths
into rds_ib_flush_mr_pool() are executed undisturbed. By reducing the
number of processes contending to flush the MR pool, the number of D-state
processes waiting to acquire the mutex lock is greatly reduced; previously
these stalled waiters caused DB instance crashes, as the corresponding
processes made no progress while waiting for the mutex.

Signed-off-by: Praveen Kumar Kannoju <praveen.kannoju@oracle.com>
---
 net/rds/ib.h       |  1 +
 net/rds/ib_mr.h    |  2 ++
 net/rds/ib_rdma.c  | 19 +++++++++++++++++--
 net/rds/ib_stats.c |  1 +
 4 files changed, 21 insertions(+), 2 deletions(-)
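
For reviewers who want the gating logic in isolation, the userspace sketch
below is illustrative only and not part of the patch. The names fake_pool,
queue_flush_work(), maybe_queue_flush() and flush_pool() are made-up
stand-ins for rds_ib_mr_pool, queue_delayed_work(), the asynchronous path
in rds_ib_free_mr() and rds_ib_flush_mr_pool(); the real workqueue, mutex
and memory barriers are intentionally omitted, and C11 atomics stand in
for READ_ONCE()/WRITE_ONCE().

/* flush_flag_sketch.c — illustrative model of the flag-gated queueing;
 * a flush request becomes real work only when no flush is already in
 * progress, otherwise it is merely counted.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_pool {
	atomic_bool flush_ongoing;     /* models pool->flush_ongoing */
	unsigned long flushes_queued;  /* work items actually scheduled */
	unsigned long flushes_avoided; /* models s_ib_rdma_flush_mr_pool_avoided */
};

/* Stand-in for queue_delayed_work(); here we only count the request. */
static void queue_flush_work(struct fake_pool *pool)
{
	pool->flushes_queued++;
}

/* Models the asynchronous path in rds_ib_free_mr(): kick a worker only
 * if no flush is currently running.
 */
static void maybe_queue_flush(struct fake_pool *pool)
{
	if (!atomic_load(&pool->flush_ongoing))
		queue_flush_work(pool);
	else
		pool->flushes_avoided++;
}

/* Models rds_ib_flush_mr_pool(): bracket the actual work with the flag. */
static void flush_pool(struct fake_pool *pool)
{
	atomic_store(&pool->flush_ongoing, true);
	/* ... unmap and free MRs here ... */
	atomic_store(&pool->flush_ongoing, false);
}

int main(void)
{
	struct fake_pool pool = { 0 };

	atomic_init(&pool.flush_ongoing, false);

	maybe_queue_flush(&pool);                /* nothing running: queued  */
	atomic_store(&pool.flush_ongoing, true);
	maybe_queue_flush(&pool);                /* flush running: avoided   */
	maybe_queue_flush(&pool);                /* still running: avoided   */
	atomic_store(&pool.flush_ongoing, false);
	flush_pool(&pool);                       /* flag toggled around work */

	printf("queued=%lu avoided=%lu\n",
	       pool.flushes_queued, pool.flushes_avoided);
	return 0;
}

Built with a C11 compiler this prints "queued=1 avoided=2", mirroring how
the new statistic counts suppressed worker kicks while a flush is running.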