diff mbox series

[02/11] ibmvfc: implement channel queue depth and event buffer accounting

Message ID 20230913230457.2575849-3-tyreld@linux.ibm.com (mailing list archive)
State Superseded
Headers show
Series ibmvfc: fixes and generic prep work for NVMeoF support | expand

Commit Message

Tyrel Datwyler Sept. 13, 2023, 11:04 p.m. UTC
Extend ibmvfc_queue, ibmvfc_event, and ibmvfc_event_pool to provide
queue depths for general IO commands and reserved commands as well as
proper accounting of the free events of each type from the general event
pool. Further, calculate the negotiated max command limit with the VIOS
at NPIV login time as a function of the number of queues times their
total queue depth (general and reserved depths combined).

This does away with the legacy max_request value, and allows the driver
to better manage and track its resources.

Signed-off-by: Tyrel Datwyler <tyreld@linux.ibm.com>
Reviewed-by: Brian King <brking@linux.vnet.ibm.com>
---
 drivers/scsi/ibmvscsi/ibmvfc.c | 108 +++++++++++++++++++++------------
 drivers/scsi/ibmvscsi/ibmvfc.h |   9 +++
 2 files changed, 78 insertions(+), 39 deletions(-)

Comments

kernel test robot Sept. 19, 2023, 4:12 a.m. UTC | #1
Hi Tyrel,

kernel test robot noticed the following build warnings:

[auto build test WARNING on linus/master]
[also build test WARNING on v6.6-rc2 next-20230918]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Tyrel-Datwyler/ibmvfc-remove-BUG_ON-in-the-case-of-an-empty-event-pool/20230914-085530
base:   linus/master
patch link:    https://lore.kernel.org/r/20230913230457.2575849-3-tyreld%40linux.ibm.com
patch subject: [PATCH 02/11] ibmvfc: implement channel queue depth and event buffer accounting
config: powerpc-allyesconfig (https://download.01.org/0day-ci/archive/20230919/202309191225.q759yNtz-lkp@intel.com/config)
compiler: clang version 17.0.0 (https://github.com/llvm/llvm-project.git 4a5ac14ee968ff0ad5d2cc1ffa0299048db4c88a)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20230919/202309191225.q759yNtz-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202309191225.q759yNtz-lkp@intel.com/

All warnings (new ones prefixed by >>):

>> drivers/scsi/ibmvscsi/ibmvfc.c:789: warning: Excess function parameter 'size' description in 'ibmvfc_init_event_pool'
>> drivers/scsi/ibmvscsi/ibmvfc.c:1534: warning: Function parameter or member 'reserved' not described in '__ibmvfc_get_event'
>> drivers/scsi/ibmvscsi/ibmvfc.c:1534: warning: expecting prototype for ibmvfc_get_event(). Prototype was for __ibmvfc_get_event() instead


vim +789 drivers/scsi/ibmvscsi/ibmvfc.c

072b91f9c6510d Brian King     2008-07-01  778  
225acf5f1aba3b Tyrel Datwyler 2021-01-14  779  /**
225acf5f1aba3b Tyrel Datwyler 2021-01-14  780   * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
225acf5f1aba3b Tyrel Datwyler 2021-01-14  781   * @vhost:	ibmvfc host who owns the event pool
dd9c772971485d Lee Jones      2021-03-17  782   * @queue:      ibmvfc queue struct
dd9c772971485d Lee Jones      2021-03-17  783   * @size:       pool size
225acf5f1aba3b Tyrel Datwyler 2021-01-14  784   *
225acf5f1aba3b Tyrel Datwyler 2021-01-14  785   * Returns zero on success.
225acf5f1aba3b Tyrel Datwyler 2021-01-14  786   **/
225acf5f1aba3b Tyrel Datwyler 2021-01-14  787  static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
a7ed558d0b9030 Tyrel Datwyler 2023-09-13  788  				  struct ibmvfc_queue *queue)
225acf5f1aba3b Tyrel Datwyler 2021-01-14 @789  {
225acf5f1aba3b Tyrel Datwyler 2021-01-14  790  	int i;
225acf5f1aba3b Tyrel Datwyler 2021-01-14  791  	struct ibmvfc_event_pool *pool = &queue->evt_pool;
225acf5f1aba3b Tyrel Datwyler 2021-01-14  792  
225acf5f1aba3b Tyrel Datwyler 2021-01-14  793  	ENTER;
a7ed558d0b9030 Tyrel Datwyler 2023-09-13  794  	if (!queue->total_depth)
bb35ecb2a949d9 Tyrel Datwyler 2021-01-14  795  		return 0;
bb35ecb2a949d9 Tyrel Datwyler 2021-01-14  796  
a7ed558d0b9030 Tyrel Datwyler 2023-09-13  797  	pool->size = queue->total_depth;
a7ed558d0b9030 Tyrel Datwyler 2023-09-13  798  	pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
225acf5f1aba3b Tyrel Datwyler 2021-01-14  799  	if (!pool->events)
225acf5f1aba3b Tyrel Datwyler 2021-01-14  800  		return -ENOMEM;
225acf5f1aba3b Tyrel Datwyler 2021-01-14  801  
225acf5f1aba3b Tyrel Datwyler 2021-01-14  802  	pool->iu_storage = dma_alloc_coherent(vhost->dev,
a7ed558d0b9030 Tyrel Datwyler 2023-09-13  803  					      pool->size * sizeof(*pool->iu_storage),
225acf5f1aba3b Tyrel Datwyler 2021-01-14  804  					      &pool->iu_token, 0);
225acf5f1aba3b Tyrel Datwyler 2021-01-14  805  
225acf5f1aba3b Tyrel Datwyler 2021-01-14  806  	if (!pool->iu_storage) {
225acf5f1aba3b Tyrel Datwyler 2021-01-14  807  		kfree(pool->events);
225acf5f1aba3b Tyrel Datwyler 2021-01-14  808  		return -ENOMEM;
225acf5f1aba3b Tyrel Datwyler 2021-01-14  809  	}
225acf5f1aba3b Tyrel Datwyler 2021-01-14  810  
225acf5f1aba3b Tyrel Datwyler 2021-01-14  811  	INIT_LIST_HEAD(&queue->sent);
225acf5f1aba3b Tyrel Datwyler 2021-01-14  812  	INIT_LIST_HEAD(&queue->free);
a7ed558d0b9030 Tyrel Datwyler 2023-09-13  813  	queue->evt_free = queue->evt_depth;
a7ed558d0b9030 Tyrel Datwyler 2023-09-13  814  	queue->reserved_free = queue->reserved_depth;
225acf5f1aba3b Tyrel Datwyler 2021-01-14  815  	spin_lock_init(&queue->l_lock);
225acf5f1aba3b Tyrel Datwyler 2021-01-14  816  
a7ed558d0b9030 Tyrel Datwyler 2023-09-13  817  	for (i = 0; i < pool->size; ++i) {
225acf5f1aba3b Tyrel Datwyler 2021-01-14  818  		struct ibmvfc_event *evt = &pool->events[i];
225acf5f1aba3b Tyrel Datwyler 2021-01-14  819  
a264cf5e81c78e Tyrel Datwyler 2021-07-16  820  		/*
a264cf5e81c78e Tyrel Datwyler 2021-07-16  821  		 * evt->active states
a264cf5e81c78e Tyrel Datwyler 2021-07-16  822  		 *  1 = in flight
a264cf5e81c78e Tyrel Datwyler 2021-07-16  823  		 *  0 = being completed
a264cf5e81c78e Tyrel Datwyler 2021-07-16  824  		 * -1 = free/freed
a264cf5e81c78e Tyrel Datwyler 2021-07-16  825  		 */
a264cf5e81c78e Tyrel Datwyler 2021-07-16  826  		atomic_set(&evt->active, -1);
225acf5f1aba3b Tyrel Datwyler 2021-01-14  827  		atomic_set(&evt->free, 1);
225acf5f1aba3b Tyrel Datwyler 2021-01-14  828  		evt->crq.valid = 0x80;
225acf5f1aba3b Tyrel Datwyler 2021-01-14  829  		evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
225acf5f1aba3b Tyrel Datwyler 2021-01-14  830  		evt->xfer_iu = pool->iu_storage + i;
225acf5f1aba3b Tyrel Datwyler 2021-01-14  831  		evt->vhost = vhost;
225acf5f1aba3b Tyrel Datwyler 2021-01-14  832  		evt->queue = queue;
225acf5f1aba3b Tyrel Datwyler 2021-01-14  833  		evt->ext_list = NULL;
225acf5f1aba3b Tyrel Datwyler 2021-01-14  834  		list_add_tail(&evt->queue_list, &queue->free);
225acf5f1aba3b Tyrel Datwyler 2021-01-14  835  	}
225acf5f1aba3b Tyrel Datwyler 2021-01-14  836  
225acf5f1aba3b Tyrel Datwyler 2021-01-14  837  	LEAVE;
225acf5f1aba3b Tyrel Datwyler 2021-01-14  838  	return 0;
225acf5f1aba3b Tyrel Datwyler 2021-01-14  839  }
225acf5f1aba3b Tyrel Datwyler 2021-01-14  840
diff mbox series

Patch

diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 10435ddddfe5..9cd11cab4f3e 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -38,6 +38,7 @@  static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
 static u64 max_lun = IBMVFC_MAX_LUN;
 static unsigned int max_targets = IBMVFC_MAX_TARGETS;
 static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
+static u16 scsi_qdepth = IBMVFC_SCSI_QDEPTH;
 static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
 static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
 static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
@@ -83,6 +84,9 @@  MODULE_PARM_DESC(default_timeout,
 module_param_named(max_requests, max_requests, uint, S_IRUGO);
 MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
 		 "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
+module_param_named(scsi_qdepth, scsi_qdepth, ushort, S_IRUGO);
+MODULE_PARM_DESC(scsi_qdepth, "Maximum scsi command depth per adapter queue. "
+		 "[Default=" __stringify(IBMVFC_SCSI_QDEPTH) "]");
 module_param_named(max_lun, max_lun, ullong, S_IRUGO);
 MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
 		 "[Default=" __stringify(IBMVFC_MAX_LUN) "]");
@@ -781,23 +785,22 @@  static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
  * Returns zero on success.
  **/
 static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
-				  struct ibmvfc_queue *queue,
-				  unsigned int size)
+				  struct ibmvfc_queue *queue)
 {
 	int i;
 	struct ibmvfc_event_pool *pool = &queue->evt_pool;
 
 	ENTER;
-	if (!size)
+	if (!queue->total_depth)
 		return 0;
 
-	pool->size = size;
-	pool->events = kcalloc(size, sizeof(*pool->events), GFP_KERNEL);
+	pool->size = queue->total_depth;
+	pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
 	if (!pool->events)
 		return -ENOMEM;
 
 	pool->iu_storage = dma_alloc_coherent(vhost->dev,
-					      size * sizeof(*pool->iu_storage),
+					      pool->size * sizeof(*pool->iu_storage),
 					      &pool->iu_token, 0);
 
 	if (!pool->iu_storage) {
@@ -807,9 +810,11 @@  static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
 
 	INIT_LIST_HEAD(&queue->sent);
 	INIT_LIST_HEAD(&queue->free);
+	queue->evt_free = queue->evt_depth;
+	queue->reserved_free = queue->reserved_depth;
 	spin_lock_init(&queue->l_lock);
 
-	for (i = 0; i < size; ++i) {
+	for (i = 0; i < pool->size; ++i) {
 		struct ibmvfc_event *evt = &pool->events[i];
 
 		/*
@@ -1033,6 +1038,12 @@  static void ibmvfc_free_event(struct ibmvfc_event *evt)
 
 	spin_lock_irqsave(&evt->queue->l_lock, flags);
 	list_add_tail(&evt->queue_list, &evt->queue->free);
+	if (evt->reserved) {
+		evt->reserved = 0;
+		evt->queue->reserved_free++;
+	} else {
+		evt->queue->evt_free++;
+	}
 	if (evt->eh_comp)
 		complete(evt->eh_comp);
 	spin_unlock_irqrestore(&evt->queue->l_lock, flags);
@@ -1475,6 +1486,12 @@  static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
 	struct ibmvfc_queue *async_crq = &vhost->async_crq;
 	struct device_node *of_node = vhost->dev->of_node;
 	const char *location;
+	u16 max_cmds;
+
+	max_cmds = scsi_qdepth + IBMVFC_NUM_INTERNAL_REQ;
+	if (mq_enabled)
+		max_cmds += (scsi_qdepth + IBMVFC_NUM_INTERNAL_SUBQ_REQ) *
+			vhost->client_scsi_channels;
 
 	memset(login_info, 0, sizeof(*login_info));
 
@@ -1489,7 +1506,7 @@  static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
 	if (vhost->client_migrated)
 		login_info->flags |= cpu_to_be16(IBMVFC_CLIENT_MIGRATED);
 
-	login_info->max_cmds = cpu_to_be32(max_requests + IBMVFC_NUM_INTERNAL_REQ);
+	login_info->max_cmds = cpu_to_be32(max_cmds);
 	login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN);
 
 	if (vhost->mq_enabled || vhost->using_channels)
@@ -1513,24 +1530,33 @@  static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
  *
  * Returns a free event from the pool.
  **/
-static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_queue *queue)
+static struct ibmvfc_event *__ibmvfc_get_event(struct ibmvfc_queue *queue, int reserved)
 {
-	struct ibmvfc_event *evt;
+	struct ibmvfc_event *evt = NULL;
 	unsigned long flags;
 
 	spin_lock_irqsave(&queue->l_lock, flags);
-	if (list_empty(&queue->free)) {
-		ibmvfc_log(queue->vhost, 4, "empty event pool on queue:%ld\n", queue->hwq_id);
-		spin_unlock_irqrestore(&queue->l_lock, flags);
-		return NULL;
+	if (reserved && queue->reserved_free) {
+		evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
+		evt->reserved = 1;
+		queue->reserved_free--;
+	} else if (queue->evt_free) {
+		evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
+		queue->evt_free--;
+	} else {
+		goto out;
 	}
-	evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
+
 	atomic_set(&evt->free, 0);
 	list_del(&evt->queue_list);
+out:
 	spin_unlock_irqrestore(&queue->l_lock, flags);
 	return evt;
 }
 
+#define ibmvfc_get_event(queue) __ibmvfc_get_event(queue, 0)
+#define ibmvfc_get_reserved_event(queue) __ibmvfc_get_event(queue, 1)
+
 /**
  * ibmvfc_locked_done - Calls evt completion with host_lock held
  * @evt:	ibmvfc evt to complete
@@ -2047,7 +2073,7 @@  static int ibmvfc_bsg_timeout(struct bsg_job *job)
 	}
 
 	vhost->aborting_passthru = 1;
-	evt = ibmvfc_get_event(&vhost->crq);
+	evt = ibmvfc_get_reserved_event(&vhost->crq);
 	if (!evt) {
 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
 		return -ENOMEM;
@@ -2110,7 +2136,7 @@  static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
 	if (unlikely((rc = ibmvfc_host_chkready(vhost))))
 		goto unlock_out;
 
-	evt = ibmvfc_get_event(&vhost->crq);
+	evt = ibmvfc_get_reserved_event(&vhost->crq);
 	if (!evt) {
 		rc = -ENOMEM;
 		goto unlock_out;
@@ -2232,7 +2258,7 @@  static int ibmvfc_bsg_request(struct bsg_job *job)
 		goto out;
 	}
 
-	evt = ibmvfc_get_event(&vhost->crq);
+	evt = ibmvfc_get_reserved_event(&vhost->crq);
 	if (!evt) {
 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
 		rc = -ENOMEM;
@@ -2533,7 +2559,7 @@  static struct ibmvfc_event *ibmvfc_init_tmf(struct ibmvfc_queue *queue,
 	struct ibmvfc_event *evt;
 	struct ibmvfc_tmf *tmf;
 
-	evt = ibmvfc_get_event(queue);
+	evt = ibmvfc_get_reserved_event(queue);
 	if (!evt)
 		return NULL;
 	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
@@ -3673,7 +3699,6 @@  static const struct scsi_host_template driver_template = {
 	.max_sectors = IBMVFC_MAX_SECTORS,
 	.shost_groups = ibmvfc_host_groups,
 	.track_queue_depth = 1,
-	.host_tagset = 1,
 };
 
 /**
@@ -4071,7 +4096,7 @@  static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
 		return;
 
 	kref_get(&tgt->kref);
-	evt = ibmvfc_get_event(&vhost->crq);
+	evt = ibmvfc_get_reserved_event(&vhost->crq);
 	if (!evt) {
 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
 		kref_put(&tgt->kref, ibmvfc_release_tgt);
@@ -4184,7 +4209,7 @@  static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
 
 	kref_get(&tgt->kref);
 	tgt->logo_rcvd = 0;
-	evt = ibmvfc_get_event(&vhost->crq);
+	evt = ibmvfc_get_reserved_event(&vhost->crq);
 	if (!evt) {
 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
 		kref_put(&tgt->kref, ibmvfc_release_tgt);
@@ -4266,7 +4291,7 @@  static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_t
 	struct ibmvfc_event *evt;
 
 	kref_get(&tgt->kref);
-	evt = ibmvfc_get_event(&vhost->crq);
+	evt = ibmvfc_get_reserved_event(&vhost->crq);
 	if (!evt)
 		return NULL;
 	ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
@@ -4441,7 +4466,7 @@  static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
 		return;
 
 	kref_get(&tgt->kref);
-	evt = ibmvfc_get_event(&vhost->crq);
+	evt = ibmvfc_get_reserved_event(&vhost->crq);
 	if (!evt) {
 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
 		kref_put(&tgt->kref, ibmvfc_release_tgt);
@@ -4613,7 +4638,7 @@  static void ibmvfc_adisc_timeout(struct timer_list *t)
 
 	vhost->abort_threads++;
 	kref_get(&tgt->kref);
-	evt = ibmvfc_get_event(&vhost->crq);
+	evt = ibmvfc_get_reserved_event(&vhost->crq);
 	if (!evt) {
 		tgt_err(tgt, "Failed to send cancel event for ADISC. rc=%d\n", rc);
 		vhost->abort_threads--;
@@ -4671,7 +4696,7 @@  static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
 		return;
 
 	kref_get(&tgt->kref);
-	evt = ibmvfc_get_event(&vhost->crq);
+	evt = ibmvfc_get_reserved_event(&vhost->crq);
 	if (!evt) {
 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
 		kref_put(&tgt->kref, ibmvfc_release_tgt);
@@ -4780,7 +4805,7 @@  static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
 		return;
 
 	kref_get(&tgt->kref);
-	evt = ibmvfc_get_event(&vhost->crq);
+	evt = ibmvfc_get_reserved_event(&vhost->crq);
 	if (!evt) {
 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
 		kref_put(&tgt->kref, ibmvfc_release_tgt);
@@ -4958,7 +4983,7 @@  static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
 static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
 {
 	struct ibmvfc_discover_targets *mad;
-	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
+	struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
 
 	if (!evt) {
@@ -5039,7 +5064,7 @@  static void ibmvfc_channel_setup(struct ibmvfc_host *vhost)
 {
 	struct ibmvfc_channel_setup_mad *mad;
 	struct ibmvfc_channel_setup *setup_buf = vhost->channel_setup_buf;
-	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
+	struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
 	struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs;
 	unsigned int num_channels =
 		min(vhost->client_scsi_channels, vhost->max_vios_scsi_channels);
@@ -5112,7 +5137,7 @@  static void ibmvfc_channel_enquiry_done(struct ibmvfc_event *evt)
 static void ibmvfc_channel_enquiry(struct ibmvfc_host *vhost)
 {
 	struct ibmvfc_channel_enquiry *mad;
-	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
+	struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
 
 	if (!evt) {
@@ -5240,7 +5265,7 @@  static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
 static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
 {
 	struct ibmvfc_npiv_login_mad *mad;
-	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
+	struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
 
 	if (!evt) {
 		ibmvfc_dbg(vhost, "NPIV Login failed: no available events\n");
@@ -5311,7 +5336,7 @@  static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
 	struct ibmvfc_npiv_logout_mad *mad;
 	struct ibmvfc_event *evt;
 
-	evt = ibmvfc_get_event(&vhost->crq);
+	evt = ibmvfc_get_reserved_event(&vhost->crq);
 	if (!evt) {
 		ibmvfc_dbg(vhost, "NPIV Logout failed: no available events\n");
 		ibmvfc_hard_reset_host(vhost);
@@ -5765,7 +5790,6 @@  static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
 {
 	struct device *dev = vhost->dev;
 	size_t fmt_size;
-	unsigned int pool_size = 0;
 
 	ENTER;
 	spin_lock_init(&queue->_lock);
@@ -5774,7 +5798,9 @@  static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
 	switch (fmt) {
 	case IBMVFC_CRQ_FMT:
 		fmt_size = sizeof(*queue->msgs.crq);
-		pool_size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
+		queue->total_depth = scsi_qdepth + IBMVFC_NUM_INTERNAL_REQ;
+		queue->evt_depth = scsi_qdepth;
+		queue->reserved_depth = IBMVFC_NUM_INTERNAL_REQ;
 		break;
 	case IBMVFC_ASYNC_FMT:
 		fmt_size = sizeof(*queue->msgs.async);
@@ -5782,14 +5808,17 @@  static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
 	case IBMVFC_SUB_CRQ_FMT:
 		fmt_size = sizeof(*queue->msgs.scrq);
 		/* We need one extra event for Cancel Commands */
-		pool_size = max_requests + 1;
+		queue->total_depth = scsi_qdepth + IBMVFC_NUM_INTERNAL_SUBQ_REQ;
+		queue->evt_depth = scsi_qdepth;
+		queue->reserved_depth = IBMVFC_NUM_INTERNAL_SUBQ_REQ;
 		break;
 	default:
 		dev_warn(dev, "Unknown command/response queue message format: %d\n", fmt);
 		return -EINVAL;
 	}
 
-	if (ibmvfc_init_event_pool(vhost, queue, pool_size)) {
+	queue->fmt = fmt;
+	if (ibmvfc_init_event_pool(vhost, queue)) {
 		dev_err(dev, "Couldn't initialize event pool.\n");
 		return -ENOMEM;
 	}
@@ -5808,7 +5837,6 @@  static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
 	}
 
 	queue->cur = 0;
-	queue->fmt = fmt;
 	queue->size = PAGE_SIZE / fmt_size;
 
 	queue->vhost = vhost;
@@ -6243,7 +6271,7 @@  static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	}
 
 	shost->transportt = ibmvfc_transport_template;
-	shost->can_queue = max_requests;
+	shost->can_queue = scsi_qdepth;
 	shost->max_lun = max_lun;
 	shost->max_id = max_targets;
 	shost->max_sectors = IBMVFC_MAX_SECTORS;
@@ -6402,7 +6430,9 @@  static int ibmvfc_resume(struct device *dev)
  */
 static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
 {
-	unsigned long pool_dma = max_requests * sizeof(union ibmvfc_iu);
+	unsigned long pool_dma;
+
+	pool_dma = (IBMVFC_MAX_SCSI_QUEUES * scsi_qdepth) * sizeof(union ibmvfc_iu);
 	return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
 }
 
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index c39a245f43d0..0e641a880e1c 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -27,6 +27,7 @@ 
 #define IBMVFC_ABORT_TIMEOUT		8
 #define IBMVFC_ABORT_WAIT_TIMEOUT	40
 #define IBMVFC_MAX_REQUESTS_DEFAULT	100
+#define IBMVFC_SCSI_QDEPTH		128
 
 #define IBMVFC_DEBUG			0
 #define IBMVFC_MAX_TARGETS		1024
@@ -57,6 +58,8 @@ 
  * 2 for each discovery thread
  */
 #define IBMVFC_NUM_INTERNAL_REQ	(1 + 1 + 1 + 2 + (disc_threads * 2))
+/* Reserved subset of events for cancelling channelized IO commands */
+#define IBMVFC_NUM_INTERNAL_SUBQ_REQ 4
 
 #define IBMVFC_MAD_SUCCESS		0x00
 #define IBMVFC_MAD_NOT_SUPPORTED	0xF1
@@ -758,6 +761,7 @@  struct ibmvfc_event {
 	struct completion *eh_comp;
 	struct timer_list timer;
 	u16 hwq;
+	u8 reserved;
 };
 
 /* a pool of event structs for use */
@@ -793,6 +797,11 @@  struct ibmvfc_queue {
 	struct ibmvfc_event_pool evt_pool;
 	struct list_head sent;
 	struct list_head free;
+	u16 total_depth;
+	u16 evt_depth;
+	u16 reserved_depth;
+	u16 evt_free;
+	u16 reserved_free;
 	spinlock_t l_lock;
 
 	union ibmvfc_iu cancel_rsp;