From patchwork Wed Aug 12 20:54:04 2020
X-Patchwork-Submitter: Jim Gill <jgill@vmware.com>
X-Patchwork-Id: 11711339
Date: Wed, 12 Aug 2020 13:54:04 -0700
From: Jim Gill <jgill@vmware.com>
Subject: [PATCH 1/3 for-next] pvscsi: Use coherent memory instead of dma mapping sg lists
Message-ID: <20200812205404.GA17846@petr-dev3.eng.vmware.com>
X-Mailing-List: linux-scsi@vger.kernel.org

Use coherent memory instead of dma-mapping the sg lists each time they
are used. This becomes important with SEV/swiotlb, where dma mapping
would otherwise bounce the data on every I/O. It also removes a
potential point of failure.

Tested using a "bonnie++" run on an 8GB pvscsi disk on a swiotlb=force
booted kernel.
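For context on the API the patch moves to: dma_pool_create() carves
fixed-size, fixed-alignment blocks out of coherent memory, so each block
already carries a bus address and never needs a per-I/O dma_map_single()
(and therefore never bounces through swiotlb). Below is a minimal usage
sketch, illustrative only and not part of the patch: example_setup(),
the pool name, and blk_size are placeholders, not driver identifiers.

    #include <linux/device.h>
    #include <linux/dmapool.h>
    #include <linux/gfp.h>

    /* Illustrative only; "example" names are placeholders. */
    static int example_setup(struct device *dev, size_t blk_size)
    {
    	struct dma_pool *pool;
    	dma_addr_t dma;
    	void *vaddr;

    	/* Fixed-size blocks, PAGE_SIZE aligned, as the patch requires. */
    	pool = dma_pool_create("example", dev, blk_size, PAGE_SIZE, 0);
    	if (!pool)
    		return -ENOMEM;

    	/* CPU and bus addresses come back together; no mapping step. */
    	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
    	if (!vaddr) {
    		dma_pool_destroy(pool);
    		return -ENOMEM;
    	}

    	/* ... use vaddr on the CPU side, dma in device descriptors ... */

    	dma_pool_free(pool, vaddr, dma);
    	dma_pool_destroy(pool);
    	return 0;
    }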
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
[jgill@vmware.com: forwarding patch on behalf of thellstrom]
Acked-by: Jim Gill <jgill@vmware.com>
---
 drivers/scsi/vmw_pvscsi.c | 48 +++++++++++++++++++++++------------------------
 1 file changed, 23 insertions(+), 25 deletions(-)

diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 8dbb4db..0573e94 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -27,6 +27,7 @@
 #include <...>
 #include <...>
 #include <...>
+#include <linux/dmapool.h>
 #include <...>
 #include <...>
@@ -98,6 +99,8 @@ struct pvscsi_adapter {
 
 	struct list_head	cmd_pool;
 	struct pvscsi_ctx	*cmd_map;
+
+	struct dma_pool		*sg_pool;
 };
 
@@ -372,15 +375,6 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
 			pvscsi_create_sg(ctx, sg, segs);
 
 			e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
-			ctx->sglPA = dma_map_single(&adapter->dev->dev,
-					ctx->sgl, SGL_SIZE, DMA_TO_DEVICE);
-			if (dma_mapping_error(&adapter->dev->dev, ctx->sglPA)) {
-				scmd_printk(KERN_ERR, cmd,
-					    "vmw_pvscsi: Failed to map ctx sglist for DMA.\n");
-				scsi_dma_unmap(cmd);
-				ctx->sglPA = 0;
-				return -ENOMEM;
-			}
 			e->dataAddr = ctx->sglPA;
 		} else
 			e->dataAddr = sg_dma_address(sg);
@@ -425,14 +419,9 @@ static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
 	if (bufflen != 0) {
 		unsigned count = scsi_sg_count(cmd);
 
-		if (count != 0) {
+		if (count != 0)
 			scsi_dma_unmap(cmd);
-			if (ctx->sglPA) {
-				dma_unmap_single(&adapter->dev->dev, ctx->sglPA,
-						 SGL_SIZE, DMA_TO_DEVICE);
-				ctx->sglPA = 0;
-			}
-		} else
+		else
 			dma_unmap_single(&adapter->dev->dev, ctx->dataPA,
 					 bufflen, cmd->sc_data_direction);
 	}
@@ -1206,7 +1195,9 @@ static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter)
 	unsigned i;
 
 	for (i = 0; i < adapter->req_depth; ++i, ++ctx)
-		free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE));
+		dma_pool_free(adapter->sg_pool, ctx->sgl, ctx->sglPA);
+
+	dma_pool_destroy(adapter->sg_pool);
 }
 
 static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
@@ -1225,10 +1216,11 @@ static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
 
 	pci_release_regions(adapter->dev);
 
-	if (adapter->cmd_map) {
+	if (adapter->sg_pool)
 		pvscsi_free_sgls(adapter);
+
+	if (adapter->cmd_map)
 		kfree(adapter->cmd_map);
-	}
 
 	if (adapter->rings_state)
 		dma_free_coherent(&adapter->dev->dev, PAGE_SIZE,
@@ -1268,20 +1260,26 @@ static int pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
 	struct pvscsi_ctx *ctx;
 	int i;
 
+	/* Use a dma pool so that we can impose alignment constraints. */
+	adapter->sg_pool = dma_pool_create("pvscsi_sg", pvscsi_dev(adapter),
+					   SGL_SIZE, PAGE_SIZE, 0);
+	if (!adapter->sg_pool)
+		return -ENOMEM;
+
 	ctx = adapter->cmd_map;
 	BUILD_BUG_ON(sizeof(struct pvscsi_sg_list) > SGL_SIZE);
 	for (i = 0; i < adapter->req_depth; ++i, ++ctx) {
-		ctx->sgl = (void *)__get_free_pages(GFP_KERNEL,
-						    get_order(SGL_SIZE));
-		ctx->sglPA = 0;
-		BUG_ON(!IS_ALIGNED(((unsigned long)ctx->sgl), PAGE_SIZE));
+		ctx->sgl = dma_pool_alloc(adapter->sg_pool, GFP_KERNEL,
+					  &ctx->sglPA);
 		if (!ctx->sgl) {
 			for (; i >= 0; --i, --ctx) {
-				free_pages((unsigned long)ctx->sgl,
-					   get_order(SGL_SIZE));
+				dma_pool_free(adapter->sg_pool, ctx->sgl,
+					      ctx->sglPA);
 				ctx->sgl = NULL;
 			}
+			dma_pool_destroy(adapter->sg_pool);
+			adapter->sg_pool = NULL;
 			return -ENOMEM;
 		}
 	}

From patchwork Wed Aug 12 20:55:02 2020
X-Patchwork-Submitter: Jim Gill <jgill@vmware.com>
X-Patchwork-Id: 11711341
Date: Wed, 12 Aug 2020 13:55:02 -0700
From: Jim Gill <jgill@vmware.com>
Subject: [PATCH 2/3 for-next] pvscsi: Limit ring pages for swiotlb
Message-ID: <20200812205502.GA18382@petr-dev3.eng.vmware.com>
X-Mailing-List: linux-scsi@vger.kernel.org

A large number of outstanding scsi commands can completely fill up the
allowable DMA size. Typically this happens with SWIOTLB and SEV
encryption active. While this is harmless for the scsi middle layer, it
floods the kernel log with error messages and can cause DMA mapping
failures in other device drivers. Reduce the number of ring pages to 1
if we detect DMA size restrictions.
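The detection reduces to a single predicate. Here is a stand-alone
sketch of the heuristic (illustrative only; example_ring_pages() is a
placeholder, while the PVSCSI_* macros are the driver's existing
constants and the new one added by this patch):

    #include <linux/mem_encrypt.h>

    /* Illustrative placeholder, not the driver function. */
    static unsigned int example_ring_pages(unsigned int max_id)
    {
    	/*
    	 * With SEV/SME active, all DMA is bounced through the limited
    	 * swiotlb aperture, so keep the ring (and with it the number
    	 * of in-flight commands) small.
    	 */
    	if (mem_encrypt_active())
    		return PVSCSI_RESTRICT_NUM_PAGES_PER_RING;	/* 1 */

    	/* Unrestricted: 8 pages up to 16 targets, maximum above that. */
    	return max_id > 16 ? PVSCSI_SETUP_RINGS_MAX_NUM_PAGES :
    			     PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
    }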
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
[jgill@vmware.com: forwarding patch on behalf of thellstrom]
Acked-by: Jim Gill <jgill@vmware.com>
---
 drivers/scsi/vmw_pvscsi.c | 30 ++++++++++++++++++++++--------
 1 file changed, 22 insertions(+), 8 deletions(-)

diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 0573e94..fa2748f 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -28,6 +28,7 @@
 #include <...>
 #include <...>
 #include <...>
+#include <linux/mem_encrypt.h>
 #include <...>
 #include <...>
@@ -45,6 +46,7 @@ MODULE_LICENSE("GPL");
 MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING);
 
 #define PVSCSI_DEFAULT_NUM_PAGES_PER_RING	8
+#define PVSCSI_RESTRICT_NUM_PAGES_PER_RING	1
 #define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING	1
 #define PVSCSI_DEFAULT_QUEUE_DEPTH		254
 #define SGL_SIZE				PAGE_SIZE
@@ -1416,14 +1418,26 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	max_id = pvscsi_get_max_targets(adapter);
 	printk(KERN_INFO "vmw_pvscsi: max_id: %u\n", max_id);
 
-	if (pvscsi_ring_pages == 0)
-		/*
-		 * Set the right default value. Up to 16 it is 8, above it is
-		 * max.
-		 */
-		pvscsi_ring_pages = (max_id > 16) ?
-			PVSCSI_SETUP_RINGS_MAX_NUM_PAGES :
-			PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
+	if (pvscsi_ring_pages == 0) {
+		struct sysinfo si;
+
+		si_meminfo(&si);
+		if (mem_encrypt_active())
+			/*
+			 * There are DMA size restrictions. Reduce the queue
+			 * depth to try to avoid exhausting DMA size and
+			 * triggering dma mapping errors.
+			 */
+			pvscsi_ring_pages = PVSCSI_RESTRICT_NUM_PAGES_PER_RING;
+		else
+			/*
+			 * Set the right default value. Up to 16 it is 8,
+			 * above it is max.
+			 */
+			pvscsi_ring_pages = (max_id > 16) ?
+				PVSCSI_SETUP_RINGS_MAX_NUM_PAGES :
+				PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
+	}
 	printk(KERN_INFO "vmw_pvscsi: setting ring_pages to %d\n",
 	       pvscsi_ring_pages);

From patchwork Wed Aug 12 20:56:15 2020
X-Patchwork-Submitter: Jim Gill <jgill@vmware.com>
X-Patchwork-Id: 11711343
Date: Wed, 12 Aug 2020 13:56:15 -0700
From: Jim Gill <jgill@vmware.com>
Subject: [PATCH 3/3 for-next] pvscsi: Fix uninitialized sense buffer with swiotlb
Message-ID: <20200812205615.GA18423@petr-dev3.eng.vmware.com>
X-Mailing-List: linux-scsi@vger.kernel.org

It seems the device sometimes leaves part of the sense buffer
uninitialized. This causes massive problems with swiotlb, where any
prior initialization of the sense buffer by the scsi middle layer is
not propagated to the device, since we use DMA_FROM_DEVICE when
dma-mapping the sense buffer. Fix this by specifying DMA_BIDIRECTIONAL
instead, which makes the scsi errors go away.

Tested using a bonnie++ run on a swiotlb=force booted kernel.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
[jgill@vmware.com: forwarding patch on behalf of thellstrom]
Acked-by: Jim Gill <jgill@vmware.com>
---
 drivers/scsi/vmw_pvscsi.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index fa2748f..c179a5d 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -429,7 +429,7 @@ static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
 	}
 	if (cmd->sense_buffer)
 		dma_unmap_single(&adapter->dev->dev, ctx->sensePA,
-				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+				 SCSI_SENSE_BUFFERSIZE, DMA_BIDIRECTIONAL);
 }
 
 static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
@@ -714,7 +714,7 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
 	if (cmd->sense_buffer) {
 		ctx->sensePA = dma_map_single(&adapter->dev->dev,
 				cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
-				DMA_FROM_DEVICE);
+				DMA_BIDIRECTIONAL);
 		if (dma_mapping_error(&adapter->dev->dev, ctx->sensePA)) {
 			scmd_printk(KERN_DEBUG, cmd,
 				    "vmw_pvscsi: Failed to map sense buffer for DMA.\n");
@@ -746,7 +746,7 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
 		if (cmd->sense_buffer) {
 			dma_unmap_single(&adapter->dev->dev, ctx->sensePA,
 					 SCSI_SENSE_BUFFERSIZE,
-					 DMA_FROM_DEVICE);
+					 DMA_BIDIRECTIONAL);
 			ctx->sensePA = 0;
 		}
 		return -ENOMEM;
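The swiotlb behavior behind this fix: for a DMA_FROM_DEVICE mapping,
swiotlb does not copy the original buffer into the bounce page at map
time, so any bytes the device never writes come back as stale
bounce-buffer contents on unmap. DMA_BIDIRECTIONAL copies in both
directions, preserving the middle layer's initialization. A sketch of
the resulting map/unmap pair (illustrative only; the example_* helpers
are placeholders, not driver functions):

    #include <linux/dma-mapping.h>
    #include <scsi/scsi_cmnd.h>

    static int example_map_sense(struct device *dev, struct scsi_cmnd *cmd,
    			     dma_addr_t *pa)
    {
    	/*
    	 * BIDIRECTIONAL: the pre-initialized sense buffer is copied
    	 * out to the bounce page on map and back on unmap, so partial
    	 * device writes land on well-defined data.
    	 */
    	*pa = dma_map_single(dev, cmd->sense_buffer,
    			     SCSI_SENSE_BUFFERSIZE, DMA_BIDIRECTIONAL);
    	return dma_mapping_error(dev, *pa) ? -ENOMEM : 0;
    }

    static void example_unmap_sense(struct device *dev, dma_addr_t pa)
    {
    	/* The unmap direction must match the map direction. */
    	dma_unmap_single(dev, pa, SCSI_SENSE_BUFFERSIZE,
    			 DMA_BIDIRECTIONAL);
    }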