
[v7,2/8] cxl/mem: Wire up event interrupts

Message ID 20221216-cxl-ev-log-v7-2-2316a5c8f7d8@intel.com (mailing list archive)
State Handled Elsewhere, archived
Series cxl: Process event logs

Commit Message

Ira Weiny Jan. 18, 2023, 5:53 a.m. UTC
From: Davidlohr Bueso <dave@stgolabs.net>

Currently the only CXL features targeted for irq support require their
message numbers to be within the first 16 entries.  The device may,
however, support fewer than 16 entries depending on the support it
provides.

Attempt to allocate these 16 irq vectors.  If the device supports fewer,
the PCI infrastructure will allocate only that number.  Upon successful
allocation, users can plug in their respective ISRs at any point
thereafter.

CXL device events are signaled via interrupts.  Each event log may have
a different interrupt message number.  These message numbers are
reported in the Get Event Interrupt Policy mailbox command.

Add interrupt support for event logs.  Interrupts are allocated as
shared interrupts.  Therefore, all or some event logs can share the same
message number.

In addition, on any interrupt all logs are queried in order from most to
least severe, based on the status register.

Finally, place all event configuration logic into cxl_event_config().
Previously the logic was a simple 'read all' on startup, but interrupts
must be configured prior to any reads to ensure no events are missed.  A
single event configuration function results in a cleaner overall
implementation.

Cc: Bjorn Helgaas <helgaas@kernel.org>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Co-developed-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
---
Changes in v7:
	<no change>
---
 drivers/cxl/cxl.h    |   4 +
 drivers/cxl/cxlmem.h |  19 +++++
 drivers/cxl/cxlpci.h |   6 ++
 drivers/cxl/pci.c    | 221 ++++++++++++++++++++++++++++++++++++++++++++++++---
 4 files changed, 240 insertions(+), 10 deletions(-)

Comments

Dan Williams Jan. 26, 2023, 10 p.m. UTC | #1
ira.weiny@ wrote:
> From: Davidlohr Bueso <dave@stgolabs.net>
> 
> Currently the only CXL features targeted for irq support require their
> message numbers to be within the first 16 entries.  The device may
> however support less than 16 entries depending on the support it
> provides.
> 
> Attempt to allocate these 16 irq vectors.  If the device supports less
> then the PCI infrastructure will allocate that number.  Upon successful
> allocation, users can plug in their respective isr at any point
> thereafter.
> 
> CXL device events are signaled via interrupts.  Each event log may have
> a different interrupt message number.  These message numbers are
> reported in the Get Event Interrupt Policy mailbox command.
> 
> Add interrupt support for event logs.  Interrupts are allocated as
> shared interrupts.  Therefore, all or some event logs can share the same
> message number.
> 
> In addition all logs are queried on any interrupt in order of the most
> to least severe based on the status register.
> 
> Finally place all event configuration logic into cxl_event_config().
> Previously the logic was a simple 'read all' on start up.  But
> interrupts must be configured prior to any reads to ensure no events are
> missed.  A single event configuration function results in a cleaner over
> all implementation.
> 
> Cc: Bjorn Helgaas <helgaas@kernel.org>
> Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
> Co-developed-by: Ira Weiny <ira.weiny@intel.com>
> Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
> Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
> Signed-off-by: Ira Weiny <ira.weiny@intel.com>
> ---
> Changes in v7:
> 	<no change>
> ---
>  drivers/cxl/cxl.h    |   4 +
>  drivers/cxl/cxlmem.h |  19 +++++
>  drivers/cxl/cxlpci.h |   6 ++
>  drivers/cxl/pci.c    | 221 ++++++++++++++++++++++++++++++++++++++++++++++++---
>  4 files changed, 240 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
> index 5974d1082210..b3964149c77b 100644
> --- a/drivers/cxl/cxl.h
> +++ b/drivers/cxl/cxl.h
> @@ -168,6 +168,10 @@ static inline int ways_to_eiw(unsigned int ways, u8 *eiw)
>  				 CXLDEV_EVENT_STATUS_FAIL |	\
>  				 CXLDEV_EVENT_STATUS_FATAL)
>  
> +/* CXL rev 3.0 section 8.2.9.2.4; Table 8-52 */
> +#define CXLDEV_EVENT_INT_MODE_MASK	GENMASK(1, 0)
> +#define CXLDEV_EVENT_INT_MSGNUM_MASK	GENMASK(7, 4)
> +
>  /* CXL 2.0 8.2.8.4 Mailbox Registers */
>  #define CXLDEV_MBOX_CAPS_OFFSET 0x00
>  #define   CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK GENMASK(4, 0)
> diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
> index 0f272206e294..5bef485cd8d9 100644
> --- a/drivers/cxl/cxlmem.h
> +++ b/drivers/cxl/cxlmem.h
> @@ -194,6 +194,23 @@ struct cxl_endpoint_dvsec_info {
>  	struct range dvsec_range[2];
>  };
>  
> +/**
> + * Event Interrupt Policy
> + *
> + * CXL rev 3.0 section 8.2.9.2.4; Table 8-52
> + */
> +enum cxl_event_int_mode {
> +	CXL_INT_NONE		= 0x00,
> +	CXL_INT_MSI_MSIX	= 0x01,
> +	CXL_INT_FW		= 0x02
> +};
> +struct cxl_event_interrupt_policy {
> +	u8 info_settings;
> +	u8 warn_settings;
> +	u8 failure_settings;
> +	u8 fatal_settings;
> +} __packed;
> +
>  /**
>   * struct cxl_event_state - Event log driver state
>   *
> @@ -288,6 +305,8 @@ enum cxl_opcode {
>  	CXL_MBOX_OP_RAW			= CXL_MBOX_OP_INVALID,
>  	CXL_MBOX_OP_GET_EVENT_RECORD	= 0x0100,
>  	CXL_MBOX_OP_CLEAR_EVENT_RECORD	= 0x0101,
> +	CXL_MBOX_OP_GET_EVT_INT_POLICY	= 0x0102,
> +	CXL_MBOX_OP_SET_EVT_INT_POLICY	= 0x0103,
>  	CXL_MBOX_OP_GET_FW_INFO		= 0x0200,
>  	CXL_MBOX_OP_ACTIVATE_FW		= 0x0202,
>  	CXL_MBOX_OP_GET_SUPPORTED_LOGS	= 0x0400,
> diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
> index 77dbdb980b12..a8ea04f536ab 100644
> --- a/drivers/cxl/cxlpci.h
> +++ b/drivers/cxl/cxlpci.h
> @@ -53,6 +53,12 @@
>  #define	    CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK			GENMASK(15, 8)
>  #define     CXL_DVSEC_REG_LOCATOR_BLOCK_OFF_LOW_MASK		GENMASK(31, 16)
>  
> +/*
> + * NOTE: Currently all the functions which are enabled for CXL require their
> + * vectors to be in the first 16.  Use this as the default max.
> + */
> +#define CXL_PCI_DEFAULT_MAX_VECTORS 16
> +
>  /* Register Block Identifier (RBI) */
>  enum cxl_regloc_type {
>  	CXL_REGLOC_RBI_EMPTY = 0,
> diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
> index 0fd7742ee70c..a4ee073fd16d 100644
> --- a/drivers/cxl/pci.c
> +++ b/drivers/cxl/pci.c
> @@ -447,6 +447,209 @@ static int cxl_mem_alloc_event_buf(struct cxl_dev_state *cxlds)
>  	return 0;
>  }
>  
> +static int cxl_alloc_irq_vectors(struct pci_dev *pdev)
> +{
> +	int nvecs;
> +
> +	/*
> +	 * Per CXL 3.0 3.1.1 CXL.io Endpoint a function on a CXL device must
> +	 * not generate INTx messages if that function participates in
> +	 * CXL.cache or CXL.mem.
> +	 *
> +	 * Additionally pci_alloc_irq_vectors() handles calling
> +	 * pci_free_irq_vectors() automatically despite not being called
> +	 * pcim_*.  See pci_setup_msi_context().
> +	 */
> +	nvecs = pci_alloc_irq_vectors(pdev, 1, CXL_PCI_DEFAULT_MAX_VECTORS,
> +				      PCI_IRQ_MSIX | PCI_IRQ_MSI);
> +	if (nvecs < 1) {
> +		dev_dbg(&pdev->dev, "Failed to alloc irq vectors: %d\n", nvecs);
> +		return -ENXIO;
> +	}
> +	return 0;
> +}
> +
> +struct cxl_dev_id {
> +	struct cxl_dev_state *cxlds;
> +};
> +
> +static irqreturn_t cxl_event_thread(int irq, void *id)
> +{
> +	struct cxl_dev_id *dev_id = id;
> +	struct cxl_dev_state *cxlds = dev_id->cxlds;
> +	u32 status;
> +
> +	do {
> +		/*
> +		 * CXL 3.0 8.2.8.3.1: The lower 32 bits are the status;
> +		 * ignore the reserved upper 32 bits
> +		 */
> +		status = readl(cxlds->regs.status + CXLDEV_DEV_EVENT_STATUS_OFFSET);
> +		/* Ignore logs unknown to the driver */
> +		status &= CXLDEV_EVENT_STATUS_ALL;
> +		if (!status)
> +			break;
> +		cxl_mem_get_event_records(cxlds, status);
> +		cond_resched();
> +	} while (status);
> +
> +	return IRQ_HANDLED;
> +}
> +
> +static int cxl_event_req_irq(struct cxl_dev_state *cxlds, u8 setting)
> +{
> +	struct device *dev = cxlds->dev;
> +	struct pci_dev *pdev = to_pci_dev(dev);
> +	struct cxl_dev_id *dev_id;
> +	int irq;
> +
> +	if (FIELD_GET(CXLDEV_EVENT_INT_MODE_MASK, setting) != CXL_INT_MSI_MSIX)
> +		return -ENXIO;
> +
> +	/* dev_id must be globally unique and must contain the cxlds */
> +	dev_id = devm_kzalloc(dev, sizeof(*dev_id), GFP_KERNEL);
> +	if (!dev_id)
> +		return -ENOMEM;
> +	dev_id->cxlds = cxlds;
> +
> +	irq =  pci_irq_vector(pdev,
> +			      FIELD_GET(CXLDEV_EVENT_INT_MSGNUM_MASK, setting));
> +	if (irq < 0)
> +		return irq;
> +
> +	return devm_request_threaded_irq(dev, irq, NULL, cxl_event_thread,
> +					 IRQF_SHARED, NULL, dev_id);
> +}
> +
> +static int cxl_event_get_int_policy(struct cxl_dev_state *cxlds,
> +				    struct cxl_event_interrupt_policy *policy)
> +{
> +	struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) {

At declaration time there is no need for the cast; I'll zap it when applying.
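
I.e. a plain designated initializer is enough here, something like (same
fields as in the patch):

```
	struct cxl_mbox_cmd mbox_cmd = {
		.opcode = CXL_MBOX_OP_GET_EVT_INT_POLICY,
		.payload_out = policy,
		.size_out = sizeof(*policy),
	};
```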

The rest of this patch looks good to me.
Dave Jiang May 9, 2023, 9:42 p.m. UTC | #2
On 1/17/23 10:53 PM, ira.weiny@intel.com wrote:
> From: Davidlohr Bueso <dave@stgolabs.net>
> 
> Currently the only CXL features targeted for irq support require their
> message numbers to be within the first 16 entries.  The device may
> however support less than 16 entries depending on the support it
> provides.
> 
> Attempt to allocate these 16 irq vectors.  If the device supports less
> then the PCI infrastructure will allocate that number.  Upon successful
> allocation, users can plug in their respective isr at any point
> thereafter.
> 
> CXL device events are signaled via interrupts.  Each event log may have
> a different interrupt message number.  These message numbers are
> reported in the Get Event Interrupt Policy mailbox command.
> 
> Add interrupt support for event logs.  Interrupts are allocated as
> shared interrupts.  Therefore, all or some event logs can share the same
> message number.
> 
> In addition all logs are queried on any interrupt in order of the most
> to least severe based on the status register.
> 
> Finally place all event configuration logic into cxl_event_config().
> Previously the logic was a simple 'read all' on start up.  But
> interrupts must be configured prior to any reads to ensure no events are
> missed.  A single event configuration function results in a cleaner over
> all implementation.
> 
> Cc: Bjorn Helgaas <helgaas@kernel.org>
> Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
> Co-developed-by: Ira Weiny <ira.weiny@intel.com>
> Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
> Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
> Signed-off-by: Ira Weiny <ira.weiny@intel.com>

Reviewed-by: Dave Jiang <dave.jiang@intel.com>

> ---
> Changes in v7:
> 	<no change>
> ---
>   drivers/cxl/cxl.h    |   4 +
>   drivers/cxl/cxlmem.h |  19 +++++
>   drivers/cxl/cxlpci.h |   6 ++
>   drivers/cxl/pci.c    | 221 ++++++++++++++++++++++++++++++++++++++++++++++++---
>   4 files changed, 240 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
> index 5974d1082210..b3964149c77b 100644
> --- a/drivers/cxl/cxl.h
> +++ b/drivers/cxl/cxl.h
> @@ -168,6 +168,10 @@ static inline int ways_to_eiw(unsigned int ways, u8 *eiw)
>   				 CXLDEV_EVENT_STATUS_FAIL |	\
>   				 CXLDEV_EVENT_STATUS_FATAL)
>   
> +/* CXL rev 3.0 section 8.2.9.2.4; Table 8-52 */
> +#define CXLDEV_EVENT_INT_MODE_MASK	GENMASK(1, 0)
> +#define CXLDEV_EVENT_INT_MSGNUM_MASK	GENMASK(7, 4)
> +
>   /* CXL 2.0 8.2.8.4 Mailbox Registers */
>   #define CXLDEV_MBOX_CAPS_OFFSET 0x00
>   #define   CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK GENMASK(4, 0)
> diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
> index 0f272206e294..5bef485cd8d9 100644
> --- a/drivers/cxl/cxlmem.h
> +++ b/drivers/cxl/cxlmem.h
> @@ -194,6 +194,23 @@ struct cxl_endpoint_dvsec_info {
>   	struct range dvsec_range[2];
>   };
>   
> +/**
> + * Event Interrupt Policy
> + *
> + * CXL rev 3.0 section 8.2.9.2.4; Table 8-52
> + */
> +enum cxl_event_int_mode {
> +	CXL_INT_NONE		= 0x00,
> +	CXL_INT_MSI_MSIX	= 0x01,
> +	CXL_INT_FW		= 0x02
> +};
> +struct cxl_event_interrupt_policy {
> +	u8 info_settings;
> +	u8 warn_settings;
> +	u8 failure_settings;
> +	u8 fatal_settings;
> +} __packed;
> +
>   /**
>    * struct cxl_event_state - Event log driver state
>    *
> @@ -288,6 +305,8 @@ enum cxl_opcode {
>   	CXL_MBOX_OP_RAW			= CXL_MBOX_OP_INVALID,
>   	CXL_MBOX_OP_GET_EVENT_RECORD	= 0x0100,
>   	CXL_MBOX_OP_CLEAR_EVENT_RECORD	= 0x0101,
> +	CXL_MBOX_OP_GET_EVT_INT_POLICY	= 0x0102,
> +	CXL_MBOX_OP_SET_EVT_INT_POLICY	= 0x0103,
>   	CXL_MBOX_OP_GET_FW_INFO		= 0x0200,
>   	CXL_MBOX_OP_ACTIVATE_FW		= 0x0202,
>   	CXL_MBOX_OP_GET_SUPPORTED_LOGS	= 0x0400,
> diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
> index 77dbdb980b12..a8ea04f536ab 100644
> --- a/drivers/cxl/cxlpci.h
> +++ b/drivers/cxl/cxlpci.h
> @@ -53,6 +53,12 @@
>   #define	    CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK			GENMASK(15, 8)
>   #define     CXL_DVSEC_REG_LOCATOR_BLOCK_OFF_LOW_MASK		GENMASK(31, 16)
>   
> +/*
> + * NOTE: Currently all the functions which are enabled for CXL require their
> + * vectors to be in the first 16.  Use this as the default max.
> + */
> +#define CXL_PCI_DEFAULT_MAX_VECTORS 16
> +
>   /* Register Block Identifier (RBI) */
>   enum cxl_regloc_type {
>   	CXL_REGLOC_RBI_EMPTY = 0,
> diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
> index 0fd7742ee70c..a4ee073fd16d 100644
> --- a/drivers/cxl/pci.c
> +++ b/drivers/cxl/pci.c
> @@ -447,6 +447,209 @@ static int cxl_mem_alloc_event_buf(struct cxl_dev_state *cxlds)
>   	return 0;
>   }
>   
> +static int cxl_alloc_irq_vectors(struct pci_dev *pdev)
> +{
> +	int nvecs;
> +
> +	/*
> +	 * Per CXL 3.0 3.1.1 CXL.io Endpoint a function on a CXL device must
> +	 * not generate INTx messages if that function participates in
> +	 * CXL.cache or CXL.mem.
> +	 *
> +	 * Additionally pci_alloc_irq_vectors() handles calling
> +	 * pci_free_irq_vectors() automatically despite not being called
> +	 * pcim_*.  See pci_setup_msi_context().
> +	 */
> +	nvecs = pci_alloc_irq_vectors(pdev, 1, CXL_PCI_DEFAULT_MAX_VECTORS,
> +				      PCI_IRQ_MSIX | PCI_IRQ_MSI);
> +	if (nvecs < 1) {
> +		dev_dbg(&pdev->dev, "Failed to alloc irq vectors: %d\n", nvecs);
> +		return -ENXIO;
> +	}
> +	return 0;
> +}
> +
> +struct cxl_dev_id {
> +	struct cxl_dev_state *cxlds;
> +};
> +
> +static irqreturn_t cxl_event_thread(int irq, void *id)
> +{
> +	struct cxl_dev_id *dev_id = id;
> +	struct cxl_dev_state *cxlds = dev_id->cxlds;
> +	u32 status;
> +
> +	do {
> +		/*
> +		 * CXL 3.0 8.2.8.3.1: The lower 32 bits are the status;
> +		 * ignore the reserved upper 32 bits
> +		 */
> +		status = readl(cxlds->regs.status + CXLDEV_DEV_EVENT_STATUS_OFFSET);
> +		/* Ignore logs unknown to the driver */
> +		status &= CXLDEV_EVENT_STATUS_ALL;
> +		if (!status)
> +			break;
> +		cxl_mem_get_event_records(cxlds, status);
> +		cond_resched();
> +	} while (status);
> +
> +	return IRQ_HANDLED;
> +}
> +
> +static int cxl_event_req_irq(struct cxl_dev_state *cxlds, u8 setting)
> +{
> +	struct device *dev = cxlds->dev;
> +	struct pci_dev *pdev = to_pci_dev(dev);
> +	struct cxl_dev_id *dev_id;
> +	int irq;
> +
> +	if (FIELD_GET(CXLDEV_EVENT_INT_MODE_MASK, setting) != CXL_INT_MSI_MSIX)
> +		return -ENXIO;
> +
> +	/* dev_id must be globally unique and must contain the cxlds */
> +	dev_id = devm_kzalloc(dev, sizeof(*dev_id), GFP_KERNEL);
> +	if (!dev_id)
> +		return -ENOMEM;
> +	dev_id->cxlds = cxlds;
> +
> +	irq =  pci_irq_vector(pdev,
> +			      FIELD_GET(CXLDEV_EVENT_INT_MSGNUM_MASK, setting));
> +	if (irq < 0)
> +		return irq;
> +
> +	return devm_request_threaded_irq(dev, irq, NULL, cxl_event_thread,
> +					 IRQF_SHARED, NULL, dev_id);
> +}
> +
> +static int cxl_event_get_int_policy(struct cxl_dev_state *cxlds,
> +				    struct cxl_event_interrupt_policy *policy)
> +{
> +	struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) {
> +		.opcode = CXL_MBOX_OP_GET_EVT_INT_POLICY,
> +		.payload_out = policy,
> +		.size_out = sizeof(*policy),
> +	};
> +	int rc;
> +
> +	rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
> +	if (rc < 0)
> +		dev_err(cxlds->dev, "Failed to get event interrupt policy : %d",
> +			rc);
> +
> +	return rc;
> +}
> +
> +static int cxl_event_config_msgnums(struct cxl_dev_state *cxlds,
> +				    struct cxl_event_interrupt_policy *policy)
> +{
> +	struct cxl_mbox_cmd mbox_cmd;
> +	int rc;
> +
> +	*policy = (struct cxl_event_interrupt_policy) {
> +		.info_settings = CXL_INT_MSI_MSIX,
> +		.warn_settings = CXL_INT_MSI_MSIX,
> +		.failure_settings = CXL_INT_MSI_MSIX,
> +		.fatal_settings = CXL_INT_MSI_MSIX,
> +	};
> +
> +	mbox_cmd = (struct cxl_mbox_cmd) {
> +		.opcode = CXL_MBOX_OP_SET_EVT_INT_POLICY,
> +		.payload_in = policy,
> +		.size_in = sizeof(*policy),
> +	};
> +
> +	rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
> +	if (rc < 0) {
> +		dev_err(cxlds->dev, "Failed to set event interrupt policy : %d",
> +			rc);
> +		return rc;
> +	}
> +
> +	/* Retrieve final interrupt settings */
> +	return cxl_event_get_int_policy(cxlds, policy);
> +}
> +
> +static int cxl_event_irqsetup(struct cxl_dev_state *cxlds)
> +{
> +	struct cxl_event_interrupt_policy policy;
> +	int rc;
> +
> +	rc = cxl_event_config_msgnums(cxlds, &policy);
> +	if (rc)
> +		return rc;
> +
> +	rc = cxl_event_req_irq(cxlds, policy.info_settings);
> +	if (rc) {
> +		dev_err(cxlds->dev, "Failed to get interrupt for event Info log\n");
> +		return rc;
> +	}
> +
> +	rc = cxl_event_req_irq(cxlds, policy.warn_settings);
> +	if (rc) {
> +		dev_err(cxlds->dev, "Failed to get interrupt for event Warn log\n");
> +		return rc;
> +	}
> +
> +	rc = cxl_event_req_irq(cxlds, policy.failure_settings);
> +	if (rc) {
> +		dev_err(cxlds->dev, "Failed to get interrupt for event Failure log\n");
> +		return rc;
> +	}
> +
> +	rc = cxl_event_req_irq(cxlds, policy.fatal_settings);
> +	if (rc) {
> +		dev_err(cxlds->dev, "Failed to get interrupt for event Fatal log\n");
> +		return rc;
> +	}
> +
> +	return 0;
> +}
> +
> +static bool cxl_event_int_is_fw(u8 setting)
> +{
> +	u8 mode = FIELD_GET(CXLDEV_EVENT_INT_MODE_MASK, setting);
> +
> +	return mode == CXL_INT_FW;
> +}
> +
> +static int cxl_event_config(struct pci_host_bridge *host_bridge,
> +			    struct cxl_dev_state *cxlds)
> +{
> +	struct cxl_event_interrupt_policy policy;
> +	int rc;
> +
> +	/*
> +	 * When BIOS maintains CXL error reporting control, it will process
> +	 * event records.  Only one agent can do so.
> +	 */
> +	if (!host_bridge->native_cxl_error)
> +		return 0;
> +
> +	rc = cxl_mem_alloc_event_buf(cxlds);
> +	if (rc)
> +		return rc;
> +
> +	rc = cxl_event_get_int_policy(cxlds, &policy);
> +	if (rc)
> +		return rc;
> +
> +	if (cxl_event_int_is_fw(policy.info_settings) ||
> +	    cxl_event_int_is_fw(policy.warn_settings) ||
> +	    cxl_event_int_is_fw(policy.failure_settings) ||
> +	    cxl_event_int_is_fw(policy.fatal_settings)) {
> +		dev_err(cxlds->dev, "FW still in control of Event Logs despite _OSC settings\n");
> +		return -EBUSY;
> +	}
> +
> +	rc = cxl_event_irqsetup(cxlds);
> +	if (rc)
> +		return rc;
> +
> +	cxl_mem_get_event_records(cxlds, CXLDEV_EVENT_STATUS_ALL);
> +
> +	return 0;
> +}
> +
>   static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
>   {
>   	struct pci_host_bridge *host_bridge = pci_find_host_bridge(pdev->bus);
> @@ -465,6 +668,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
>   	rc = pcim_enable_device(pdev);
>   	if (rc)
>   		return rc;
> +	pci_set_master(pdev);
>   
>   	cxlds = cxl_dev_state_create(&pdev->dev);
>   	if (IS_ERR(cxlds))
> @@ -521,20 +725,17 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
>   	if (rc)
>   		return rc;
>   
> +	rc = cxl_alloc_irq_vectors(pdev);
> +	if (rc)
> +		return rc;
> +
>   	cxlmd = devm_cxl_add_memdev(cxlds);
>   	if (IS_ERR(cxlmd))
>   		return PTR_ERR(cxlmd);
>   
> -	/*
> -	 * When BIOS maintains CXL error reporting control, it will process
> -	 * event records.  Only one agent can do so.
> -	 */
> -	if (host_bridge->native_cxl_error) {
> -		rc = cxl_mem_alloc_event_buf(cxlds);
> -		if (rc)
> -			return rc;
> -		cxl_mem_get_event_records(cxlds, CXLDEV_EVENT_STATUS_ALL);
> -	}
> +	rc = cxl_event_config(host_bridge, cxlds);
> +	if (rc)
> +		return rc;
>   
>   	if (cxlds->regs.ras) {
>   		pci_enable_pcie_error_reporting(pdev);
>
Davidlohr Bueso May 19, 2023, 1:38 a.m. UTC | #3
On Fri, 19 May 2023, LiuLele wrote:

>In my testing the CXL device /sys/bus/cxl/devices/mem0 is not created, and I get these error messages:
>
>```
>cxl_pci 0000:0d:00.0: Failed to get interrupt for event Info log
>```
>
>My test environment is a qemu CXL emulator with qemu v8.0.0, Linux kernel v6.3.0.
>While with kernel 5.9.13,  /sys/bus/cxl/devices/mem0  can be created.

Yes, this can be annoying and I would argue the probe should not error out
(rough sketch of what I mean below).
Regardless, the actual qemu support is in Jonathan's tree:

https://gitlab.com/jic23/qemu/-/commit/a04e6476df363d1f6bc160577b30dda6564d3f67
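
Something along these lines in cxl_event_config() is what I have in mind -
a rough, untested sketch that just reuses the helpers from this patch:

```
	rc = cxl_event_irqsetup(cxlds);
	if (rc) {
		/* sketch only: don't fail probe, run without event irqs */
		dev_warn(cxlds->dev, "event irq setup failed: %d\n", rc);
		return 0;
	}
```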

Thanks,
Davidlohr
Jonathan Cameron May 19, 2023, 3:10 p.m. UTC | #4
On Thu, 18 May 2023 18:38:46 -0700
Davidlohr Bueso <dave@stgolabs.net> wrote:

> On Fri, 19 May 2023, LiuLele wrote:
> 
> >In my testing the CXL device /sys/bus/cxl/devices/mem0 is not created, and I get these error messages:
> >
> >```
> >cxl_pci 0000:0d:00.0: Failed to get interrupt for event Info log
> >```
> >
> >My test environment is a qemu CXL emulator with qemu v8.0.0, Linux kernel v6.3.0.
> >While with kernel 5.9.13,  /sys/bus/cxl/devices/mem0  can be created.  
> 
> Yes, this can be annoying and would argue the probe should not error out.
> Regardless, the actual qemu support is in Jonathan's tree:
> 
> https://gitlab.com/jic23/qemu/-/commit/a04e6476df363d1f6bc160577b30dda6564d3f67

That just failed to make it into an upstream pull request today due to some
bugs in the poison list set that came before it :(

v6 of the poison list and events support are both on the list now and will
hopefully make this QEMU cycle, so they should be in 8.1.

Jonathan


> 
> Thanks,
> Davidlohr
Ira Weiny May 19, 2023, 3:20 p.m. UTC | #5
Davidlohr Bueso wrote:
> On Fri, 19 May 2023, LiuLele wrote:
> 
> >In my testing the CXL device /sys/bus/cxl/devices/mem0 is not created, and I get these error messages:
> >
> >```
> >cxl_pci 0000:0d:00.0: Failed to get interrupt for event Info log
> >```
> >
> >My test environment is a qemu CXL emulator with qemu v8.0.0, Linux kernel v6.3.0.
> >While with kernel 5.9.13,  /sys/bus/cxl/devices/mem0  can be created.
> 
> Yes, this can be annoying and would argue the probe should not error out.

I had to double check.  Events are mandatory on devices.  On checking
again interrupt support is mandatory as well.  So that is why I errored
out here.  With real HW this should not be an issue.

> Regardless, the actual qemu support is in Jonathan's tree:
> 
> https://gitlab.com/jic23/qemu/-/commit/a04e6476df363d1f6bc160577b30dda6564d3f67

That is the commit you need, but it is probably best to use one of
Jonathan's 'official' branches.  It looks like he just pushed a new one today.

https://gitlab.com/jic23/qemu/-/tree/cxl-2023-05-19

I've not run that one yet, so if you have issues try his previous one; it
is working well for me.

https://gitlab.com/jic23/qemu/-/tree/cxl-2023-04-19

Ira

> 
> Thanks,
> Davidlohr
Jonathan Cameron May 19, 2023, 3:37 p.m. UTC | #6
On Fri, 19 May 2023 08:20:44 -0700
Ira Weiny <ira.weiny@intel.com> wrote:

> Davidlohr Bueso wrote:
> > On Fri, 19 May 2023, LiuLele wrote:
> >   
> > >In my testing the CXL device /sys/bus/cxl/devices/mem0 is not created, and I get these error messages:
> > >
> > >```
> > >cxl_pci 0000:0d:00.0: Failed to get interrupt for event Info log
> > >```
> > >
> > >My test environment is a qemu CXL emulator with qemu v8.0.0, Linux kernel v6.3.0.
> > >While with kernel 5.9.13,  /sys/bus/cxl/devices/mem0  can be created.  
> > 
> > Yes, this can be annoying and would argue the probe should not error out.  
> 
> I had to double check.  Events are mandatory on devices.  On checking
> again interrupt support is mandatory as well.  So that is why I errored
> out here.  With real HW this should not be an issue.
> 
> > Regardless, the actual qemu support is in Jonathan's tree:
> > 
> > https://gitlab.com/jic23/qemu/-/commit/a04e6476df363d1f6bc160577b30dda6564d3f67  
> 
> That is the commit you need but it is probably best to use one of
> Jonathans 'official' branches.  Looks like he just pushed a new one today.
> 
> https://gitlab.com/jic23/qemu/-/tree/cxl-2023-05-19

Leave that one for now.  It was to get the CI tests to run. I need to tidy up
a bit and will announce when I have a clean one...

> 
> I've not run that one yet.  So if you have issues try his previous one it
> is working well for me.
> 
> https://gitlab.com/jic23/qemu/-/tree/cxl-2023-04-19

That one should still be good to go, I think.

Jonathan

> 
> Ira
> 
> > 
> > Thanks,
> > Davidlohr  
> 
>
Luis Chamberlain May 31, 2023, 2:18 a.m. UTC | #7
On Fri, May 19, 2023 at 08:20:44AM -0700, Ira Weiny wrote:
> Davidlohr Bueso wrote:
> > On Fri, 19 May 2023, LiuLele wrote:
> > 
> > >In my testing the CXL device /sys/bus/cxl/devices/mem0 is not created, and I get these error messages:
> > >
> > >```
> > >cxl_pci 0000:0d:00.0: Failed to get interrupt for event Info log
> > >```
> > >
> > >My test environment is a qemu CXL emulator with qemu v8.0.0, Linux kernel v6.3.0.
> > >While with kernel 5.9.13,  /sys/bus/cxl/devices/mem0  can be created.
> > 
> > Yes, this can be annoying and would argue the probe should not error out.
> 
> I had to double check.  Events are mandatory on devices.  On checking
> again interrupt support is mandatory as well.  So that is why I errored
> out here.

The failure essentially creates a user-visible regression, whereas booting
an older kernel fixes it. It is not a friendly error message when testing
kernels, upgrading, or setting up test environments. The only thing I can
think of is introducing a new kconfig symbol to make such cases a bit
clearer for now as things get settled.
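
Purely as an illustration (the config symbol name below is made up), the
error path could then be gated on such an option:

```
	rc = cxl_event_irqsetup(cxlds);
	if (rc) {
		/* hypothetical option name, illustration only */
		if (IS_ENABLED(CONFIG_CXL_PCI_STRICT_EVENT_IRQ))
			return rc;
		dev_warn(cxlds->dev, "event irq setup failed: %d\n", rc);
		return 0;
	}
```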

Otherwise, for testing, this just creates a few cycles of noise, and I'd
imagine it costs a few developer hours as well.

  Luis
Ira Weiny June 1, 2023, 3:12 a.m. UTC | #8
Luis Chamberlain wrote:
> On Fri, May 19, 2023 at 08:20:44AM -0700, Ira Weiny wrote:
> > Davidlohr Bueso wrote:
> > > On Fri, 19 May 2023, LiuLele wrote:
> > > 
> > > >In my testing the CXL device /sys/bus/cxl/devices/mem0 is not created, and I get these error messages:
> > > >
> > > >```
> > > >cxl_pci 0000:0d:00.0: Failed to get interrupt for event Info log
> > > >```
> > > >
> > > >My test environment is a qemu CXL emulator with qemu v8.0.0, Linux kernel v6.3.0.
> > > >While with kernel 5.9.13,  /sys/bus/cxl/devices/mem0  can be created.
> > > 
> > > Yes, this can be annoying and would argue the probe should not error out.
> > 
> > I had to double check.  Events are mandatory on devices.  On checking
> > again interrupt support is mandatory as well.  So that is why I errored
> > out here.
> 
> The failure essentially creates a user visible regression whereas
> booting an older kernel fixes it. It is not a friendly error message
> when testing kernels / upgrading / test environments. The only thing
> I can think of is if a new kconfig symbol is introduced so to make
> such cases a bit more clearer for now as things get settled.

Ah I see now.  This is a qemu without the event support.  :-/

> 
> Otherwise for testing this creates a few cycles of just noise. And I'd
> imagine even a few developer hours.

I don't think the kernel should be changed when it is just following the
spec, but I do sympathize with you.  I know Jonathan is working to get the
event support into qemu.  I've reviewed that series (the patches I did not
author), so I think it will land soon.

Can this be weathered until then?

Ira
Davidlohr Bueso June 1, 2023, 3:46 a.m. UTC | #9
On Wed, 31 May 2023, Ira Weiny wrote:

>I don't think the kernel should be changed for following the spec.

Agreed. If events are mandatory we just have to bite the bullet.

Thanks,
Davidlohr

Patch

diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 5974d1082210..b3964149c77b 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -168,6 +168,10 @@  static inline int ways_to_eiw(unsigned int ways, u8 *eiw)
 				 CXLDEV_EVENT_STATUS_FAIL |	\
 				 CXLDEV_EVENT_STATUS_FATAL)
 
+/* CXL rev 3.0 section 8.2.9.2.4; Table 8-52 */
+#define CXLDEV_EVENT_INT_MODE_MASK	GENMASK(1, 0)
+#define CXLDEV_EVENT_INT_MSGNUM_MASK	GENMASK(7, 4)
+
 /* CXL 2.0 8.2.8.4 Mailbox Registers */
 #define CXLDEV_MBOX_CAPS_OFFSET 0x00
 #define   CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK GENMASK(4, 0)
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 0f272206e294..5bef485cd8d9 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -194,6 +194,23 @@  struct cxl_endpoint_dvsec_info {
 	struct range dvsec_range[2];
 };
 
+/**
+ * Event Interrupt Policy
+ *
+ * CXL rev 3.0 section 8.2.9.2.4; Table 8-52
+ */
+enum cxl_event_int_mode {
+	CXL_INT_NONE		= 0x00,
+	CXL_INT_MSI_MSIX	= 0x01,
+	CXL_INT_FW		= 0x02
+};
+struct cxl_event_interrupt_policy {
+	u8 info_settings;
+	u8 warn_settings;
+	u8 failure_settings;
+	u8 fatal_settings;
+} __packed;
+
 /**
  * struct cxl_event_state - Event log driver state
  *
@@ -288,6 +305,8 @@  enum cxl_opcode {
 	CXL_MBOX_OP_RAW			= CXL_MBOX_OP_INVALID,
 	CXL_MBOX_OP_GET_EVENT_RECORD	= 0x0100,
 	CXL_MBOX_OP_CLEAR_EVENT_RECORD	= 0x0101,
+	CXL_MBOX_OP_GET_EVT_INT_POLICY	= 0x0102,
+	CXL_MBOX_OP_SET_EVT_INT_POLICY	= 0x0103,
 	CXL_MBOX_OP_GET_FW_INFO		= 0x0200,
 	CXL_MBOX_OP_ACTIVATE_FW		= 0x0202,
 	CXL_MBOX_OP_GET_SUPPORTED_LOGS	= 0x0400,
diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
index 77dbdb980b12..a8ea04f536ab 100644
--- a/drivers/cxl/cxlpci.h
+++ b/drivers/cxl/cxlpci.h
@@ -53,6 +53,12 @@ 
 #define	    CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK			GENMASK(15, 8)
 #define     CXL_DVSEC_REG_LOCATOR_BLOCK_OFF_LOW_MASK		GENMASK(31, 16)
 
+/*
+ * NOTE: Currently all the functions which are enabled for CXL require their
+ * vectors to be in the first 16.  Use this as the default max.
+ */
+#define CXL_PCI_DEFAULT_MAX_VECTORS 16
+
 /* Register Block Identifier (RBI) */
 enum cxl_regloc_type {
 	CXL_REGLOC_RBI_EMPTY = 0,
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 0fd7742ee70c..a4ee073fd16d 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -447,6 +447,209 @@  static int cxl_mem_alloc_event_buf(struct cxl_dev_state *cxlds)
 	return 0;
 }
 
+static int cxl_alloc_irq_vectors(struct pci_dev *pdev)
+{
+	int nvecs;
+
+	/*
+	 * Per CXL 3.0 3.1.1 CXL.io Endpoint a function on a CXL device must
+	 * not generate INTx messages if that function participates in
+	 * CXL.cache or CXL.mem.
+	 *
+	 * Additionally pci_alloc_irq_vectors() handles calling
+	 * pci_free_irq_vectors() automatically despite not being called
+	 * pcim_*.  See pci_setup_msi_context().
+	 */
+	nvecs = pci_alloc_irq_vectors(pdev, 1, CXL_PCI_DEFAULT_MAX_VECTORS,
+				      PCI_IRQ_MSIX | PCI_IRQ_MSI);
+	if (nvecs < 1) {
+		dev_dbg(&pdev->dev, "Failed to alloc irq vectors: %d\n", nvecs);
+		return -ENXIO;
+	}
+	return 0;
+}
+
+struct cxl_dev_id {
+	struct cxl_dev_state *cxlds;
+};
+
+static irqreturn_t cxl_event_thread(int irq, void *id)
+{
+	struct cxl_dev_id *dev_id = id;
+	struct cxl_dev_state *cxlds = dev_id->cxlds;
+	u32 status;
+
+	do {
+		/*
+		 * CXL 3.0 8.2.8.3.1: The lower 32 bits are the status;
+		 * ignore the reserved upper 32 bits
+		 */
+		status = readl(cxlds->regs.status + CXLDEV_DEV_EVENT_STATUS_OFFSET);
+		/* Ignore logs unknown to the driver */
+		status &= CXLDEV_EVENT_STATUS_ALL;
+		if (!status)
+			break;
+		cxl_mem_get_event_records(cxlds, status);
+		cond_resched();
+	} while (status);
+
+	return IRQ_HANDLED;
+}
+
+static int cxl_event_req_irq(struct cxl_dev_state *cxlds, u8 setting)
+{
+	struct device *dev = cxlds->dev;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct cxl_dev_id *dev_id;
+	int irq;
+
+	if (FIELD_GET(CXLDEV_EVENT_INT_MODE_MASK, setting) != CXL_INT_MSI_MSIX)
+		return -ENXIO;
+
+	/* dev_id must be globally unique and must contain the cxlds */
+	dev_id = devm_kzalloc(dev, sizeof(*dev_id), GFP_KERNEL);
+	if (!dev_id)
+		return -ENOMEM;
+	dev_id->cxlds = cxlds;
+
+	irq =  pci_irq_vector(pdev,
+			      FIELD_GET(CXLDEV_EVENT_INT_MSGNUM_MASK, setting));
+	if (irq < 0)
+		return irq;
+
+	return devm_request_threaded_irq(dev, irq, NULL, cxl_event_thread,
+					 IRQF_SHARED, NULL, dev_id);
+}
+
+static int cxl_event_get_int_policy(struct cxl_dev_state *cxlds,
+				    struct cxl_event_interrupt_policy *policy)
+{
+	struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) {
+		.opcode = CXL_MBOX_OP_GET_EVT_INT_POLICY,
+		.payload_out = policy,
+		.size_out = sizeof(*policy),
+	};
+	int rc;
+
+	rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+	if (rc < 0)
+		dev_err(cxlds->dev, "Failed to get event interrupt policy : %d",
+			rc);
+
+	return rc;
+}
+
+static int cxl_event_config_msgnums(struct cxl_dev_state *cxlds,
+				    struct cxl_event_interrupt_policy *policy)
+{
+	struct cxl_mbox_cmd mbox_cmd;
+	int rc;
+
+	*policy = (struct cxl_event_interrupt_policy) {
+		.info_settings = CXL_INT_MSI_MSIX,
+		.warn_settings = CXL_INT_MSI_MSIX,
+		.failure_settings = CXL_INT_MSI_MSIX,
+		.fatal_settings = CXL_INT_MSI_MSIX,
+	};
+
+	mbox_cmd = (struct cxl_mbox_cmd) {
+		.opcode = CXL_MBOX_OP_SET_EVT_INT_POLICY,
+		.payload_in = policy,
+		.size_in = sizeof(*policy),
+	};
+
+	rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+	if (rc < 0) {
+		dev_err(cxlds->dev, "Failed to set event interrupt policy : %d",
+			rc);
+		return rc;
+	}
+
+	/* Retrieve final interrupt settings */
+	return cxl_event_get_int_policy(cxlds, policy);
+}
+
+static int cxl_event_irqsetup(struct cxl_dev_state *cxlds)
+{
+	struct cxl_event_interrupt_policy policy;
+	int rc;
+
+	rc = cxl_event_config_msgnums(cxlds, &policy);
+	if (rc)
+		return rc;
+
+	rc = cxl_event_req_irq(cxlds, policy.info_settings);
+	if (rc) {
+		dev_err(cxlds->dev, "Failed to get interrupt for event Info log\n");
+		return rc;
+	}
+
+	rc = cxl_event_req_irq(cxlds, policy.warn_settings);
+	if (rc) {
+		dev_err(cxlds->dev, "Failed to get interrupt for event Warn log\n");
+		return rc;
+	}
+
+	rc = cxl_event_req_irq(cxlds, policy.failure_settings);
+	if (rc) {
+		dev_err(cxlds->dev, "Failed to get interrupt for event Failure log\n");
+		return rc;
+	}
+
+	rc = cxl_event_req_irq(cxlds, policy.fatal_settings);
+	if (rc) {
+		dev_err(cxlds->dev, "Failed to get interrupt for event Fatal log\n");
+		return rc;
+	}
+
+	return 0;
+}
+
+static bool cxl_event_int_is_fw(u8 setting)
+{
+	u8 mode = FIELD_GET(CXLDEV_EVENT_INT_MODE_MASK, setting);
+
+	return mode == CXL_INT_FW;
+}
+
+static int cxl_event_config(struct pci_host_bridge *host_bridge,
+			    struct cxl_dev_state *cxlds)
+{
+	struct cxl_event_interrupt_policy policy;
+	int rc;
+
+	/*
+	 * When BIOS maintains CXL error reporting control, it will process
+	 * event records.  Only one agent can do so.
+	 */
+	if (!host_bridge->native_cxl_error)
+		return 0;
+
+	rc = cxl_mem_alloc_event_buf(cxlds);
+	if (rc)
+		return rc;
+
+	rc = cxl_event_get_int_policy(cxlds, &policy);
+	if (rc)
+		return rc;
+
+	if (cxl_event_int_is_fw(policy.info_settings) ||
+	    cxl_event_int_is_fw(policy.warn_settings) ||
+	    cxl_event_int_is_fw(policy.failure_settings) ||
+	    cxl_event_int_is_fw(policy.fatal_settings)) {
+		dev_err(cxlds->dev, "FW still in control of Event Logs despite _OSC settings\n");
+		return -EBUSY;
+	}
+
+	rc = cxl_event_irqsetup(cxlds);
+	if (rc)
+		return rc;
+
+	cxl_mem_get_event_records(cxlds, CXLDEV_EVENT_STATUS_ALL);
+
+	return 0;
+}
+
 static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct pci_host_bridge *host_bridge = pci_find_host_bridge(pdev->bus);
@@ -465,6 +668,7 @@  static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	rc = pcim_enable_device(pdev);
 	if (rc)
 		return rc;
+	pci_set_master(pdev);
 
 	cxlds = cxl_dev_state_create(&pdev->dev);
 	if (IS_ERR(cxlds))
@@ -521,20 +725,17 @@  static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (rc)
 		return rc;
 
+	rc = cxl_alloc_irq_vectors(pdev);
+	if (rc)
+		return rc;
+
 	cxlmd = devm_cxl_add_memdev(cxlds);
 	if (IS_ERR(cxlmd))
 		return PTR_ERR(cxlmd);
 
-	/*
-	 * When BIOS maintains CXL error reporting control, it will process
-	 * event records.  Only one agent can do so.
-	 */
-	if (host_bridge->native_cxl_error) {
-		rc = cxl_mem_alloc_event_buf(cxlds);
-		if (rc)
-			return rc;
-		cxl_mem_get_event_records(cxlds, CXLDEV_EVENT_STATUS_ALL);
-	}
+	rc = cxl_event_config(host_bridge, cxlds);
+	if (rc)
+		return rc;
 
 	if (cxlds->regs.ras) {
 		pci_enable_pcie_error_reporting(pdev);