@@ -139,8 +139,108 @@ int cxl_create_prot_err_info(struct pci_dev *_pdev, int severity,
return 0;
}
+EXPORT_SYMBOL_NS_GPL(cxl_create_prot_err_info, "CXL");
-struct work_struct cxl_prot_err_work;
+static void cxl_do_recovery(struct pci_dev *pdev) { }
+
+static int cxl_rch_handle_error_iter(struct pci_dev *pdev, void *data)
+{
+	struct cxl_prot_error_info *err_info = data;
+	const struct cxl_error_handlers *err_handler;
+	struct device *dev = err_info->dev;
+	struct cxl_driver *pdrv;
+
+	/*
+	 * The capability, status, and control fields in Device 0,
+	 * Function 0 DVSEC control the CXL functionality of the
+	 * entire device (CXL 3.0, 8.1.3).
+	 */
+	if (pdev->devfn != PCI_DEVFN(0, 0))
+		return 0;
+
+	/*
+	 * CXL Memory Devices must have the 502h class code set (CXL
+	 * 3.0, 8.1.12.1).
+	 */
+	if ((pdev->class >> 8) != PCI_CLASS_MEMORY_CXL)
+		return 0;
+
+	if (!is_cxl_memdev(dev) || !dev->driver)
+		return 0;
+
+	pdrv = to_cxl_drv(dev->driver);
+	if (!pdrv || !pdrv->err_handler)
+		return 0;
+
+	err_handler = pdrv->err_handler;
+	if (err_info->severity == AER_CORRECTABLE) {
+		if (err_handler->cor_error_detected)
+			err_handler->cor_error_detected(dev, err_info);
+	} else if (err_handler->error_detected) {
+		cxl_do_recovery(pdev);
+	}
+
+	return 0;
+}
+
+static void cxl_handle_prot_error(struct pci_dev *pdev, struct cxl_prot_error_info *err_info)
+{
+	if (!pdev || !err_info)
+		return;
+
+	/*
+	 * Internal errors of an RCEC indicate an AER error in an
+	 * RCH's downstream port. Check and handle them in the CXL.mem
+	 * device driver.
+	 */
+	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_EC)
+		return pcie_walk_rcec(pdev, cxl_rch_handle_error_iter, err_info);
+
+	if (err_info->severity == AER_CORRECTABLE) {
+		struct device *dev __free(put_device) = get_device(err_info->dev);
+		struct cxl_driver *pdrv;
+		int aer = pdev->aer_cap;
+
+		if (!dev || !dev->driver)
+			return;
+
+		if (aer) {
+			u32 ras_status;
+
+			pci_read_config_dword(pdev, aer + PCI_ERR_COR_STATUS, &ras_status);
+			pci_write_config_dword(pdev, aer + PCI_ERR_COR_STATUS,
+					       ras_status);
+		}
+
+		pdrv = to_cxl_drv(dev->driver);
+		if (!pdrv || !pdrv->err_handler ||
+		    !pdrv->err_handler->cor_error_detected)
+			return;
+
+		pdrv->err_handler->cor_error_detected(dev, err_info);
+		pcie_clear_device_status(pdev);
+	} else {
+		cxl_do_recovery(pdev);
+	}
+}
+
+static void cxl_prot_err_work_fn(struct work_struct *work)
+{
+	struct cxl_prot_err_work_data wd;
+
+	while (cxl_prot_err_kfifo_get(&wd)) {
+		struct cxl_prot_error_info *err_info = &wd.err_info;
+		struct device *dev __free(put_device) = get_device(err_info->dev);
+		struct pci_dev *pdev __free(pci_dev_put) = pci_dev_get(err_info->pdev);
+
+		if (!dev || !pdev)
+			continue;
+
+		cxl_handle_prot_error(pdev, err_info);
+	}
+}
+
+static DECLARE_WORK(cxl_prot_err_work, cxl_prot_err_work_fn);
int cxl_ras_init(void)
{
@@ -11,6 +11,8 @@
#include <linux/log2.h>
#include <linux/node.h>
#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
extern const struct nvdimm_security_ops *cxl_security_ops;
@@ -786,6 +788,20 @@ static inline int cxl_root_decoder_autoremove(struct device *host,
}
int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint);
+int cxl_create_prot_err_info(struct pci_dev *pdev, int severity,
+ struct cxl_prot_error_info *err_info);
+
+/* CXL bus error event callbacks */
+struct cxl_error_handlers {
+ /* CXL bus error detected on this device */
+ pci_ers_result_t (*error_detected)(struct device *dev,
+ struct cxl_prot_error_info *err_info);
+
+ /* Allow device driver to record more details of a correctable error */
+ void (*cor_error_detected)(struct device *dev,
+ struct cxl_prot_error_info *err_info);
+};
+
/**
* struct cxl_endpoint_dvsec_info - Cached DVSEC info
* @mem_enabled: cached value of mem_enabled in the DVSEC at init time
@@ -820,6 +836,7 @@ struct cxl_driver {
void (*remove)(struct device *dev);
struct device_driver drv;
int id;
+ const struct cxl_error_handlers *err_handler;
};
#define to_cxl_drv(__drv) container_of_const(__drv, struct cxl_driver, drv)
@@ -2320,6 +2320,7 @@ void pcie_clear_device_status(struct pci_dev *dev)
pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
}
+EXPORT_SYMBOL_NS_GPL(pcie_clear_device_status, "CXL");
#endif
/**
@@ -593,16 +593,10 @@ static inline bool pci_dpc_recovered(struct pci_dev *pdev) { return false; }
void pci_rcec_init(struct pci_dev *dev);
void pci_rcec_exit(struct pci_dev *dev);
void pcie_link_rcec(struct pci_dev *rcec);
-void pcie_walk_rcec(struct pci_dev *rcec,
- int (*cb)(struct pci_dev *, void *),
- void *userdata);
#else
static inline void pci_rcec_init(struct pci_dev *dev) { }
static inline void pci_rcec_exit(struct pci_dev *dev) { }
static inline void pcie_link_rcec(struct pci_dev *rcec) { }
-static inline void pcie_walk_rcec(struct pci_dev *rcec,
- int (*cb)(struct pci_dev *, void *),
- void *userdata) { }
#endif
#ifdef CONFIG_PCI_ATS
@@ -288,6 +288,7 @@ void pci_aer_clear_fatal_status(struct pci_dev *dev)
if (status)
pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, status);
}
+EXPORT_SYMBOL_GPL(pci_aer_clear_fatal_status);
/**
* pci_aer_raw_clear_status - Clear AER error registers.
@@ -1018,47 +1019,6 @@ static bool is_cxl_error(struct aer_err_info *info)
return is_internal_error(info);
}
-static int cxl_rch_handle_error_iter(struct pci_dev *dev, void *data)
-{
-	struct aer_err_info *info = (struct aer_err_info *)data;
-	const struct pci_error_handlers *err_handler;
-
-	if (!is_cxl_mem_dev(dev) || !cxl_error_is_native(dev))
-		return 0;
-
-	/* protect dev->driver */
-	device_lock(&dev->dev);
-
-	err_handler = dev->driver ? dev->driver->err_handler : NULL;
-	if (!err_handler)
-		goto out;
-
-	if (info->severity == AER_CORRECTABLE) {
-		if (err_handler->cor_error_detected)
-			err_handler->cor_error_detected(dev);
-	} else if (err_handler->error_detected) {
-		if (info->severity == AER_NONFATAL)
-			err_handler->error_detected(dev, pci_channel_io_normal);
-		else if (info->severity == AER_FATAL)
-			err_handler->error_detected(dev, pci_channel_io_frozen);
-	}
-out:
-	device_unlock(&dev->dev);
-	return 0;
-}
-
-static void cxl_rch_handle_error(struct pci_dev *dev, struct aer_err_info *info)
-{
-	/*
-	 * Internal errors of an RCEC indicate an AER error in an
-	 * RCH's downstream port. Check and handle them in the CXL.mem
-	 * device driver.
-	 */
-	if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_EC &&
-	    is_internal_error(info))
-		pcie_walk_rcec(dev, cxl_rch_handle_error_iter, info);
-}
-
static int handles_cxl_error_iter(struct pci_dev *dev, void *data)
{
bool *handles_cxl = data;
@@ -145,6 +145,7 @@ void pcie_walk_rcec(struct pci_dev *rcec, int (*cb)(struct pci_dev *, void *),
walk_rcec(walk_rcec_helper, &rcec_data);
}
+EXPORT_SYMBOL_NS_GPL(pcie_walk_rcec, "CXL");
void pci_rcec_init(struct pci_dev *dev)
{
@@ -66,12 +66,14 @@ struct cxl_prot_err_work_data {
#if defined(CONFIG_PCIEAER)
int pci_aer_clear_nonfatal_status(struct pci_dev *dev);
+void pci_aer_clear_fatal_status(struct pci_dev *dev);
int pcie_aer_is_native(struct pci_dev *dev);
#else
static inline int pci_aer_clear_nonfatal_status(struct pci_dev *dev)
{
return -EINVAL;
}
+static inline void pci_aer_clear_fatal_status(struct pci_dev *dev) { }
static inline int pcie_aer_is_native(struct pci_dev *dev) { return 0; }
#endif
@@ -1802,6 +1802,9 @@ extern bool pcie_ports_native;
int pcie_set_target_speed(struct pci_dev *port, enum pci_bus_speed speed_req,
bool use_lt);
+void pcie_walk_rcec(struct pci_dev *rcec,
+ int (*cb)(struct pci_dev *, void *),
+ void *userdata);
#else
#define pcie_ports_disabled true
#define pcie_ports_native false
@@ -1812,8 +1815,15 @@ static inline int pcie_set_target_speed(struct pci_dev *port,
{
return -EOPNOTSUPP;
}
+
+static inline void pcie_walk_rcec(struct pci_dev *rcec,
+ int (*cb)(struct pci_dev *, void *),
+ void *userdata) { }
+
#endif
+void pcie_clear_device_status(struct pci_dev *dev);
+
#define PCIE_LINK_STATE_L0S (BIT(0) | BIT(1)) /* Upstr/dwnstr L0s */
#define PCIE_LINK_STATE_L1 BIT(2) /* L1 state */
#define PCIE_LINK_STATE_L1_1 BIT(3) /* ASPM L1.1 state */
The AER driver is now designed to forward CXL protocol errors to the CXL
driver. Update the CXL driver with functionality to dequeue the forwarded
CXL error from the kfifo, and to process CXL protocol errors using CXL
protocol error handlers.

First, move cxl_rch_handle_error_iter() from aer.c to cxl/core/ras.c. Drop
cxl_rch_handle_error() from aer.c as it is no longer needed.

Introduce cxl_prot_err_work_fn() to dequeue work forwarded by the AER
service driver. This begins CXL protocol error processing with a call to
cxl_handle_prot_error().

Introduce cxl_handle_prot_error() to differentiate between Restricted CXL
Host (RCH) protocol errors and CXL Virtual Hierarchy (VH) protocol errors.
RCH errors are handled by walking the associated Root Complex Event
Collector's (RCEC) secondary bus, looking for the Root Complex Integrated
Endpoint (RCiEP) that handles the error. Export pcie_walk_rcec() so the CXL
driver can walk the RCEC's downstream bus in search of the RCiEP.

VH correctable error (CE) processing calls the CXL CE handler if present.
VH uncorrectable errors (UCE) call cxl_do_recovery(), implemented as a stub
for now and to be filled in by a future patch.

Export pci_aer_clear_fatal_status() and pcie_clear_device_status(), used to
clean up AER status after handling.

Create 'struct cxl_error_handlers' and a cxl_driver::err_handler pointer,
analogous to pci_driver::err_handler. Add handlers for CE and UCE CXL.io
errors, and pass 'struct cxl_prot_error_info' as a parameter to both the CE
and UCE handlers.

Signed-off-by: Terry Bowman <terry.bowman@amd.com>
---
 drivers/cxl/core/ras.c  | 102 +++++++++++++++++++++++++++++++++++++++-
 drivers/cxl/cxl.h       |  17 +++++++
 drivers/pci/pci.c       |   1 +
 drivers/pci/pci.h       |   6 ---
 drivers/pci/pcie/aer.c  |  42 +----
 drivers/pci/pcie/rcec.c |   1 +
 include/linux/aer.h     |   2 +
 include/linux/pci.h     |  10 ++++
 8 files changed, 133 insertions(+), 48 deletions(-)
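
Note for reviewers: below is a minimal sketch of how a CXL driver might wire
up the new cxl_driver::err_handler pointer added by this patch. The handler
names and bodies (cxl_mem_cor_error_detected(), cxl_mem_error_detected(),
cxl_mem_error_handlers) are hypothetical illustrations and not part of this
series; the driver registration mirrors the existing cxl_mem driver only as
an example.

static void cxl_mem_cor_error_detected(struct device *dev,
				       struct cxl_prot_error_info *err_info)
{
	/* Correctable protocol error: record it, no recovery required */
	dev_dbg(dev, "CXL correctable protocol error reported\n");
}

static pci_ers_result_t cxl_mem_error_detected(struct device *dev,
					       struct cxl_prot_error_info *err_info)
{
	/* Uncorrectable protocol error: recovery arrives with cxl_do_recovery() */
	return PCI_ERS_RESULT_NEED_RESET;
}

static const struct cxl_error_handlers cxl_mem_error_handlers = {
	.error_detected		= cxl_mem_error_detected,
	.cor_error_detected	= cxl_mem_cor_error_detected,
};

static struct cxl_driver cxl_mem_driver = {
	.name		= "cxl_mem",
	.probe		= cxl_mem_probe,	/* existing probe, unchanged */
	.id		= CXL_DEVICE_MEMORY_EXPANDER,
	.err_handler	= &cxl_mem_error_handlers,
};

Because delivery goes through the kfifo and cxl_prot_err_work_fn() above,
these callbacks run in process context with references held on both the
struct device and the struct pci_dev for the duration of the call.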