Message ID | 20211014185400.10451-2-nchatrad@amd.com (mailing list archive) |
---|---|
State | New, archived |
Series | [v4,1/4] x86/amd_nb: Add support for northbridges on Aldebaran |
On Fri, Oct 15, 2021 at 12:23:57AM +0530, Naveen Krishna Chatradhi wrote:
> From: Muralidhara M K <muralimk@amd.com>
>
> On newer systems the CPUs manage MCA errors reported from the GPUs.
> Enumerate the GPU nodes with the AMD NB framework to support EDAC.
>
> GPU nodes are enumerated in sequential order based on the PCI hierarchy,
> and the first GPU node is assumed to have an "AMD Node ID" value after
> CPU Nodes are fully populated.
>
> Aldebaran is an AMD GPU, GPU drivers are part of the DRM framework
> https://lists.freedesktop.org/archives/amd-gfx/2021-February/059694.html
>
> Each Aldebaran GPU has 2 Data Fabrics, which are enumerated as 2 nodes.
> With this implementation detail, the Data Fabric on the GPU nodes can be
> accessed the same way as the Data Fabric on CPU nodes.
>
> Special handling was necessary in northbridge enumeration as the
> roots_per_misc value is different for GPU and CPU nodes.
>
> Signed-off-by: Muralidhara M K <muralimk@amd.com>
> Co-developed-by: Naveen Krishna Chatradhi <nchatrad@amd.com>
> Signed-off-by: Naveen Krishna Chatradhi <nchatrad@amd.com>
> Link: https://lkml.kernel.org/r/20210823185437.94417-2-nchatrad@amd.com
> ---
> Changes since v3:
> 1. Use word "gpu" instead of "noncpu" in the patch
> 2. Do not create pci_dev_ids arrays for gpu nodes
> 3. Identify the gpu node start index from DF18F1 registers on the GPU nodes.
>    a. Export cpu node count and gpu start node id
>
> Changes since v2:
> 1. Added Reviewed-by Yazen Ghannam
>
> Changes since v1:
> 1. Modified the commit message and comments in the code
> 2. Squashed patch 1/7: "x86/amd_nb: Add Aldebaran device to PCI IDs"
>
>  arch/x86/include/asm/amd_nb.h |   9 +++
>  arch/x86/kernel/amd_nb.c      | 131 ++++++++++++++++++++++++++++------
>  include/linux/pci_ids.h       |   1 +
>  3 files changed, 118 insertions(+), 23 deletions(-)
>
> diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
> index 455066a06f60..5898300f11ed 100644
> --- a/arch/x86/include/asm/amd_nb.h
> +++ b/arch/x86/include/asm/amd_nb.h
> @@ -68,10 +68,17 @@ struct amd_northbridge {
>  	struct threshold_bank *bank4;
>  };
>
> +/* heterogeneous system node type map variables */
> +struct amd_node_map {
> +	u16 gpu_node_start_id;
> +	u16 cpu_node_count;
> +};
> +
>  struct amd_northbridge_info {
>  	u16 num;
>  	u64 flags;
>  	struct amd_northbridge *nb;
> +	struct amd_node_map *nmap;

Just a minor nit, but does the name "nmap" conflict with anything in the
kernel? At first glance it looks like "network" map.
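For illustration, the rename being hinted at could be as small as this (a
hypothetical sketch, not part of the posted patch; the name "nodemap" is
only a placeholder suggestion):

	struct amd_northbridge_info {
		u16 num;
		u64 flags;
		struct amd_northbridge *nb;
		struct amd_node_map *nodemap;	/* harder to misread as "network map" */
	};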
> };
>
> #define AMD_NB_GART			BIT(0)
> @@ -83,6 +90,8 @@ struct amd_northbridge_info {
>  u16 amd_nb_num(void);
>  bool amd_nb_has_feature(unsigned int feature);
>  struct amd_northbridge *node_to_amd_nb(int node);
> +u16 amd_gpu_node_start_id(void);
> +u16 amd_cpu_node_count(void);
>
>  static inline u16 amd_pci_dev_to_node_id(struct pci_dev *pdev)
>  {
> diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
> index c92c9c774c0e..54a6a7462f07 100644
> --- a/arch/x86/kernel/amd_nb.c
> +++ b/arch/x86/kernel/amd_nb.c
> @@ -19,6 +19,7 @@
>  #define PCI_DEVICE_ID_AMD_17H_M10H_ROOT		0x15d0
>  #define PCI_DEVICE_ID_AMD_17H_M30H_ROOT		0x1480
>  #define PCI_DEVICE_ID_AMD_17H_M60H_ROOT		0x1630
> +#define PCI_DEVICE_ID_AMD_ALDEBARAN_ROOT	0x14bb
>  #define PCI_DEVICE_ID_AMD_17H_DF_F4		0x1464
>  #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4	0x15ec
>  #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4	0x1494
> @@ -28,6 +29,7 @@
>  #define PCI_DEVICE_ID_AMD_19H_M40H_ROOT		0x14b5
>  #define PCI_DEVICE_ID_AMD_19H_M40H_DF_F4	0x167d
>  #define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4	0x166e
> +#define PCI_DEVICE_ID_AMD_ALDEBARAN_DF_F4	0x14d4
>
>  /* Protect the PCI config register pairs used for SMN and DF indirect access. */
>  static DEFINE_MUTEX(smn_mutex);
> @@ -40,6 +42,7 @@ static const struct pci_device_id amd_root_ids[] = {
>  	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
>  	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
>  	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_ROOT) },
> +	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_ALDEBARAN_ROOT) },
>  	{}
>  };
>
> @@ -63,6 +66,7 @@ static const struct pci_device_id amd_nb_misc_ids[] = {
>  	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
>  	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F3) },
>  	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
> +	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_ALDEBARAN_DF_F3) },
>  	{}
>  };
>
> @@ -81,6 +85,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
>  	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) },
>  	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
>  	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
> +	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_ALDEBARAN_DF_F4) },
>  	{}
>  };
>
> @@ -126,6 +131,55 @@ struct amd_northbridge *node_to_amd_nb(int node)
>  }
>  EXPORT_SYMBOL_GPL(node_to_amd_nb);
>
> +/*
> + * GPU start index and CPU count values on an heterogeneous system,
> + * these values will be used by the AMD EDAC and MCE modules.
> + */
> +u16 amd_gpu_node_start_id(void)
> +{
> +	return (amd_northbridges.nmap) ?
> +		amd_northbridges.nmap->gpu_node_start_id : 0;
> +}
> +EXPORT_SYMBOL_GPL(amd_gpu_node_start_id);
> +
> +u16 amd_cpu_node_count(void)
> +{
> +	return (amd_northbridges.nmap) ?
> +		amd_northbridges.nmap->cpu_node_count : amd_northbridges.num;
> +}
> +EXPORT_SYMBOL_GPL(amd_cpu_node_count);
> +
> +/* DF18xF1 regsters on Aldebaran GPU */
> +#define REG_LOCAL_NODE_TYPE_MAP		0x144
> +#define REG_RMT_NODE_TYPE_MAP		0x148
> +
> +#define PCI_DEVICE_ID_AMD_ALDEBARAN_DF_F1	0x14d1
> +
> +static int amd_get_node_map(void)
> +{
> +	struct amd_node_map *np;
> +	struct pci_dev *pdev = NULL;
> +	u32 tmp;
> +

These lines should be ordered from longest to shortest. You could even
combine the "struct pci_dev" line with the line below. Just pass NULL to
pci_get_device().
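Concretely, that reordering might look like this (an illustrative sketch,
not the posted code):

	static int amd_get_node_map(void)
	{
		/* Declaration combined with the lookup; pass NULL directly. */
		struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_AMD,
						      PCI_DEVICE_ID_AMD_ALDEBARAN_DF_F1, NULL);
		struct amd_node_map *np;
		u32 tmp;

		if (!pdev)
			return -ENODEV;
		...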
> +	pdev = pci_get_device(PCI_VENDOR_ID_AMD,
> +			      PCI_DEVICE_ID_AMD_ALDEBARAN_DF_F1, pdev);
> +	if (!pdev)
> +		return -ENODEV;
> +
> +	np = kmalloc(sizeof(*np), GFP_KERNEL);
> +	if (!np)
> +		return -ENOMEM;
> +
> +	pci_read_config_dword(pdev, REG_LOCAL_NODE_TYPE_MAP, &tmp);
> +	np->gpu_node_start_id = tmp & 0xFFF;
> +
> +	pci_read_config_dword(pdev, REG_RMT_NODE_TYPE_MAP, &tmp);
> +	np->cpu_node_count = tmp >> 16 & 0xFFF;
> +

The PCI device, register offsets, and bit fields all look correct. I think
a comment with an explanation would be helpful. Something that mentions how
some DF devices have these registers with "Base Node ID" and "Node Count"
fields. "Local Node Type" refers to nodes with the same type as that from
which the register is read, and "Remote Node Type" refers to nodes with a
different type. So if you read the registers from a GPU node, then "Local"
refers to GPU nodes and "Remote" refers to CPU nodes, and vice versa.

Since this information is only needed when we have a CPU+GPU system, we
only need to gather it when we find a GPU device.

> +	amd_northbridges.nmap = np;
> +	return 0;
> +}
> +
>  static struct pci_dev *next_northbridge(struct pci_dev *dev,
>  					const struct pci_device_id *ids)
>  {
> @@ -230,6 +284,27 @@ int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
>  }
>  EXPORT_SYMBOL_GPL(amd_df_indirect_read);
>
> +struct pci_dev *get_root_devs(struct pci_dev *root,
> +			      const struct pci_device_id *root_ids,
> +			      u16 roots_per_misc)
> +{
> +	u16 j;
> +
> +	/*
> +	 * If there are more PCI root devices than data fabric/
> +	 * system management network interfaces, then the (N)
> +	 * PCI roots per DF/SMN interface are functionally the
> +	 * same (for DF/SMN access) and N-1 are redundant. N-1
> +	 * PCI roots should be skipped per DF/SMN interface so
> +	 * the following DF/SMN interfaces get mapped to
> +	 * correct PCI roots.
> +	 */
> +	for (j = 0; j < roots_per_misc; j++)
> +		root = next_northbridge(root, root_ids);
> +
> +	return root;
> +}
> +
>  int amd_cache_northbridges(void)
>  {
>  	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
> @@ -237,10 +312,10 @@ int amd_cache_northbridges(void)
>  	const struct pci_device_id *root_ids = amd_root_ids;
>  	struct pci_dev *root, *misc, *link;
>  	struct amd_northbridge *nb;
> -	u16 roots_per_misc = 0;
> -	u16 misc_count = 0;
> -	u16 root_count = 0;
> -	u16 i, j;
> +	u16 roots_per_misc = 0, gpu_roots_per_misc = 0;
> +	u16 misc_count = 0, gpu_misc_count = 0;
> +	u16 root_count = 0, gpu_root_count = 0;
> +	u16 i;
>
>  	if (amd_northbridges.num)
>  		return 0;
> @@ -252,15 +327,23 @@ int amd_cache_northbridges(void)
>  	}
>
>  	misc = NULL;
> -	while ((misc = next_northbridge(misc, misc_ids)) != NULL)
> -		misc_count++;
> +	while ((misc = next_northbridge(misc, misc_ids)) != NULL) {
> +		if (misc->device == PCI_DEVICE_ID_AMD_ALDEBARAN_DF_F3)

I think this may need to be extended for future devices. In which case, it
may make sense to go back to the original solution of having another list
of IDs just for GPUs. I can't say for sure though. So maybe we keep this
how you have it and deal with future systems when we come to them.
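If that ever becomes necessary, the ID-list alternative could look roughly
like this (a hypothetical sketch; the array name "amd_gpu_misc_ids" is
invented here, and pci_match_id() is the standard helper for matching a
device against an ID table):

	static const struct pci_device_id amd_gpu_misc_ids[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_ALDEBARAN_DF_F3) },
		/* Future GPU DF F3 device IDs would be added here. */
		{}
	};

	...
	misc = NULL;
	while ((misc = next_northbridge(misc, misc_ids)) != NULL) {
		if (pci_match_id(amd_gpu_misc_ids, misc))
			gpu_misc_count++;
		else
			misc_count++;
	}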
> +			gpu_misc_count++;
> +		else
> +			misc_count++;
> +	}
>
>  	if (!misc_count)
>  		return -ENODEV;
>
>  	root = NULL;
> -	while ((root = next_northbridge(root, root_ids)) != NULL)
> -		root_count++;
> +	while ((root = next_northbridge(root, root_ids)) != NULL) {
> +		if (root->device == PCI_DEVICE_ID_AMD_ALDEBARAN_ROOT)
> +			gpu_root_count++;
> +		else
> +			root_count++;
> +	}
>
>  	if (root_count) {
>  		roots_per_misc = root_count / misc_count;
> @@ -275,33 +358,35 @@ int amd_cache_northbridges(void)
>  		}
>  	}
>
> -	nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
> +	/*
> +	 * The number of miscs, roots and roots_per_misc might vary on different
> +	 * nodes of a heterogeneous system.
> +	 * calculate roots_per_misc accordingly in order to skip the redundant

Capitalize "calculate".

> +	 * roots and map the DF/SMN interfaces to correct PCI roots.
> +	 */
> +	if (gpu_root_count && gpu_misc_count) {
> +		if (amd_get_node_map())
> +			return -ENOMEM;

amd_get_node_map() can return -ENODEV and -ENOMEM, but only -ENOMEM is
passed along here.

I'm not sure that the -ENODEV case is necessary. I think you can just
return silently if the GPU PCI ID is not found. In that case, the nmap
structure won't be set, so the code will act as if the system is CPU-only.

Or you can save the return value from amd_get_node_map() and return that.
Maybe this would be the more conservative behavior: we want to give an
error if we found some GPU devices but didn't find the one device we need
in order to gather the node map info.

Thanks,
Yazen
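For reference, the more conservative option described above, saving and
propagating the return value from amd_get_node_map(), might look roughly
like this (an illustrative sketch, not the posted code):

	if (gpu_root_count && gpu_misc_count) {
		int ret = amd_get_node_map();

		/* Pass along -ENODEV or -ENOMEM instead of folding both into -ENOMEM. */
		if (ret)
			return ret;

		gpu_roots_per_misc = gpu_root_count / gpu_misc_count;
	}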
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index 455066a06f60..5898300f11ed 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -68,10 +68,17 @@ struct amd_northbridge {
 	struct threshold_bank *bank4;
 };
 
+/* heterogeneous system node type map variables */
+struct amd_node_map {
+	u16 gpu_node_start_id;
+	u16 cpu_node_count;
+};
+
 struct amd_northbridge_info {
 	u16 num;
 	u64 flags;
 	struct amd_northbridge *nb;
+	struct amd_node_map *nmap;
 };
 
 #define AMD_NB_GART			BIT(0)
@@ -83,6 +90,8 @@ struct amd_northbridge_info {
 u16 amd_nb_num(void);
 bool amd_nb_has_feature(unsigned int feature);
 struct amd_northbridge *node_to_amd_nb(int node);
+u16 amd_gpu_node_start_id(void);
+u16 amd_cpu_node_count(void);
 
 static inline u16 amd_pci_dev_to_node_id(struct pci_dev *pdev)
 {
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index c92c9c774c0e..54a6a7462f07 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -19,6 +19,7 @@
 #define PCI_DEVICE_ID_AMD_17H_M10H_ROOT		0x15d0
 #define PCI_DEVICE_ID_AMD_17H_M30H_ROOT		0x1480
 #define PCI_DEVICE_ID_AMD_17H_M60H_ROOT		0x1630
+#define PCI_DEVICE_ID_AMD_ALDEBARAN_ROOT	0x14bb
 #define PCI_DEVICE_ID_AMD_17H_DF_F4		0x1464
 #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4	0x15ec
 #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4	0x1494
@@ -28,6 +29,7 @@
 #define PCI_DEVICE_ID_AMD_19H_M40H_ROOT		0x14b5
 #define PCI_DEVICE_ID_AMD_19H_M40H_DF_F4	0x167d
 #define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4	0x166e
+#define PCI_DEVICE_ID_AMD_ALDEBARAN_DF_F4	0x14d4
 
 /* Protect the PCI config register pairs used for SMN and DF indirect access. */
 static DEFINE_MUTEX(smn_mutex);
@@ -40,6 +42,7 @@ static const struct pci_device_id amd_root_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_ROOT) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_ALDEBARAN_ROOT) },
 	{}
 };
 
@@ -63,6 +66,7 @@ static const struct pci_device_id amd_nb_misc_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F3) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_ALDEBARAN_DF_F3) },
 	{}
 };
 
@@ -81,6 +85,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_ALDEBARAN_DF_F4) },
 	{}
 };
 
@@ -126,6 +131,55 @@ struct amd_northbridge *node_to_amd_nb(int node)
 }
 EXPORT_SYMBOL_GPL(node_to_amd_nb);
 
+/*
+ * GPU start index and CPU count values on an heterogeneous system,
+ * these values will be used by the AMD EDAC and MCE modules.
+ */
+u16 amd_gpu_node_start_id(void)
+{
+	return (amd_northbridges.nmap) ?
+		amd_northbridges.nmap->gpu_node_start_id : 0;
+}
+EXPORT_SYMBOL_GPL(amd_gpu_node_start_id);
+
+u16 amd_cpu_node_count(void)
+{
+	return (amd_northbridges.nmap) ?
+		amd_northbridges.nmap->cpu_node_count : amd_northbridges.num;
+}
+EXPORT_SYMBOL_GPL(amd_cpu_node_count);
+
+/* DF18xF1 regsters on Aldebaran GPU */
+#define REG_LOCAL_NODE_TYPE_MAP		0x144
+#define REG_RMT_NODE_TYPE_MAP		0x148
+
+#define PCI_DEVICE_ID_AMD_ALDEBARAN_DF_F1	0x14d1
+
+static int amd_get_node_map(void)
+{
+	struct amd_node_map *np;
+	struct pci_dev *pdev = NULL;
+	u32 tmp;
+
+	pdev = pci_get_device(PCI_VENDOR_ID_AMD,
+			      PCI_DEVICE_ID_AMD_ALDEBARAN_DF_F1, pdev);
+	if (!pdev)
+		return -ENODEV;
+
+	np = kmalloc(sizeof(*np), GFP_KERNEL);
+	if (!np)
+		return -ENOMEM;
+
+	pci_read_config_dword(pdev, REG_LOCAL_NODE_TYPE_MAP, &tmp);
+	np->gpu_node_start_id = tmp & 0xFFF;
+
+	pci_read_config_dword(pdev, REG_RMT_NODE_TYPE_MAP, &tmp);
+	np->cpu_node_count = tmp >> 16 & 0xFFF;
+
+	amd_northbridges.nmap = np;
+	return 0;
+}
+
 static struct pci_dev *next_northbridge(struct pci_dev *dev,
 					const struct pci_device_id *ids)
 {
@@ -230,6 +284,27 @@ int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
 }
 EXPORT_SYMBOL_GPL(amd_df_indirect_read);
 
+struct pci_dev *get_root_devs(struct pci_dev *root,
+			      const struct pci_device_id *root_ids,
+			      u16 roots_per_misc)
+{
+	u16 j;
+
+	/*
+	 * If there are more PCI root devices than data fabric/
+	 * system management network interfaces, then the (N)
+	 * PCI roots per DF/SMN interface are functionally the
+	 * same (for DF/SMN access) and N-1 are redundant. N-1
+	 * PCI roots should be skipped per DF/SMN interface so
+	 * the following DF/SMN interfaces get mapped to
+	 * correct PCI roots.
+	 */
+	for (j = 0; j < roots_per_misc; j++)
+		root = next_northbridge(root, root_ids);
+
+	return root;
+}
+
 int amd_cache_northbridges(void)
 {
 	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
@@ -237,10 +312,10 @@ int amd_cache_northbridges(void)
 	const struct pci_device_id *root_ids = amd_root_ids;
 	struct pci_dev *root, *misc, *link;
 	struct amd_northbridge *nb;
-	u16 roots_per_misc = 0;
-	u16 misc_count = 0;
-	u16 root_count = 0;
-	u16 i, j;
+	u16 roots_per_misc = 0, gpu_roots_per_misc = 0;
+	u16 misc_count = 0, gpu_misc_count = 0;
+	u16 root_count = 0, gpu_root_count = 0;
+	u16 i;
 
 	if (amd_northbridges.num)
 		return 0;
@@ -252,15 +327,23 @@ int amd_cache_northbridges(void)
 	}
 
 	misc = NULL;
-	while ((misc = next_northbridge(misc, misc_ids)) != NULL)
-		misc_count++;
+	while ((misc = next_northbridge(misc, misc_ids)) != NULL) {
+		if (misc->device == PCI_DEVICE_ID_AMD_ALDEBARAN_DF_F3)
+			gpu_misc_count++;
+		else
+			misc_count++;
+	}
 
 	if (!misc_count)
 		return -ENODEV;
 
 	root = NULL;
-	while ((root = next_northbridge(root, root_ids)) != NULL)
-		root_count++;
+	while ((root = next_northbridge(root, root_ids)) != NULL) {
+		if (root->device == PCI_DEVICE_ID_AMD_ALDEBARAN_ROOT)
+			gpu_root_count++;
+		else
+			root_count++;
+	}
 
 	if (root_count) {
 		roots_per_misc = root_count / misc_count;
@@ -275,33 +358,35 @@ int amd_cache_northbridges(void)
 		}
 	}
 
-	nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
+	/*
+	 * The number of miscs, roots and roots_per_misc might vary on different
+	 * nodes of a heterogeneous system.
+	 * calculate roots_per_misc accordingly in order to skip the redundant
+	 * roots and map the DF/SMN interfaces to correct PCI roots.
+	 */
+	if (gpu_root_count && gpu_misc_count) {
+		if (amd_get_node_map())
+			return -ENOMEM;
+
+		gpu_roots_per_misc = gpu_root_count / gpu_misc_count;
+	}
+
+	amd_northbridges.num = misc_count + gpu_misc_count;
+	nb = kcalloc(amd_northbridges.num, sizeof(struct amd_northbridge), GFP_KERNEL);
 	if (!nb)
 		return -ENOMEM;
 
 	amd_northbridges.nb = nb;
-	amd_northbridges.num = misc_count;
 
 	link = misc = root = NULL;
 	for (i = 0; i < amd_northbridges.num; i++) {
+		u16 misc_roots = i < misc_count ? roots_per_misc : gpu_roots_per_misc;
 		node_to_amd_nb(i)->root = root =
-			next_northbridge(root, root_ids);
+			get_root_devs(root, root_ids, misc_roots);
 		node_to_amd_nb(i)->misc = misc =
 			next_northbridge(misc, misc_ids);
 		node_to_amd_nb(i)->link = link =
 			next_northbridge(link, link_ids);
-
-		/*
-		 * If there are more PCI root devices than data fabric/
-		 * system management network interfaces, then the (N)
-		 * PCI roots per DF/SMN interface are functionally the
-		 * same (for DF/SMN access) and N-1 are redundant. N-1
-		 * PCI roots should be skipped per DF/SMN interface so
-		 * the following DF/SMN interfaces get mapped to
-		 * correct PCI roots.
-		 */
-		for (j = 1; j < roots_per_misc; j++)
-			root = next_northbridge(root, root_ids);
 	}
 
 	if (amd_gart_present())
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 011f2f1ea5bb..b3a0ec29dbd6 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -557,6 +557,7 @@
 #define PCI_DEVICE_ID_AMD_19H_DF_F3		0x1653
 #define PCI_DEVICE_ID_AMD_19H_M40H_DF_F3	0x167c
 #define PCI_DEVICE_ID_AMD_19H_M50H_DF_F3	0x166d
+#define PCI_DEVICE_ID_AMD_ALDEBARAN_DF_F3	0x14d3
 #define PCI_DEVICE_ID_AMD_CNB17H_F3		0x1703
 #define PCI_DEVICE_ID_AMD_LANCE			0x2000
 #define PCI_DEVICE_ID_AMD_LANCE_HOME		0x2001