Message ID | 20220404151445.10955-19-Jonathan.Cameron@huawei.com |
---|---|
State | Superseded |
Series | CXL 2.0 emulation Support |
> On Apr 4, 2022, at 8:14 AM, Jonathan Cameron via <qemu-devel@nongnu.org> wrote:
>
> From: Ben Widawsky <ben.widawsky@intel.com>
>
> A device's volatile and persistent memory are known Host Defined Memory
> (HDM) regions. The mechanism by which the device is programmed to claim
> the addresses associated with those regions is through dedicated logic
> known as the HDM decoder. In order to allow the OS to properly program
> the HDMs, the HDM decoders must be modeled.
>
> There are two ways the HDM decoders can be implemented, the legacy
> mechanism is through the PCIe DVSEC programming from CXL 1.1 (8.1.3.8),
> and MMIO is found in 8.2.5.12 of the spec. For now, 8.1.3.8 is not
> implemented.
>
> Much of CXL device logic is implemented in cxl-utils. The HDM decoder
> however is implemented directly by the device implementation.
> Whilst the implementation currently does no validity checks on the
> encoder set up, future work will add sanity checking specific to
> the type of cxl component.
>
> Signed-off-by: Ben Widawsky <ben.widawsky@intel.com>
> Co-developed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
> Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
> Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
> ---
> hw/mem/cxl_type3.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 55 insertions(+)
>
> diff --git a/hw/mem/cxl_type3.c b/hw/mem/cxl_type3.c
> index 329a6ea2a9..5c93fbbd9b 100644
> --- a/hw/mem/cxl_type3.c
> +++ b/hw/mem/cxl_type3.c
> @@ -50,6 +50,48 @@ static void build_dvsecs(CXLType3Dev *ct3d)
>                                 GPF_DEVICE_DVSEC_REVID, dvsec);
>  }
>
> +static void hdm_decoder_commit(CXLType3Dev *ct3d, int which)
> +{
> +    ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
> +    uint32_t *cache_mem = cregs->cache_mem_registers;
> +
> +    assert(which == 0);
> +
> +    /* TODO: Sanity checks that the decoder is possible */
> +    ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMIT, 0);
> +    ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, ERR, 0);
> +
> +    ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);
> +}
> +
> +static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value,
> +                           unsigned size)
> +{
> +    CXLComponentState *cxl_cstate = opaque;
> +    ComponentRegisters *cregs = &cxl_cstate->crb;
> +    CXLType3Dev *ct3d = container_of(cxl_cstate, CXLType3Dev, cxl_cstate);
> +    uint32_t *cache_mem = cregs->cache_mem_registers;
> +    bool should_commit = false;
> +    int which_hdm = -1;
> +
> +    assert(size == 4);
> +    g_assert(offset <= CXL2_COMPONENT_CM_REGION_SIZE);
> +

Looks like this will allow offset == CXL2_COMPONENT_CM_REGION_SIZE to pass
the check, and cause a buffer overrun. Shouldn't this be
g_assert(offset < CXL2_COMPONENT_CM_REGION_SIZE)?
We also need to make sure (offset + 4 <= CXL2_COMPONENT_CM_REGION_SIZE).
Or maybe we just need offset + 4 <= CXL2_COMPONENT_CM_REGION_SIZE here, if
offset < CXL2_COMPONENT_CM_REGION_SIZE is already checked somewhere else.

> +    switch (offset) {
> +    case A_CXL_HDM_DECODER0_CTRL:
> +        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
> +        which_hdm = 0;
> +        break;
> +    default:
> +        break;
> +    }
> +
> +    stl_le_p((uint8_t *)cache_mem + offset, value);
> +    if (should_commit) {
> +        hdm_decoder_commit(ct3d, which_hdm);
> +    }
> +}
> +
>  static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
>  {
>      MemoryRegion *mr;
> @@ -93,6 +135,9 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp)
>      ct3d->cxl_cstate.pdev = pci_dev;
>      build_dvsecs(ct3d);
>
> +    regs->special_ops = g_new0(MemoryRegionOps, 1);
> +    regs->special_ops->write = ct3d_reg_write;
> +
>      cxl_component_register_block_init(OBJECT(pci_dev), cxl_cstate,
>                                        TYPE_CXL_TYPE3);
>
> @@ -107,6 +152,15 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp)
>                       &ct3d->cxl_dstate.device_registers);
>  }
>
> +static void ct3_exit(PCIDevice *pci_dev)
> +{
> +    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
> +    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
> +    ComponentRegisters *regs = &cxl_cstate->crb;
> +
> +    g_free(regs->special_ops);
> +}
> +
>  static void ct3d_reset(DeviceState *dev)
>  {
>      CXLType3Dev *ct3d = CXL_TYPE3(dev);
> @@ -128,6 +182,7 @@ static void ct3_class_init(ObjectClass *oc, void *data)
>      PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
>
>      pc->realize = ct3_realize;
> +    pc->exit = ct3_exit;
>      pc->class_id = PCI_CLASS_STORAGE_EXPRESS;
>      pc->vendor_id = PCI_VENDOR_ID_INTEL;
>      pc->device_id = 0xd93; /* LVF for now */
> --
> 2.32.0
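To make Tong's point concrete: for a SIZE-byte register file accessed with 4-byte writes, the last valid starting offset is SIZE - 4, so a check of `offset <= SIZE` admits a write that begins at the very end of the buffer and spills four bytes past it. A minimal standalone C sketch of the pattern (REGION_SIZE and regs[] are illustrative stand-ins, not QEMU code):

```c
#include <assert.h>
#include <stdint.h>
#include <string.h>

#define REGION_SIZE 0x1000 /* stand-in for CXL2_COMPONENT_CM_REGION_SIZE */

static uint8_t regs[REGION_SIZE];

static void reg_write32(uint64_t offset, uint32_t value)
{
    /*
     * offset <= REGION_SIZE would admit offset == REGION_SIZE, and the
     * 4-byte store below would then run past the end of regs[]. The
     * last valid start for a 4-byte access is REGION_SIZE - 4, hence:
     */
    assert(offset + sizeof(value) <= REGION_SIZE);
    memcpy(&regs[offset], &value, sizeof(value));
}

int main(void)
{
    reg_write32(REGION_SIZE - 4, 0xdeadbeef); /* last valid 4-byte slot */
    /* reg_write32(REGION_SIZE, 0); would trip the assert: out of bounds */
    return 0;
}
```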
On Mon, 4 Apr 2022 12:19:07 -0700
Tong Zhang <ztong0001@gmail.com> wrote:

> > On Apr 4, 2022, at 8:14 AM, Jonathan Cameron via <qemu-devel@nongnu.org> wrote:
> >
> > From: Ben Widawsky <ben.widawsky@intel.com>
> >
> > A device's volatile and persistent memory are known Host Defined Memory
> > (HDM) regions. The mechanism by which the device is programmed to claim
> > the addresses associated with those regions is through dedicated logic
> > known as the HDM decoder. In order to allow the OS to properly program
> > the HDMs, the HDM decoders must be modeled.
> >
> > There are two ways the HDM decoders can be implemented, the legacy
> > mechanism is through the PCIe DVSEC programming from CXL 1.1 (8.1.3.8),
> > and MMIO is found in 8.2.5.12 of the spec. For now, 8.1.3.8 is not
> > implemented.
> >
> > Much of CXL device logic is implemented in cxl-utils. The HDM decoder
> > however is implemented directly by the device implementation.
> > Whilst the implementation currently does no validity checks on the
> > encoder set up, future work will add sanity checking specific to
> > the type of cxl component.
> >
> > Signed-off-by: Ben Widawsky <ben.widawsky@intel.com>
> > Co-developed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
> > Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
> > Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
> > ---
> > hw/mem/cxl_type3.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++
> > 1 file changed, 55 insertions(+)
> >
> > diff --git a/hw/mem/cxl_type3.c b/hw/mem/cxl_type3.c
> > index 329a6ea2a9..5c93fbbd9b 100644
> > --- a/hw/mem/cxl_type3.c
> > +++ b/hw/mem/cxl_type3.c

...

> > +
> > +static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value,
> > +                           unsigned size)
> > +{
> > +    CXLComponentState *cxl_cstate = opaque;
> > +    ComponentRegisters *cregs = &cxl_cstate->crb;
> > +    CXLType3Dev *ct3d = container_of(cxl_cstate, CXLType3Dev, cxl_cstate);
> > +    uint32_t *cache_mem = cregs->cache_mem_registers;
> > +    bool should_commit = false;
> > +    int which_hdm = -1;
> > +
> > +    assert(size == 4);
> > +    g_assert(offset <= CXL2_COMPONENT_CM_REGION_SIZE);
> > +
>
> Looks like this will allow offset == CXL2_COMPONENT_CM_REGION_SIZE to pass
> the check, and cause a buffer overrun.
> Shouldn't this be g_assert(offset < CXL2_COMPONENT_CM_REGION_SIZE)?

Good point. Silly mistake. I'll fix for v10.

> We also need to make sure (offset + 4 <= CXL2_COMPONENT_CM_REGION_SIZE).
> Or maybe we just need offset + 4 <= CXL2_COMPONENT_CM_REGION_SIZE here,
> if offset < CXL2_COMPONENT_CM_REGION_SIZE is already checked somewhere else.

I think we are fine without these two because in cxl-component-utils we
restrict the accesses to aligned only.

Jonathan

> > +    switch (offset) {
> > +    case A_CXL_HDM_DECODER0_CTRL:
> > +        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
> > +        which_hdm = 0;
> > +        break;
> > +    default:
> > +        break;
> > +    }
> > +
> > +    stl_le_p((uint8_t *)cache_mem + offset, value);
> > +    if (should_commit) {
> > +        hdm_decoder_commit(ct3d, which_hdm);
> > +    }
> > +}
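For reference, the fix Jonathan agrees to queue for v10 presumably amounts to this one-character tightening (a sketch against the quoted code; hunk header elided):

```diff
     assert(size == 4);
-    g_assert(offset <= CXL2_COMPONENT_CM_REGION_SIZE);
+    g_assert(offset < CXL2_COMPONENT_CM_REGION_SIZE);
```

And the closing reasoning step: if cxl-component-utils does restrict the cache-mem region to aligned 4-byte accesses, then offset < CXL2_COMPONENT_CM_REGION_SIZE already implies offset + 4 <= CXL2_COMPONENT_CM_REGION_SIZE (given the region size is itself a multiple of 4), so the strict less-than also covers Tong's second concern.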
```diff
diff --git a/hw/mem/cxl_type3.c b/hw/mem/cxl_type3.c
index 329a6ea2a9..5c93fbbd9b 100644
--- a/hw/mem/cxl_type3.c
+++ b/hw/mem/cxl_type3.c
@@ -50,6 +50,48 @@ static void build_dvsecs(CXLType3Dev *ct3d)
                                GPF_DEVICE_DVSEC_REVID, dvsec);
 }
 
+static void hdm_decoder_commit(CXLType3Dev *ct3d, int which)
+{
+    ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
+    uint32_t *cache_mem = cregs->cache_mem_registers;
+
+    assert(which == 0);
+
+    /* TODO: Sanity checks that the decoder is possible */
+    ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMIT, 0);
+    ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, ERR, 0);
+
+    ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);
+}
+
+static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value,
+                           unsigned size)
+{
+    CXLComponentState *cxl_cstate = opaque;
+    ComponentRegisters *cregs = &cxl_cstate->crb;
+    CXLType3Dev *ct3d = container_of(cxl_cstate, CXLType3Dev, cxl_cstate);
+    uint32_t *cache_mem = cregs->cache_mem_registers;
+    bool should_commit = false;
+    int which_hdm = -1;
+
+    assert(size == 4);
+    g_assert(offset <= CXL2_COMPONENT_CM_REGION_SIZE);
+
+    switch (offset) {
+    case A_CXL_HDM_DECODER0_CTRL:
+        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
+        which_hdm = 0;
+        break;
+    default:
+        break;
+    }
+
+    stl_le_p((uint8_t *)cache_mem + offset, value);
+    if (should_commit) {
+        hdm_decoder_commit(ct3d, which_hdm);
+    }
+}
+
 static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
 {
     MemoryRegion *mr;
@@ -93,6 +135,9 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp)
     ct3d->cxl_cstate.pdev = pci_dev;
     build_dvsecs(ct3d);
 
+    regs->special_ops = g_new0(MemoryRegionOps, 1);
+    regs->special_ops->write = ct3d_reg_write;
+
     cxl_component_register_block_init(OBJECT(pci_dev), cxl_cstate,
                                       TYPE_CXL_TYPE3);
 
@@ -107,6 +152,15 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp)
                      &ct3d->cxl_dstate.device_registers);
 }
 
+static void ct3_exit(PCIDevice *pci_dev)
+{
+    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
+    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
+    ComponentRegisters *regs = &cxl_cstate->crb;
+
+    g_free(regs->special_ops);
+}
+
 static void ct3d_reset(DeviceState *dev)
 {
     CXLType3Dev *ct3d = CXL_TYPE3(dev);
@@ -128,6 +182,7 @@ static void ct3_class_init(ObjectClass *oc, void *data)
     PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
 
     pc->realize = ct3_realize;
+    pc->exit = ct3_exit;
     pc->class_id = PCI_CLASS_STORAGE_EXPRESS;
     pc->vendor_id = PCI_VENDOR_ID_INTEL;
     pc->device_id = 0xd93; /* LVF for now */
```
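To illustrate what the new write hook models, here is a hedged sketch of the commit handshake from the guest driver's point of view: software sets COMMIT in the HDM Decoder 0 Control register, and the device (in this patch, ct3d_reg_write() followed by hdm_decoder_commit()) clears COMMIT and ERR and latches COMMITTED. The mmio_read32()/mmio_write32() accessors are hypothetical stand-ins for a real driver's MMIO helpers, and the bit positions follow one reading of CXL 2.0 §8.2.5.12.7 but should be treated as illustrative:

```c
#include <stdbool.h>
#include <stdint.h>

/* Illustrative bit positions for the HDM Decoder n Control register. */
#define HDM_DECODER_CTRL_COMMIT    (1u << 9)
#define HDM_DECODER_CTRL_COMMITTED (1u << 10)
#define HDM_DECODER_CTRL_ERR       (1u << 11)

/* Hypothetical MMIO accessors a driver would supply. */
extern uint32_t mmio_read32(uint64_t off);
extern void mmio_write32(uint64_t off, uint32_t val);

static bool hdm_decoder0_commit(uint64_t ctrl_off)
{
    /* Request the commit; the device latches the write. */
    mmio_write32(ctrl_off, mmio_read32(ctrl_off) | HDM_DECODER_CTRL_COMMIT);

    /* Poll until the decoder reports success or flags an error. */
    for (;;) {
        uint32_t ctrl = mmio_read32(ctrl_off);
        if (ctrl & HDM_DECODER_CTRL_ERR) {
            return false;
        }
        if (ctrl & HDM_DECODER_CTRL_COMMITTED) {
            return true;
        }
    }
}
```

Because this QEMU model commits synchronously inside the write handler, a guest would observe COMMITTED on its very first readback; real hardware may take longer, which is why the spec splits the COMMIT request bit from the COMMITTED status bit.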