| Message ID | 1552300155-25216-7-git-send-email-yuval.shaia@oracle.com |
| --- | --- |
| State | New, archived |
| Series | Misc fixes to pvrdma device |
On 3/11/19 12:29 PM, Yuval Shaia wrote:
> When device is going down free all saved MAD buffers.
>
> Signed-off-by: Yuval Shaia <yuval.shaia@oracle.com>
> Reviewed-by: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
> ---
>  hw/rdma/rdma_backend.c    | 34 +++++++++++++++++++++++++++++++++-
>  hw/rdma/vmw/pvrdma_main.c |  2 ++
>  2 files changed, 35 insertions(+), 1 deletion(-)
>
> diff --git a/hw/rdma/rdma_backend.c b/hw/rdma/rdma_backend.c
> index e8af974..d0bbe57 100644
> --- a/hw/rdma/rdma_backend.c
> +++ b/hw/rdma/rdma_backend.c
> @@ -64,6 +64,33 @@ static inline void complete_work(enum ibv_wc_status status, uint32_t vendor_err,
>      comp_handler(ctx, &wc);
>  }
>
> +static void free_cqe_ctx(gpointer data, gpointer user_data)
> +{
> +    BackendCtx *bctx;
> +    RdmaDeviceResources *rdma_dev_res = user_data;
                                           ^^^^
No need to do casting for the above assignment "(RdmaDeviceResources *)"?

> +    unsigned long cqe_ctx_id = GPOINTER_TO_INT(data);
> +
> +    bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, cqe_ctx_id);
> +    if (bctx) {
> +        rdma_rm_dealloc_cqe_ctx(rdma_dev_res, cqe_ctx_id);
> +    }
> +    g_free(bctx);
> +}
> +
> +static void clean_recv_mads(RdmaBackendDev *backend_dev)
> +{
> +    unsigned long cqe_ctx_id;
> +
> +    do {
> +        cqe_ctx_id = rdma_protected_qlist_pop_int64(&backend_dev->
> +                                                    recv_mads_list);
> +        if (cqe_ctx_id != -ENOENT) {
> +            free_cqe_ctx(GINT_TO_POINTER(cqe_ctx_id),
> +                         backend_dev->rdma_dev_res);
> +        }
> +    } while (cqe_ctx_id != -ENOENT);
> +}
> +
>  static int rdma_poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq)
>  {
>      int i, ne, total_ne = 0;
> @@ -1037,6 +1064,11 @@ static int mad_init(RdmaBackendDev *backend_dev, CharBackend *mad_chr_be)
>      return 0;
>  }
>
> +static void mad_stop(RdmaBackendDev *backend_dev)
> +{
> +    clean_recv_mads(backend_dev);
> +}
> +
>  static void mad_fini(RdmaBackendDev *backend_dev)
>  {
>      disable_rdmacm_mux_async(backend_dev);
> @@ -1224,12 +1256,12 @@ void rdma_backend_start(RdmaBackendDev *backend_dev)
>
>  void rdma_backend_stop(RdmaBackendDev *backend_dev)
>  {
> +    mad_stop(backend_dev);
>      stop_backend_thread(&backend_dev->comp_thread);
>  }
>
>  void rdma_backend_fini(RdmaBackendDev *backend_dev)
>  {
> -    rdma_backend_stop(backend_dev);
>      mad_fini(backend_dev);
>      g_hash_table_destroy(ah_hash);
>      ibv_destroy_comp_channel(backend_dev->channel);
> diff --git a/hw/rdma/vmw/pvrdma_main.c b/hw/rdma/vmw/pvrdma_main.c
> index 729a2df..04845f4 100644
> --- a/hw/rdma/vmw/pvrdma_main.c
> +++ b/hw/rdma/vmw/pvrdma_main.c
> @@ -313,6 +313,8 @@ static void pvrdma_fini(PCIDevice *pdev)
>
>      pvrdma_qp_ops_fini();
>
> +    rdma_backend_stop(&dev->backend_dev);
> +
>      rdma_rm_fini(&dev->rdma_dev_res, &dev->backend_dev,
>                   dev->backend_eth_device_name);
>
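The heart of the patch is the drain loop in clean_recv_mads(): keep popping saved MAD context IDs off recv_mads_list until the list reports -ENOENT, freeing the backend context behind each ID. Below is a minimal, self-contained sketch of that pattern, assuming a mutex-protected GQueue in the spirit of QEMU's RdmaProtectedQList; the ProtectedQList type and helper names are invented for illustration, not the actual QEMU API.

```c
/* Sketch only: a made-up ProtectedQList analogue, not QEMU's implementation.
 * Build with: gcc drain.c $(pkg-config --cflags --libs glib-2.0) */
#include <glib.h>
#include <errno.h>

typedef struct ProtectedQList {
    GMutex lock;
    GQueue *list;
} ProtectedQList;

/* Pop one ID under the lock; return -ENOENT when the queue is empty. */
static gint64 qlist_pop_int64(ProtectedQList *q)
{
    gpointer p;

    g_mutex_lock(&q->lock);
    p = g_queue_pop_head(q->list);
    g_mutex_unlock(&q->lock);

    return p ? GPOINTER_TO_INT(p) : -ENOENT;
}

/* Stand-in for free_cqe_ctx(): release whatever the ID refers to. */
static void free_ctx(gint64 cqe_ctx_id)
{
    g_print("freeing cqe ctx %" G_GINT64_FORMAT "\n", cqe_ctx_id);
}

int main(void)
{
    ProtectedQList q = { .list = g_queue_new() };
    gint64 cqe_ctx_id;

    g_mutex_init(&q.lock);
    /* IDs must be non-zero in this toy version: GINT_TO_POINTER(0) would be
     * indistinguishable from an empty queue. */
    for (int i = 1; i <= 3; i++) {
        g_queue_push_tail(q.list, GINT_TO_POINTER(i));
    }

    /* Same shape as clean_recv_mads(): pop until -ENOENT. */
    do {
        cqe_ctx_id = qlist_pop_int64(&q);
        if (cqe_ctx_id != -ENOENT) {
            free_ctx(cqe_ctx_id);
        }
    } while (cqe_ctx_id != -ENOENT);

    g_queue_free(q.list);
    g_mutex_clear(&q.lock);
    return 0;
}
```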
On Tue, Mar 12, 2019 at 12:08:53PM +0200, Kamal Heib wrote:
> On 3/11/19 12:29 PM, Yuval Shaia wrote:
> > When device is going down free all saved MAD buffers.
> >
> > Signed-off-by: Yuval Shaia <yuval.shaia@oracle.com>
> > Reviewed-by: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
> > ---
> >  hw/rdma/rdma_backend.c    | 34 +++++++++++++++++++++++++++++++++-
> >  hw/rdma/vmw/pvrdma_main.c |  2 ++
> >  2 files changed, 35 insertions(+), 1 deletion(-)
[...]
> > +static void free_cqe_ctx(gpointer data, gpointer user_data)
> > +{
> > +    BackendCtx *bctx;
> > +    RdmaDeviceResources *rdma_dev_res = user_data;
>                                            ^^^^
> No need to do casting for the above assignment "(RdmaDeviceResources *)"?

Compiler didn't give any error so I skipped it.

[...]
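The casting question comes down to a C-vs-C++ difference: in C, a void pointer (glib's gpointer) converts implicitly to any object pointer type, so the assignment is legal without a cast, which is why the compiler stays silent; a C++ compiler would reject it. A tiny standalone illustration (the struct and function names are invented for the example):

```c
#include <stdio.h>

struct dev_res {
    int num_ctxs;
};

/* Mirrors the GFunc-style callback shape: user_data arrives as void *. */
static void show(void *user_data)
{
    struct dev_res *res = user_data;                  /* OK in C: implicit void * conversion */
    /* struct dev_res *res = (struct dev_res *)user_data;  equivalent; the cast is optional */

    printf("num_ctxs=%d\n", res->num_ctxs);
}

int main(void)
{
    struct dev_res r = { .num_ctxs = 4 };

    show(&r);
    return 0;
}
```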
On 3/12/19 1:56 PM, Yuval Shaia wrote:
> On Tue, Mar 12, 2019 at 12:08:53PM +0200, Kamal Heib wrote:
>> On 3/11/19 12:29 PM, Yuval Shaia wrote:
>>> When device is going down free all saved MAD buffers.
>>>
>>> Signed-off-by: Yuval Shaia <yuval.shaia@oracle.com>
>>> Reviewed-by: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
[...]
>>> +static void free_cqe_ctx(gpointer data, gpointer user_data)
>>> +{
>>> +    BackendCtx *bctx;
>>> +    RdmaDeviceResources *rdma_dev_res = user_data;
>>                                            ^^^^
>> No need to do casting for the above assignment "(RdmaDeviceResources *)"?
>
> Compiler didn't give any error so I skipped it.

OK,
Other than that:

Reviewed-by: Kamal Heib <kamalheib1@gmail.com>

[...]
```diff
diff --git a/hw/rdma/rdma_backend.c b/hw/rdma/rdma_backend.c
index e8af974..d0bbe57 100644
--- a/hw/rdma/rdma_backend.c
+++ b/hw/rdma/rdma_backend.c
@@ -64,6 +64,33 @@ static inline void complete_work(enum ibv_wc_status status, uint32_t vendor_err,
     comp_handler(ctx, &wc);
 }
 
+static void free_cqe_ctx(gpointer data, gpointer user_data)
+{
+    BackendCtx *bctx;
+    RdmaDeviceResources *rdma_dev_res = user_data;
+    unsigned long cqe_ctx_id = GPOINTER_TO_INT(data);
+
+    bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, cqe_ctx_id);
+    if (bctx) {
+        rdma_rm_dealloc_cqe_ctx(rdma_dev_res, cqe_ctx_id);
+    }
+    g_free(bctx);
+}
+
+static void clean_recv_mads(RdmaBackendDev *backend_dev)
+{
+    unsigned long cqe_ctx_id;
+
+    do {
+        cqe_ctx_id = rdma_protected_qlist_pop_int64(&backend_dev->
+                                                    recv_mads_list);
+        if (cqe_ctx_id != -ENOENT) {
+            free_cqe_ctx(GINT_TO_POINTER(cqe_ctx_id),
+                         backend_dev->rdma_dev_res);
+        }
+    } while (cqe_ctx_id != -ENOENT);
+}
+
 static int rdma_poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq)
 {
     int i, ne, total_ne = 0;
@@ -1037,6 +1064,11 @@ static int mad_init(RdmaBackendDev *backend_dev, CharBackend *mad_chr_be)
     return 0;
 }
 
+static void mad_stop(RdmaBackendDev *backend_dev)
+{
+    clean_recv_mads(backend_dev);
+}
+
 static void mad_fini(RdmaBackendDev *backend_dev)
 {
     disable_rdmacm_mux_async(backend_dev);
@@ -1224,12 +1256,12 @@ void rdma_backend_start(RdmaBackendDev *backend_dev)
 
 void rdma_backend_stop(RdmaBackendDev *backend_dev)
 {
+    mad_stop(backend_dev);
     stop_backend_thread(&backend_dev->comp_thread);
 }
 
 void rdma_backend_fini(RdmaBackendDev *backend_dev)
 {
-    rdma_backend_stop(backend_dev);
     mad_fini(backend_dev);
     g_hash_table_destroy(ah_hash);
     ibv_destroy_comp_channel(backend_dev->channel);
diff --git a/hw/rdma/vmw/pvrdma_main.c b/hw/rdma/vmw/pvrdma_main.c
index 729a2df..04845f4 100644
--- a/hw/rdma/vmw/pvrdma_main.c
+++ b/hw/rdma/vmw/pvrdma_main.c
@@ -313,6 +313,8 @@ static void pvrdma_fini(PCIDevice *pdev)
 
     pvrdma_qp_ops_fini();
 
+    rdma_backend_stop(&dev->backend_dev);
+
     rdma_rm_fini(&dev->rdma_dev_res, &dev->backend_dev,
                  dev->backend_eth_device_name);
```
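One ordering detail worth spelling out: rdma_backend_stop() is no longer called from rdma_backend_fini() but explicitly from pvrdma_fini(), before rdma_rm_fini(). The MAD drain walks the CQE-context table owned by the resource manager, so it has to run while that table still exists. A simplified sketch of the resulting teardown order, with stub functions standing in for the real pvrdma calls (names are illustrative only, not QEMU code):

```c
#include <stdio.h>

/* Stubs standing in for the pvrdma teardown steps. */
static void backend_stop(void)
{
    printf("backend_stop: drain recv_mads_list, stop completion thread\n");
}

static void rm_fini(void)
{
    printf("rm_fini: free resource-manager tables (including CQE contexts)\n");
}

static void backend_fini(void)
{
    printf("backend_fini: mad_fini, destroy ah_hash, destroy comp channel\n");
}

int main(void)
{
    backend_stop();   /* must precede rm_fini(): the MAD drain uses those tables */
    rm_fini();
    backend_fini();
    return 0;
}
```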