Message ID | 1485301777-3465-6-git-send-email-dhinakaran.pandiyan@intel.com (mailing list archive) |
---|---|
State | New, archived |
On Tue, Jan 24, 2017 at 03:49:33PM -0800, Dhinakaran Pandiyan wrote:
> Link bandwidth is shared between multiple display streams in DP MST
> configurations. The DP MST topology manager structure maintains the
> shared link bandwidth for a primary link directly connected to the GPU. For
> atomic modesetting drivers, checking if there is sufficient link bandwidth
> for a mode needs to be done during the atomic_check phase to avoid failed
> modesets. Let's encapsulate the available link bw information in a
> private state structure so that bw can be allocated and released atomically
> for each of the ports sharing the primary link.
>
> Signed-off-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>

Yeah, I like how this neatly hides all the atomic state tracking now, and
just leaves the MST-specific logic in the MST helpers.

> ---
>  drivers/gpu/drm/drm_dp_mst_topology.c | 60 +++++++++++++++++++++++++++++++++++
>  include/drm/drm_dp_mst_helper.h       | 20 ++++++++++++
>  2 files changed, 80 insertions(+)
>
> diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
> index b871d4e..247286b 100644
> --- a/drivers/gpu/drm/drm_dp_mst_topology.c
> +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
> @@ -2042,6 +2042,8 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
>  		goto out_unlock;
>  	}
>
> +	/* max. time slots - one slot for MTP header */
> +	mgr->state->avail_slots = 63;

We can't reset atomic state on each hotplug, that would wreck the atomic
tracking. E.g. there might still be a pipe around running on this output,
and then we'd go above 63 slots when we release that one.

You need to init this once in drm_dp_mst_topology_mgr_init() and then make
sure it's always accurate.

>  	/* add initial branch device at LCT 1 */
>  	mstb = drm_dp_add_mst_branch_device(1, NULL);
>  	if (mstb == NULL) {
> @@ -2935,6 +2937,55 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
>  		(*mgr->cbs->hotplug)(mgr);
>  }
>
> +void drm_dp_mst_destroy_state(void *obj_state)
> +{
> +	kfree(obj_state);
> +}
> +
> +void drm_dp_mst_swap_state(void *obj, void **obj_state_ptr)
> +{
> +	struct drm_dp_mst_topology_mgr *mgr = obj;
> +	struct drm_dp_mst_topology_state **topology_state_ptr;
> +
> +	topology_state_ptr = (struct drm_dp_mst_topology_state **)obj_state_ptr;
> +
> +	mgr->state->state = (*topology_state_ptr)->state;
> +	swap(*topology_state_ptr, mgr->state);
> +	mgr->state->state = NULL;
> +}
> +
> +void *drm_dp_mst_duplicate_state(struct drm_atomic_state *state, void *obj)
> +{
> +	struct drm_dp_mst_topology_mgr *mgr = obj;
> +	struct drm_dp_mst_topology_state *new_mst_state;
> +
> +	if (WARN_ON(!mgr->state))
> +		return NULL;
> +
> +	new_mst_state = kmalloc(sizeof(*new_mst_state), GFP_KERNEL);
> +	if (new_mst_state) {
> +		new_mst_state->avail_slots = mgr->state->avail_slots;
> +		new_mst_state->mgr = mgr;
> +		new_mst_state->state = state;
> +	}

kmemdup is less code :-)

> +
> +	return new_mst_state;
> +}
> +
> +static const struct drm_private_state_funcs mst_state_funcs = {
> +	.swap_state = drm_dp_mst_swap_state,
> +	.destroy_state = drm_dp_mst_destroy_state,
> +	.duplicate_state = drm_dp_mst_duplicate_state,
> +};
> +

Needs a few lines of kernel-doc here since it's an exported function.

Cheers, Daniel

> +struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
> +								     struct drm_dp_mst_topology_mgr *mgr)
> +{
> +	return drm_atomic_get_priv_obj_state(state, mgr,
> +					     &mst_state_funcs);
> +}
> +EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
> +
>  /**
>   * drm_dp_mst_topology_mgr_init - initialise a topology manager
>   * @mgr: manager struct to initialise
> @@ -2979,6 +3030,12 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
>  	if (test_calc_pbn_mode() < 0)
>  		DRM_ERROR("MST PBN self-test failed\n");
>
> +	mgr->state = kzalloc(sizeof(*mgr->state), GFP_KERNEL);
> +	if (mgr->state == NULL)
> +		return -ENOMEM;
> +	mgr->state->mgr = mgr;
> +	mgr->funcs = &mst_state_funcs;
> +
>  	return 0;
>  }
>  EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
> @@ -2999,6 +3056,9 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
>  	mutex_unlock(&mgr->payload_lock);
>  	mgr->dev = NULL;
>  	mgr->aux = NULL;
> +	kfree(mgr->state);
> +	mgr->state = NULL;
> +	mgr->funcs = NULL;
>  }
>  EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
>
> diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
> index 98d3c73..8cbdbfa 100644
> --- a/include/drm/drm_dp_mst_helper.h
> +++ b/include/drm/drm_dp_mst_helper.h
> @@ -24,6 +24,7 @@
>
>  #include <linux/types.h>
>  #include <drm/drm_dp_helper.h>
> +#include <drm/drm_atomic.h>
>
>  struct drm_dp_mst_branch;
>
> @@ -403,6 +404,12 @@ struct drm_dp_payload {
>  	int vcpi;
>  };
>
> +struct drm_dp_mst_topology_state {
> +	int avail_slots;
> +	struct drm_atomic_state *state;
> +	struct drm_dp_mst_topology_mgr *mgr;
> +};
> +
>  /**
>   * struct drm_dp_mst_topology_mgr - DisplayPort MST manager
>   *
> @@ -481,6 +488,16 @@ struct drm_dp_mst_topology_mgr {
>  	int pbn_div;
>
>  	/**
> +	 * @state: State information for topology manager
> +	 */
> +	struct drm_dp_mst_topology_state *state;
> +
> +	/**
> +	 * @funcs: Atomic helper callbacks
> +	 */
> +	const struct drm_private_state_funcs *funcs;
> +
> +	/**
>  	 * @qlock: protects @tx_msg_downq, the tx_slots in struct
>  	 * &drm_dp_mst_branch and txmsg->state once they are queued
>  	 */
> @@ -596,4 +613,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
>
>  void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
>  int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr);
> +struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
> +								     struct drm_dp_mst_topology_mgr *mgr);
> +
>  #endif
> --
> 2.7.4
>
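To make Daniel's first comment concrete, here is a rough sketch (illustrative only, not part of the posted patch) of how the slot bookkeeping could be set up once in drm_dp_mst_topology_mgr_init(), where this patch already allocates the state object, rather than being reset on every drm_dp_mst_topology_mgr_set_mst() call:

	/* In drm_dp_mst_topology_mgr_init(), next to the existing allocation: */
	mgr->state = kzalloc(sizeof(*mgr->state), GFP_KERNEL);
	if (!mgr->state)
		return -ENOMEM;
	mgr->state->mgr = mgr;
	/* 64 MTP time slots minus one reserved for the MTP header */
	mgr->state->avail_slots = 63;
	mgr->funcs = &mst_state_funcs;

The corresponding "mgr->state->avail_slots = 63;" assignment in drm_dp_mst_topology_mgr_set_mst() would then be dropped, so slots claimed by a pipe that is still enabled stay accounted for across an MST hotplug.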
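For the "kmemdup is less code" remark, the duplicate hook quoted above could be written roughly as follows (a sketch based only on the three-member drm_dp_mst_topology_state introduced in this series):

void *drm_dp_mst_duplicate_state(struct drm_atomic_state *state, void *obj)
{
	struct drm_dp_mst_topology_mgr *mgr = obj;
	struct drm_dp_mst_topology_state *new_mst_state;

	if (WARN_ON(!mgr->state))
		return NULL;

	/* one call copies avail_slots and the mgr back-pointer */
	new_mst_state = kmemdup(mgr->state, sizeof(*new_mst_state), GFP_KERNEL);
	if (new_mst_state)
		new_mst_state->state = state;	/* attach to the new global state */

	return new_mst_state;
}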
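For the kernel-doc request, the exported helper would need a comment block along these lines (suggested wording only, not taken from the patch):

/**
 * drm_atomic_get_mst_topology_state - get MST topology state for an atomic update
 * @state: global atomic state being constructed
 * @mgr: MST topology manager whose private state is requested
 *
 * Returns the topology state tracked for @mgr as a private object in @state,
 * duplicating it on first access, so callers can check and adjust the shared
 * avail_slots accounting under the usual atomic check/commit rules.
 */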
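Finally, to illustrate the intent spelled out in the commit message (checking link bandwidth during atomic_check), a driver-side consumer of the new helper might look roughly like the following. This is a speculative sketch: the function name is made up, and the assumption that drm_atomic_get_priv_obj_state() reports failure via an error pointer or NULL is exactly that, an assumption.

/* Hypothetical atomic_check-time bandwidth check for one MST stream. */
static int mst_check_link_bw(struct drm_atomic_state *state,
			     struct drm_dp_mst_topology_mgr *mgr,
			     int pbn)
{
	struct drm_dp_mst_topology_state *mst_state;
	int req_slots;

	mst_state = drm_atomic_get_mst_topology_state(state, mgr);
	if (IS_ERR_OR_NULL(mst_state))	/* assumed failure convention */
		return mst_state ? PTR_ERR(mst_state) : -ENOMEM;

	/* translate the stream's PBN into MTP time slots on the primary link */
	req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
	if (req_slots > mst_state->avail_slots)
		return -ENOSPC;	/* mode does not fit, fail the atomic check */

	/* claim the slots; a rejected commit simply discards this state copy */
	mst_state->avail_slots -= req_slots;
	return 0;
}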