| Message ID | 20170906012634.3975-1-dhinakaran.pandiyan@intel.com (mailing list archive) |
|---|---|
| State | New, archived |
On Tue, 2017-09-05 at 18:26 -0700, Dhinakaran Pandiyan wrote:
> The POWER_DOWN_PHY and POWER_UP_PHY sideband message transactions allow
> the source to request any node in an MST path, or a whole path, to be
> powered down or up. [...]
>
> +int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
> +				 struct drm_dp_mst_port *port, bool power_up)
> +{
> +	struct drm_dp_sideband_msg_tx *txmsg;
> +	int len, ret;
> +
> +	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
> +	if (!txmsg)
> +		return -ENOMEM;
> +
> +	port = drm_dp_get_validated_port_ref(mgr, port);
> +	if (!port)
> +		return -EINVAL;

if (!port), then you leak memory by not freeing txmsg

> [...]
On Wed, 2017-09-06 at 11:40 -0400, Lyude Paul wrote:
> On Tue, 2017-09-05 at 18:26 -0700, Dhinakaran Pandiyan wrote:
> > The POWER_DOWN_PHY and POWER_UP_PHY sideband message transactions allow
> > the source to request any node in an MST path, or a whole path, to be
> > powered down or up. [...]
> >
> > +	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
> > +	if (!txmsg)
> > +		return -ENOMEM;
> > +
> > +	port = drm_dp_get_validated_port_ref(mgr, port);
> > +	if (!port)
> > +		return -EINVAL;
> if (!port), then you leak memory by not freeing txmsg

Right, I'll fix that up in the next version. Thanks!

> [...]
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
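One shape that fix could take (a sketch of the reordered error paths, not the posted v2): take the validated port reference before allocating, so each failure path only has to unwind what was set up before it:

	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return -EINVAL;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		/* The validated reference was taken above, so drop it here. */
		drm_dp_put_port(port);
		return -ENOMEM;
	}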
On 2017-09-05 18:26, Dhinakaran Pandiyan wrote:
> The POWER_DOWN_PHY and POWER_UP_PHY sideband message transactions allow
> the source to request any node in an MST path, or a whole path, to be
> powered down or up. This allows drivers to target a specific sink in the
> MST topology, an improvement over just power managing the immediate
> downstream device. Secondly, since the request-reply protocol waits for an
> ACK, we can be sure that a downstream sink has enough time to respond to a
> power up/down request.
>
> Cc: Lyude <lyude@redhat.com>
> Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
> Cc: Harry Wentland <harry.wentland@amd.com>
> Signed-off-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>

Tested-by: Stefan Assmann <sassmann@redhat.com>
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 41b492f99955..a9f12708a046 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -294,6 +294,12 @@ static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
 		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
 		idx += req->u.i2c_write.num_bytes;
 		break;
+
+	case DP_POWER_DOWN_PHY:
+	case DP_POWER_UP_PHY:
+		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
+		idx++;
+		break;
 	}
 	raw->cur_len = idx;
 }
@@ -538,6 +544,22 @@ static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_r
 	return false;
 }
 
+
+static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
+						       struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+	int idx = 1;
+
+	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
+	idx++;
+	if (idx > raw->curlen) {
+		DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
+			      idx, raw->curlen);
+		return false;
+	}
+	return true;
+}
+
 static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
 					struct drm_dp_sideband_msg_reply_body *msg)
 {
@@ -567,6 +589,9 @@ static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
 		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
 	case DP_ALLOCATE_PAYLOAD:
 		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
+	case DP_POWER_DOWN_PHY:
+	case DP_POWER_UP_PHY:
+		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
 	default:
 		DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
 		return false;
@@ -693,6 +718,22 @@ static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_n
 	return 0;
 }
 
+static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
+				  int port_num, bool power_up)
+{
+	struct drm_dp_sideband_msg_req_body req;
+
+	if (power_up)
+		req.req_type = DP_POWER_UP_PHY;
+	else
+		req.req_type = DP_POWER_DOWN_PHY;
+
+	req.u.port_num.port_number = port_num;
+	drm_dp_encode_sideband_req(&req, msg);
+	msg->path_msg = true;
+	return 0;
+}
+
 static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
 					struct drm_dp_vcpi *vcpi)
 {
@@ -1724,6 +1765,38 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
 	return ret;
 }
 
+int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
+				 struct drm_dp_mst_port *port, bool power_up)
+{
+	struct drm_dp_sideband_msg_tx *txmsg;
+	int len, ret;
+
+	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+	if (!txmsg)
+		return -ENOMEM;
+
+	port = drm_dp_get_validated_port_ref(mgr, port);
+	if (!port)
+		return -EINVAL;
+
+	txmsg->dst = port->parent;
+	len = build_power_updown_phy(txmsg, port->port_num, power_up);
+	drm_dp_queue_down_tx(mgr, txmsg);
+
+	ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
+	if (ret > 0) {
+		if (txmsg->reply.reply_type == 1)
+			ret = -EINVAL;
+		else
+			ret = 0;
+	}
+	kfree(txmsg);
+	drm_dp_put_port(port);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
+
 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
 				       int id,
 				       struct drm_dp_payload *payload)
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index d55abb75f29a..7f78d26a0766 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -631,5 +631,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
 int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
 				     struct drm_dp_mst_topology_mgr *mgr,
 				     int slots);
+int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
+				 struct drm_dp_mst_port *port, bool power_up);
 
 #endif
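For context on how a driver might consume the new export, here is a hypothetical caller sketch; the struct and function names below are illustrative, not from this patch. It powers one sink's PHY down or up and relies on the ACKed reply for completion:

	#include <drm/drmP.h>
	#include <drm/drm_dp_mst_helper.h>

	/* Hypothetical per-connector bookkeeping; names are illustrative. */
	struct my_mst_connector {
		struct drm_dp_mst_topology_mgr *mgr;
		struct drm_dp_mst_port *port;
	};

	static void my_mst_set_sink_power(struct my_mst_connector *conn, bool up)
	{
		int ret;

		/* Targets only conn->port's sink; the rest of the topology
		 * stays powered. A non-zero return means the transaction
		 * failed or was NAKed. */
		ret = drm_dp_send_power_updown_phy(conn->mgr, conn->port, up);
		if (ret)
			DRM_DEBUG_KMS("sink PHY power %s failed: %d\n",
				      up ? "up" : "down", ret);
	}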
The POWER_DOWN_PHY and POWER_UP_PHY sideband message transactions allow
the source to request any node in an MST path, or a whole path, to be
powered down or up. This allows drivers to target a specific sink in the
MST topology, an improvement over just power managing the immediate
downstream device. Secondly, since the request-reply protocol waits for an
ACK, we can be sure that a downstream sink has enough time to respond to a
power up/down request.

Cc: Lyude <lyude@redhat.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
---
 drivers/gpu/drm/drm_dp_mst_topology.c | 73 +++++++++++++++++++++++++++++++++++
 include/drm/drm_dp_mst_helper.h       |  2 +
 2 files changed, 75 insertions(+)
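As a side note on the wire format the patch implements: the request body for both opcodes is a single byte carrying the target port number in bits 7:4, and the ACK reply mirrors that layout. A standalone sketch of the encode/decode pair (plain C, outside the kernel helpers):

	#include <stdint.h>

	/* Body byte for POWER_UP_PHY / POWER_DOWN_PHY, as encoded in the
	 * drm_dp_encode_sideband_req() hunk above: port number in the
	 * upper nibble, lower nibble zero. */
	static uint8_t phy_power_body_encode(uint8_t port_number)
	{
		return (port_number & 0xf) << 4;
	}

	/* Inverse, matching drm_dp_sideband_parse_power_updown_phy_ack(). */
	static uint8_t phy_power_body_decode(uint8_t body)
	{
		return (body >> 4) & 0xf;
	}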