Message ID | 58381B39020000780012226E@prv-mh.provo.novell.com (mailing list archive) |
---|---|
State | New, archived |
On Fri, 25 Nov 2016, Jan Beulich wrote:
> There's no point setting fields always receiving the same value on each
> iteration, as handle_ioreq() doesn't alter them anyway. Set state and
> count once ahead of the loop, drop the redundant clearing of
> data_is_ptr, and avoid the meaningless setting of df altogether.
>
> Also avoid doing an unsigned long calculation of size when the field to
> be initialized is only 32 bits wide (and the shift value in the range
> 0...3).
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> Reviewed-by: Paul Durrant <paul.durrant@citrix.com>

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>

> --- a/xen-hvm.c
> +++ b/xen-hvm.c
> @@ -995,6 +995,8 @@ static int handle_buffered_iopage(XenIOS
>      }
>
>      memset(&req, 0x00, sizeof(req));
> +    req.state = STATE_IOREQ_READY;
> +    req.count = 1;
>
>      for (;;) {
>          uint32_t rdptr = buf_page->read_pointer, wrptr;
> @@ -1009,15 +1011,11 @@ static int handle_buffered_iopage(XenIOS
>              break;
>          }
>          buf_req = &buf_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM];
> -        req.size = 1UL << buf_req->size;
> -        req.count = 1;
> +        req.size = 1U << buf_req->size;
>          req.addr = buf_req->addr;
>          req.data = buf_req->data;
> -        req.state = STATE_IOREQ_READY;
>          req.dir = buf_req->dir;
> -        req.df = 1;
>          req.type = buf_req->type;
> -        req.data_is_ptr = 0;
>          xen_rmb();
>          qw = (req.size == 8);
>          if (qw) {
> @@ -1032,6 +1030,13 @@ static int handle_buffered_iopage(XenIOS
>
>          handle_ioreq(state, &req);
>
> +        /* Only req.data may get updated by handle_ioreq(), albeit even that
> +         * should not happen as such data would never make it to the guest.
> +         */
> +        assert(req.state == STATE_IOREQ_READY);
> +        assert(req.count == 1);
> +        assert(!req.data_is_ptr);
> +
>          atomic_add(&buf_page->read_pointer, qw + 1);
>      }
--- a/xen-hvm.c
+++ b/xen-hvm.c
@@ -995,6 +995,8 @@ static int handle_buffered_iopage(XenIOS
     }
 
     memset(&req, 0x00, sizeof(req));
+    req.state = STATE_IOREQ_READY;
+    req.count = 1;
 
     for (;;) {
         uint32_t rdptr = buf_page->read_pointer, wrptr;
@@ -1009,15 +1011,11 @@ static int handle_buffered_iopage(XenIOS
             break;
         }
         buf_req = &buf_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM];
-        req.size = 1UL << buf_req->size;
-        req.count = 1;
+        req.size = 1U << buf_req->size;
         req.addr = buf_req->addr;
         req.data = buf_req->data;
-        req.state = STATE_IOREQ_READY;
         req.dir = buf_req->dir;
-        req.df = 1;
         req.type = buf_req->type;
-        req.data_is_ptr = 0;
         xen_rmb();
         qw = (req.size == 8);
         if (qw) {
@@ -1032,6 +1030,13 @@ static int handle_buffered_iopage(XenIOS
 
         handle_ioreq(state, &req);
 
+        /* Only req.data may get updated by handle_ioreq(), albeit even that
+         * should not happen as such data would never make it to the guest.
+         */
+        assert(req.state == STATE_IOREQ_READY);
+        assert(req.count == 1);
+        assert(!req.data_is_ptr);
+
         atomic_add(&buf_page->read_pointer, qw + 1);
     }
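For readers following the reasoning rather than the exact QEMU code, the sketch below illustrates the same pattern in isolation: loop-invariant request fields are initialized once ahead of the loop, the per-entry size shift is done in 32-bit arithmetic because the destination field is only 32 bits wide (and the shift value is at most 3), and assertions document the assumption that the handler leaves the invariant fields alone. The struct layout, field names, and the handle_one()/process() helpers here are made up for illustration; they are not QEMU's ioreq_t or handle_ioreq().

```c
#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical request layout, shaped loosely like the one in the patch;
 * not QEMU's ioreq_t. */
struct req {
    uint64_t addr;
    uint64_t data;
    uint32_t size;        /* 32-bit field: a 32-bit shift is enough */
    uint32_t count;
    uint32_t state;
    uint8_t  dir;
    uint8_t  data_is_ptr;
};

enum { STATE_READY = 1 };

/* Stand-in handler: a real one would perform the I/O and may update
 * r->data, but it is expected to leave state, count and data_is_ptr
 * untouched. */
static void handle_one(struct req *r)
{
    if (r->dir == 0) {    /* "read": produce some data */
        r->data = 0;
    }
}

static void process(const uint64_t *addrs, const uint8_t *size_shifts, int n)
{
    struct req r;

    memset(&r, 0, sizeof(r));
    /* Fields that are the same for every entry are set once, ahead of
     * the loop, instead of on every iteration. */
    r.state = STATE_READY;
    r.count = 1;

    for (int i = 0; i < n; i++) {
        /* The shift value is at most 3 and r.size is 32 bits wide, so
         * 1U is sufficient; 1UL would only widen the computation to be
         * truncated again on assignment. */
        r.size = 1U << size_shifts[i];
        r.addr = addrs[i];

        handle_one(&r);

        /* Rather than re-clearing the invariants on each iteration,
         * assert that the handler left them untouched. */
        assert(r.state == STATE_READY);
        assert(r.count == 1);
        assert(!r.data_is_ptr);
    }
}

int main(void)
{
    uint64_t addrs[2] = { 0x1000, 0x2000 };
    uint8_t shifts[2] = { 0, 2 };    /* 1-byte and 4-byte accesses */

    process(addrs, shifts, 2);
    return 0;
}
```

The asserts serve the same purpose as the comment added in the patch: they turn the "handler does not alter these fields" assumption into something a debug build will actually check, which is what makes hoisting the assignments out of the loop safe.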