Message ID | 20171109181741.31318-1-lepton@google.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
On Thu, Nov 9, 2017 at 7:17 PM, Tao Wu via Qemu-devel <qemu-devel@nongnu.org> wrote: > The old code treats bits as bytes when calculating host memory usage. > Change it to be consistent with allocation logic in pixman library. > > Signed-off-by: Tao Wu <lepton@google.com> Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com> > --- > hw/display/virtio-gpu.c | 16 ++++++++++++++-- > 1 file changed, 14 insertions(+), 2 deletions(-) > > diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c > index 43bbe09ea0..274e365713 100644 > --- a/hw/display/virtio-gpu.c > +++ b/hw/display/virtio-gpu.c > @@ -322,6 +322,18 @@ static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format) > } > } > > +static uint32_t calc_image_hostmem(pixman_format_code_t pformat, > + uint32_t width, uint32_t height) > +{ > + /* Copied from pixman/pixman-bits-image.c, skip integer overflow check. > + * pixman_image_create_bits will fail in case it overflow. > + */ > + > + int bpp = PIXMAN_FORMAT_BPP(pformat); > + int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t); > + return height * stride; > +} > + > static void virtio_gpu_resource_create_2d(VirtIOGPU *g, > struct virtio_gpu_ctrl_command *cmd) > { > @@ -366,7 +378,7 @@ static void virtio_gpu_resource_create_2d(VirtIOGPU *g, > return; > } > > - res->hostmem = PIXMAN_FORMAT_BPP(pformat) * c2d.width * c2d.height; > + res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height); > if (res->hostmem + g->hostmem < g->conf.max_hostmem) { > res->image = pixman_image_create_bits(pformat, > c2d.width, > @@ -1087,7 +1099,7 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size, > return -EINVAL; > } > > - res->hostmem = PIXMAN_FORMAT_BPP(pformat) * res->width * res->height; > + res->hostmem = calc_image_hostmem(pformat, res->width, res->height); > > res->addrs = g_new(uint64_t, res->iov_cnt); > res->iov = g_new(struct iovec, res->iov_cnt); > -- > 2.15.0.448.gf294e3d99a-goog > >
diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c index 43bbe09ea0..274e365713 100644 --- a/hw/display/virtio-gpu.c +++ b/hw/display/virtio-gpu.c @@ -322,6 +322,18 @@ static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format) } } +static uint32_t calc_image_hostmem(pixman_format_code_t pformat, + uint32_t width, uint32_t height) +{ + /* Copied from pixman/pixman-bits-image.c, skip integer overflow check. + * pixman_image_create_bits will fail in case it overflow. + */ + + int bpp = PIXMAN_FORMAT_BPP(pformat); + int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t); + return height * stride; +} + static void virtio_gpu_resource_create_2d(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd) { @@ -366,7 +378,7 @@ static void virtio_gpu_resource_create_2d(VirtIOGPU *g, return; } - res->hostmem = PIXMAN_FORMAT_BPP(pformat) * c2d.width * c2d.height; + res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height); if (res->hostmem + g->hostmem < g->conf.max_hostmem) { res->image = pixman_image_create_bits(pformat, c2d.width, @@ -1087,7 +1099,7 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size, return -EINVAL; } - res->hostmem = PIXMAN_FORMAT_BPP(pformat) * res->width * res->height; + res->hostmem = calc_image_hostmem(pformat, res->width, res->height); res->addrs = g_new(uint64_t, res->iov_cnt); res->iov = g_new(struct iovec, res->iov_cnt);
The old code treats bits as bytes when calculating host memory usage. Change it to be consistent with the allocation logic in the pixman library. Signed-off-by: Tao Wu <lepton@google.com> --- hw/display/virtio-gpu.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-)