@@ -3513,7 +3513,7 @@ static void __attribute__((__constructor__)) v9fs_set_fd_limit(void)
struct rlimit rlim;
if (getrlimit(RLIMIT_NOFILE, &rlim) < 0) {
error_report("Failed to get the resource limit");
- exit(1);
+ exit(EXIT_FAILURE);
}
open_fd_hw = rlim.rlim_cur - MIN(400, rlim.rlim_cur/3);
open_fd_rc = rlim.rlim_cur/2;
@@ -478,20 +478,20 @@ void virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
if (req->elem.out_num < 1 || req->elem.in_num < 1) {
error_report("virtio-blk missing headers");
- exit(1);
+ exit(EXIT_FAILURE);
}
if (unlikely(iov_to_buf(iov, out_num, 0, &req->out,
sizeof(req->out)) != sizeof(req->out))) {
error_report("virtio-blk request outhdr too short");
- exit(1);
+ exit(EXIT_FAILURE);
}
iov_discard_front(&iov, &out_num, sizeof(req->out));
if (in_iov[in_num - 1].iov_len < sizeof(struct virtio_blk_inhdr)) {
error_report("virtio-blk request inhdr too short");
- exit(1);
+ exit(EXIT_FAILURE);
}
/* We always touch the last byte, so just see how big in_iov is. */
@@ -893,7 +893,7 @@ static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) ||
iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) {
error_report("virtio-net ctrl missing headers");
- exit(1);
+ exit(EXIT_FAILURE);
}
iov_cnt = elem->out_num;
@@ -1131,12 +1131,12 @@ static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t
i, n->mergeable_rx_bufs, offset, size,
n->guest_hdr_len, n->host_hdr_len,
vdev->guest_features);
- exit(1);
+ exit(EXIT_FAILURE);
}
if (elem->in_num < 1) {
error_report("virtio-net receive queue contains no in buffers");
- exit(1);
+ exit(EXIT_FAILURE);
}
sg = elem->in_sg;
@@ -1239,14 +1239,14 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
out_sg = elem->out_sg;
if (out_num < 1) {
error_report("virtio-net header not in first element");
- exit(1);
+ exit(EXIT_FAILURE);
}
if (n->has_vnet_hdr) {
if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
n->guest_hdr_len) {
error_report("virtio-net header incorrect");
- exit(1);
+ exit(EXIT_FAILURE);
}
if (n->needs_vnet_hdr_swap) {
virtio_net_hdr_swap(vdev, (void *) &mhdr);
@@ -884,7 +884,7 @@ static uint16_t pci_req_id_cache_extract(PCIReqIDCache *cache)
default:
error_printf("Invalid PCI requester ID cache type: %d\n",
cache->type);
- exit(1);
+ exit(EXIT_FAILURE);
break;
}
@@ -1085,7 +1085,7 @@ void pci_register_bar(PCIDevice *pci_dev, int region_num,
if (size & (size-1)) {
fprintf(stderr, "ERROR: PCI region size must be pow2 "
"type=0x%x, size=0x%"FMT_PCIBUS"\n", type, size);
- exit(1);
+ exit(EXIT_FAILURE);
}
r = &pci_dev->io_regions[region_num];
@@ -1787,19 +1787,19 @@ PCIDevice *pci_nic_init_nofail(NICInfo *nd, PCIBus *rootbus,
int i;
if (qemu_show_nic_models(nd->model, pci_nic_models)) {
- exit(0);
+ exit(EXIT_SUCCESS);
}
i = qemu_find_nic_model(nd, pci_nic_models, default_model);
if (i < 0) {
- exit(1);
+ exit(EXIT_FAILURE);
}
bus = pci_get_bus_devfn(&devfn, rootbus, devaddr);
if (!bus) {
error_report("Invalid PCI device address %s for device %s",
devaddr, pci_nic_names[i]);
- exit(1);
+ exit(EXIT_FAILURE);
}
pci_dev = pci_create(bus, devfn, pci_nic_names[i]);
@@ -1810,7 +1810,7 @@ PCIDevice *pci_nic_init_nofail(NICInfo *nd, PCIBus *rootbus,
if (err) {
error_report_err(err);
object_unparent(OBJECT(dev));
- exit(1);
+ exit(EXIT_FAILURE);
}
return pci_dev;
@@ -172,7 +172,7 @@ static void vhost_scsi_set_config(VirtIODevice *vdev,
if ((uint32_t) virtio_ldl_p(vdev, &scsiconf->sense_size) != vs->sense_size ||
(uint32_t) virtio_ldl_p(vdev, &scsiconf->cdb_size) != vs->cdb_size) {
error_report("vhost-scsi does not support changing the sense data and CDB sizes");
- exit(1);
+ exit(EXIT_FAILURE);
}
}
@@ -194,7 +194,7 @@ static void vhost_scsi_set_status(VirtIODevice *vdev, uint8_t val)
strerror(-ret));
/* There is no userspace virtio-scsi fallback so exit */
- exit(1);
+ exit(EXIT_FAILURE);
}
} else {
vhost_scsi_stop(s);
@@ -34,7 +34,7 @@ void virtio_scsi_set_iothread(VirtIOSCSI *s, IOThread *iothread)
if (!k->set_guest_notifiers || !k->ioeventfd_started) {
fprintf(stderr, "virtio-scsi: Failed to set iothread "
"(transport does not support notifiers)");
- exit(1);
+ exit(EXIT_FAILURE);
}
}
@@ -84,7 +84,7 @@ static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
static void virtio_scsi_bad_req(void)
{
error_report("wrong size for virtio-scsi headers");
- exit(1);
+ exit(EXIT_FAILURE);
}
static size_t qemu_sgl_concat(VirtIOSCSIReq *req, struct iovec *iov,
@@ -208,7 +208,7 @@ static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq)
if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size,
sizeof(VirtIOSCSICmdResp) + vs->sense_size) < 0) {
error_report("invalid SCSI request migration data");
- exit(1);
+ exit(EXIT_FAILURE);
}
scsi_req_ref(sreq);
@@ -628,7 +628,7 @@ static void virtio_scsi_set_config(VirtIODevice *vdev,
if ((uint32_t) virtio_ldl_p(vdev, &scsiconf->sense_size) >= 65536 ||
(uint32_t) virtio_ldl_p(vdev, &scsiconf->cdb_size) >= 256) {
error_report("bad data written to virtio-scsi configuration space");
- exit(1);
+ exit(EXIT_FAILURE);
}
vs->sense_size = virtio_ldl_p(vdev, &scsiconf->sense_size);
@@ -339,7 +339,7 @@ static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
if (num_heads > vq->vring.num) {
error_report("Guest moved used index from %u to %u",
idx, vq->shadow_avail_idx);
- exit(1);
+ exit(EXIT_FAILURE);
}
/* On success, callers read a descriptor at vq->last_avail_idx.
* Make sure descriptor read does not bypass avail index read. */
@@ -361,7 +361,7 @@ static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
/* If their number is silly, that's a fatal mistake. */
if (head >= vq->vring.num) {
error_report("Guest says index %u is available", head);
- exit(1);
+ exit(EXIT_FAILURE);
}
return head;
@@ -384,7 +384,7 @@ static unsigned virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
if (next >= max) {
error_report("Desc next is %u", next);
- exit(1);
+ exit(EXIT_FAILURE);
}
vring_desc_read(vdev, desc, desc_pa, next);
@@ -417,13 +417,13 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
if (desc.flags & VRING_DESC_F_INDIRECT) {
if (desc.len % sizeof(VRingDesc)) {
error_report("Invalid size for indirect buffer table");
- exit(1);
+ exit(EXIT_FAILURE);
}
/* If we've got too many, that implies a descriptor loop. */
if (num_bufs >= max) {
error_report("Looped descriptor");
- exit(1);
+ exit(EXIT_FAILURE);
}
/* loop over the indirect descriptor table */
@@ -438,7 +438,7 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
/* If we've got too many, that implies a descriptor loop. */
if (++num_bufs > max) {
error_report("Looped descriptor");
- exit(1);
+ exit(EXIT_FAILURE);
}
if (desc.flags & VRING_DESC_F_WRITE) {
@@ -483,7 +483,7 @@ static void virtqueue_map_desc(unsigned int *p_num_sg, hwaddr *addr, struct iove
if (!sz) {
error_report("virtio: zero sized buffers are not allowed");
- exit(1);
+ exit(EXIT_FAILURE);
}
while (sz) {
@@ -491,7 +491,7 @@ static void virtqueue_map_desc(unsigned int *p_num_sg, hwaddr *addr, struct iove
if (num_sg == max_num_sg) {
error_report("virtio: too many write descriptors in indirect table");
- exit(1);
+ exit(EXIT_FAILURE);
}
iov[num_sg].iov_base = cpu_physical_memory_map(pa, &len, is_write);
@@ -529,11 +529,11 @@ static void virtqueue_map_iovec(struct iovec *sg, hwaddr *addr,
sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
if (!sg[i].iov_base) {
error_report("virtio: error trying to map MMIO memory");
- exit(1);
+ exit(EXIT_FAILURE);
}
if (len != sg[i].iov_len) {
error_report("virtio: unexpected memory split");
- exit(1);
+ exit(EXIT_FAILURE);
}
}
}
@@ -592,7 +592,7 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
if (vq->inuse >= vq->vring.num) {
error_report("Virtqueue size exceeded");
- exit(1);
+ exit(EXIT_FAILURE);
}
i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
@@ -604,7 +604,7 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
if (desc.flags & VRING_DESC_F_INDIRECT) {
if (desc.len % sizeof(VRingDesc)) {
error_report("Invalid size for indirect buffer table");
- exit(1);
+ exit(EXIT_FAILURE);
}
/* loop over the indirect descriptor table */
@@ -622,7 +622,7 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
} else {
if (in_num) {
error_report("Incorrect order for descriptors");
- exit(1);
+ exit(EXIT_FAILURE);
}
virtqueue_map_desc(&out_num, addr, iov,
VIRTQUEUE_MAX_SIZE, false, desc.addr, desc.len);
@@ -631,7 +631,7 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
/* If we've got too many, that implies a descriptor loop. */
if ((in_num + out_num) > max) {
error_report("Looped descriptor");
- exit(1);
+ exit(EXIT_FAILURE);
}
} while ((i = virtqueue_read_next_desc(vdev, &desc, desc_pa, max)) != max);
This patch is the result of the Coccinelle script scripts/coccinelle/exit.cocci.

Signed-off-by: Laurent Vivier <lvivier@redhat.com>
CC: Stefan Hajnoczi <stefanha@redhat.com>
---
 hw/9pfs/9p.c                    |  2 +-
 hw/block/virtio-blk.c           |  6 +++---
 hw/net/virtio-net.c             | 10 +++++-----
 hw/pci/pci.c                    | 12 ++++++------
 hw/scsi/vhost-scsi.c            |  4 ++--
 hw/scsi/virtio-scsi-dataplane.c |  2 +-
 hw/scsi/virtio-scsi.c           |  6 +++---
 hw/virtio/virtio.c              | 28 ++++++++++++++--------------
 8 files changed, 35 insertions(+), 35 deletions(-)