@@ -314,8 +314,6 @@ static int ivpu_wait_for_ready(struct ivpu_device *vdev)
ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0);
if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout))
break;
-
- cond_resched();
}
ivpu_ipc_consumer_del(vdev, &cons);
@@ -156,7 +156,6 @@ static int __must_check internal_alloc_pages_locked(struct ivpu_bo *bo)
ret = -ENOMEM;
goto err_free_pages;
}
- cond_resched();
}
bo->pages = pages;
@@ -105,7 +105,7 @@ static void ivpu_pm_recovery_work(struct work_struct *work)
retry:
ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
if (ret == -EAGAIN && !drm_dev_is_unplugged(&vdev->drm)) {
- cond_resched();
+ cond_resched_stall();
goto retry;
}
@@ -146,7 +146,11 @@ int ivpu_pm_suspend_cb(struct device *dev)
timeout = jiffies + msecs_to_jiffies(vdev->timeout.tdr);
while (!ivpu_hw_is_idle(vdev)) {
- cond_resched();
+
+ /* The timeout is in thousands of msecs. Maybe this should be a
+ * timed wait instead?
+ */
+ cond_resched_stall();
if (time_after_eq(jiffies, timeout)) {
ivpu_err(vdev, "Failed to enter idle on system suspend\n");
return -EBUSY;
@@ -1516,7 +1516,6 @@ void irq_polling_work(struct work_struct *work)
return;
}
- cond_resched();
usleep_range(datapath_poll_interval_us, 2 * datapath_poll_interval_us);
}
}
@@ -1547,7 +1546,6 @@ irqreturn_t dbc_irq_threaded_fn(int irq, void *data)
if (!event_count) {
event_count = NUM_EVENTS;
- cond_resched();
}
/*
@@ -470,14 +470,6 @@ static ssize_t charlcd_write(struct file *file, const char __user *buf,
char c;
for (; count-- > 0; (*ppos)++, tmp++) {
- if (((count + 1) & 0x1f) == 0) {
- /*
- * charlcd_write() is invoked as a VFS->write() callback
- * and as such it is always invoked from preemptible
- * context and may sleep.
- */
- cond_resched();
- }
if (get_user(c, tmp))
return -EFAULT;
@@ -539,9 +531,6 @@ static void charlcd_puts(struct charlcd *lcd, const char *s)
int count = strlen(s);
for (; count-- > 0; tmp++) {
- if (((count + 1) & 0x1f) == 0)
- cond_resched();
-
charlcd_write_char(lcd, *tmp);
}
}
@@ -2696,7 +2696,6 @@ static void genpd_dev_pm_detach(struct device *dev, bool power_off)
break;
mdelay(i);
- cond_resched();
}
if (ret < 0) {
@@ -1235,8 +1235,7 @@ kthread(void *vp)
if (!more) {
schedule();
remove_wait_queue(k->waitq, &wait);
- } else
- cond_resched();
+ }
} while (!kthread_should_stop());
complete(&k->rendez); /* tell spawner we're stopping */
return 0;
@@ -111,7 +111,6 @@ static void brd_free_pages(struct brd_device *brd)
xa_for_each(&brd->brd_pages, idx, page) {
__free_page(page);
- cond_resched();
}
xa_destroy(&brd->brd_pages);
@@ -563,7 +563,6 @@ static unsigned long bm_count_bits(struct drbd_bitmap *b)
p_addr = __bm_map_pidx(b, idx);
bits += bitmap_weight(p_addr, BITS_PER_PAGE);
__bm_unmap(p_addr);
- cond_resched();
}
/* last (or only) page */
last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL;
@@ -1118,7 +1117,6 @@ static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned
atomic_inc(&ctx->in_flight);
bm_page_io_async(ctx, i);
++count;
- cond_resched();
}
} else if (flags & BM_AIO_WRITE_HINTED) {
/* ASSERT: BM_AIO_WRITE_ALL_PAGES is not set. */
@@ -1158,7 +1156,6 @@ static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned
atomic_inc(&ctx->in_flight);
bm_page_io_async(ctx, i);
++count;
- cond_resched();
}
}
@@ -1545,7 +1542,6 @@ void _drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const
for (page_nr = first_page; page_nr < last_page; page_nr++) {
bm_set_full_words_within_one_page(device->bitmap, page_nr, first_word, last_word);
spin_unlock_irq(&b->bm_lock);
- cond_resched();
first_word = 0;
spin_lock_irq(&b->bm_lock);
}
@@ -318,7 +318,6 @@ static void seq_print_resource_transfer_log_summary(struct seq_file *m,
struct drbd_request *req_next;
kref_get(&req->kref);
spin_unlock_irq(&resource->req_lock);
- cond_resched();
spin_lock_irq(&resource->req_lock);
req_next = list_next_entry(req, tl_requests);
if (kref_put(&req->kref, drbd_req_destroy))
@@ -271,7 +271,6 @@ static int lo_write_simple(struct loop_device *lo, struct request *rq,
ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);
if (ret < 0)
break;
- cond_resched();
}
return ret;
@@ -300,7 +299,6 @@ static int lo_read_simple(struct loop_device *lo, struct request *rq,
zero_fill_bio(bio);
break;
}
- cond_resched();
}
return 0;
@@ -1948,7 +1946,6 @@ static void loop_process_work(struct loop_worker *worker,
spin_unlock_irq(&lo->lo_work_lock);
loop_handle_cmd(cmd);
- cond_resched();
spin_lock_irq(&lo->lo_work_lock);
}
@@ -1259,9 +1259,6 @@ __do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
goto done;
break;
}
-
- /* Yield point for this unbounded loop. */
- cond_resched();
}
done:
return more_to_do;
@@ -1819,8 +1819,6 @@ static ssize_t recompress_store(struct device *dev,
ret = err;
break;
}
-
- cond_resched();
}
__free_page(page);
@@ -79,7 +79,6 @@ static int virtbt_close_vdev(struct virtio_bluetooth *vbt)
while ((skb = virtqueue_detach_unused_buf(vq)))
kfree_skb(skb);
- cond_resched();
}
return 0;
@@ -84,7 +84,6 @@ static int smccc_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
tries++;
if (tries >= SMCCC_TRNG_MAX_TRIES)
return copied;
- cond_resched();
break;
default:
return -EIO;
@@ -478,8 +478,6 @@ static ssize_t lp_read(struct file *file, char __user *buf,
retval = -ERESTARTSYS;
break;
}
-
- cond_resched();
}
parport_negotiate(lp_table[minor].dev->port, IEEE1284_MODE_COMPAT);
out:
@@ -92,8 +92,6 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
static inline bool should_stop_iteration(void)
{
- if (need_resched())
- cond_resched();
return signal_pending(current);
}
@@ -497,7 +495,6 @@ static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
continue;
if (iocb->ki_flags & IOCB_NOWAIT)
return written ? written : -EAGAIN;
- cond_resched();
}
return written;
}
@@ -523,7 +520,6 @@ static ssize_t read_zero(struct file *file, char __user *buf,
if (signal_pending(current))
break;
- cond_resched();
}
return cleared;
@@ -51,7 +51,7 @@
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/bitops.h>
-#include <linux/sched.h> /* cond_resched() */
+#include <linux/sched.h>
#include <asm/io.h>
#include <linux/uaccess.h>
@@ -64,9 +64,7 @@ static DEFINE_SPINLOCK(dsp_lock);
static void PaceMsaAccess(unsigned short usDspBaseIO)
{
- cond_resched();
udelay(100);
- cond_resched();
}
unsigned short dsp3780I_ReadMsaCfg(unsigned short usDspBaseIO,
@@ -176,8 +176,6 @@ static ssize_t pp_read(struct file *file, char __user *buf, size_t count,
bytes_read = -ERESTARTSYS;
break;
}
-
- cond_resched();
}
parport_set_timeout(pp->pdev, pp->default_inactivity);
@@ -256,8 +254,6 @@ static ssize_t pp_write(struct file *file, const char __user *buf,
if (signal_pending(current))
break;
-
- cond_resched();
}
parport_set_timeout(pp->pdev, pp->default_inactivity);
@@ -457,7 +457,6 @@ static ssize_t get_random_bytes_user(struct iov_iter *iter)
if (ret % PAGE_SIZE == 0) {
if (signal_pending(current))
break;
- cond_resched();
}
}
@@ -1417,7 +1416,6 @@ static ssize_t write_pool_user(struct iov_iter *iter)
if (ret % PAGE_SIZE == 0) {
if (signal_pending(current))
break;
- cond_resched();
}
}
@@ -1936,7 +1936,6 @@ static void remove_vqs(struct ports_device *portdev)
flush_bufs(vq, true);
while ((buf = virtqueue_detach_unused_buf(vq)))
free_buf(buf, true);
- cond_resched();
}
portdev->vdev->config->del_vqs(portdev->vdev);
kfree(portdev->in_vqs);
@@ -490,7 +490,6 @@ static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
kfree(vc_req->req_data);
kfree(vc_req->sgs);
}
- cond_resched();
}
}
@@ -634,7 +634,6 @@ static irqreturn_t cxl_event_thread(int irq, void *id)
if (!status)
break;
cxl_mem_get_event_records(mds, status);
- cond_resched();
} while (status);
return IRQ_HANDLED;
@@ -93,7 +93,6 @@ __subtests(const char *caller, const struct subtest *st, int count, void *data)
int err;
for (; count--; st++) {
- cond_resched();
if (signal_pending(current))
return -EINTR;
@@ -431,7 +431,6 @@ static int __find_race(void *arg)
signal:
seqno = get_random_u32_below(data->fc.chain_length - 1);
dma_fence_signal(data->fc.fences[seqno]);
- cond_resched();
}
if (atomic_dec_and_test(&data->children))
@@ -372,7 +372,13 @@ static int sbefifo_request_reset(struct sbefifo *sbefifo)
return 0;
}
- cond_resched();
+ /*
+ * Use cond_resched_stall() to avoid spinning in a
+ * tight loop.
+ * Though, given that the timeout is in milliseconds,
+ * maybe this should be a timed or event wait?
+ */
+ cond_resched_stall();
}
dev_err(dev, "FIFO reset timed out\n");
@@ -462,7 +468,11 @@ static int sbefifo_wait(struct sbefifo *sbefifo, bool up,
end_time = jiffies + timeout;
while (!time_after(jiffies, end_time)) {
- cond_resched();
+ /*
+ * As above, maybe this should be a timed or event wait?
+ */
+ cond_resched_stall();
+
rc = sbefifo_regr(sbefifo, addr, &sts);
if (rc < 0) {
dev_err(dev, "FSI error %d reading status register\n", rc);
@@ -788,8 +788,13 @@ static int bcm_iproc_i2c_xfer_wait(struct bcm_iproc_i2c_dev *iproc_i2c,
break;
}
- cpu_relax();
- cond_resched();
+ /*
+ * Use cond_resched_stall() to avoid spinning in a
+ * tight loop.
+ * Though, given that the timeout is in milliseconds,
+ * maybe this should be a timed or event wait?
+ */
+ cond_resched_stall();
} while (!iproc_i2c->xfer_is_done);
}
@@ -187,8 +187,13 @@ static void highlander_i2c_poll(struct highlander_i2c_dev *dev)
if (time_after(jiffies, timeout))
break;
- cpu_relax();
- cond_resched();
+ /*
+ * Use cond_resched_stall() to avoid spinning in a
+ * tight loop.
+ * Though, given that the timeout is in milliseconds,
+ * maybe this should be a timed or event wait?
+ */
+ cond_resched_stall();
}
dev_err(dev->dev, "polling timed out\n");
@@ -207,9 +207,6 @@ static void iic_dev_reset(struct ibm_iic_private* dev)
udelay(10);
dc ^= DIRCNTL_SCC;
out_8(&iic->directcntl, dc);
-
- /* be nice */
- cond_resched();
}
}
@@ -231,7 +228,13 @@ static int iic_dc_wait(volatile struct iic_regs __iomem *iic, u8 mask)
while ((in_8(&iic->directcntl) & mask) != mask){
if (unlikely(time_after(jiffies, x)))
return -1;
- cond_resched();
+ /*
+ * Use cond_resched_stall() to avoid spinning in a
+ * tight loop.
+ * Though, given that the timeout is in milliseconds,
+ * maybe this should be a timed or event wait?
+ */
+ cond_resched_stall();
}
return 0;
}
@@ -712,7 +712,7 @@ static int mpc_i2c_execute_msg(struct mpc_i2c *i2c)
}
return -EIO;
}
- cond_resched();
+ cond_resched_stall();
}
return i2c->rc;
@@ -310,7 +310,14 @@ static int mxs_i2c_pio_wait_xfer_end(struct mxs_i2c_dev *i2c)
return -ENXIO;
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
- cond_resched();
+
+ /*
+ * Use cond_resched_stall() to avoid spinning in a
+ * tight loop.
+ * Though, given that the timeout is in milliseconds,
+ * maybe this should be a timed or event wait?
+ */
+ cond_resched_stall();
}
return 0;
@@ -232,8 +232,13 @@ static void scx200_acb_poll(struct scx200_acb_iface *iface)
}
if (time_after(jiffies, timeout))
break;
- cpu_relax();
- cond_resched();
+ /*
+ * Use cond_resched_stall() to avoid spinning in a
+ * tight loop.
+ * Though, given that the timeout is in milliseconds,
* maybe this should be a timed or event wait?
+ */
+ cond_resched_stall();
}
dev_err(&iface->adapter.dev, "timeout in state %s\n",
@@ -215,7 +215,6 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
gup_flags |= FOLL_WRITE;
while (npages) {
- cond_resched();
pinned = pin_user_pages_fast(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE /
@@ -668,7 +668,6 @@ static noinline int max_packet_exceeded(struct hfi1_packet *packet, int thread)
if ((packet->numpkt & (MAX_PKT_RECV_THREAD - 1)) == 0)
/* allow defered processing */
process_rcv_qp_work(packet);
- cond_resched();
return RCV_PKT_OK;
} else {
this_cpu_inc(*packet->rcd->dd->rcv_limit);
@@ -560,7 +560,7 @@ static void __obtain_firmware(struct hfi1_devdata *dd)
* something that holds for 30 seconds. If we do that twice
* in a row it triggers task blocked warning.
*/
- cond_resched();
+ cond_resched_stall();
if (fw_8051_load)
dispose_one_firmware(&fw_8051);
if (fw_fabric_serdes_load)
@@ -1958,7 +1958,6 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
rcd->egrbufs.rcvtids[idx].dma, order);
- cond_resched();
}
return 0;
@@ -459,7 +459,6 @@ bool hfi1_schedule_send_yield(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
return true;
}
- cond_resched();
this_cpu_inc(*ps->ppd->dd->send_schedule);
ps->timeout = jiffies + ps->timeout_int;
}
@@ -2985,7 +2985,10 @@ static int v2_wait_mbox_complete(struct hns_roce_dev *hr_dev, u32 timeout,
return -ETIMEDOUT;
}
- cond_resched();
+ /* The timeout is in hundreds of msecs. Maybe this should be a
+ * timed wait instead?
+ */
+ cond_resched_stall();
ret = -EBUSY;
}
@@ -1674,7 +1674,6 @@ int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
RCVHQ_RCV_TYPE_EAGER, pa);
pa += egrsize;
}
- cond_resched(); /* don't hog the cpu */
}
return 0;
@@ -778,12 +778,11 @@ int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);
/* Applications that get this state typically spin on it.
- * Yield the processor
+ * Giving up the spinlock will reschedule if needed.
*/
spin_lock_irqsave(&qp->state_lock, flags);
if (qp->attr.sq_draining) {
spin_unlock_irqrestore(&qp->state_lock, flags);
- cond_resched();
} else {
spin_unlock_irqrestore(&qp->state_lock, flags);
}
@@ -227,7 +227,7 @@ void rxe_cleanup_task(struct rxe_task *task)
* for the previously scheduled tasks to finish.
*/
while (!is_done(task))
- cond_resched();
+ cond_resched_stall();
spin_lock_irqsave(&task->lock, flags);
task->state = TASK_STATE_INVALID;
@@ -289,7 +289,7 @@ void rxe_disable_task(struct rxe_task *task)
spin_unlock_irqrestore(&task->lock, flags);
while (!is_done(task))
- cond_resched();
+ cond_resched_stall();
spin_lock_irqsave(&task->lock, flags);
task->state = TASK_STATE_DRAINED;
@@ -529,7 +529,6 @@ static ssize_t evdev_write(struct file *file, const char __user *buffer,
input_inject_event(&evdev->handle,
event.type, event.code, event.value);
- cond_resched();
}
out:
@@ -52,7 +52,7 @@ static void clps711x_keypad_poll(struct input_dev *input)
/* Read twice for protection against fluctuations */
do {
state = gpiod_get_value_cansleep(data->desc);
- cond_resched();
+ cond_resched_stall();
state1 = gpiod_get_value_cansleep(data->desc);
} while (state != state1);
@@ -624,7 +624,6 @@ static ssize_t uinput_inject_events(struct uinput_device *udev,
input_event(udev->dev, ev.type, ev.code, ev.value);
bytes += input_event_size();
- cond_resched();
}
return bytes;
@@ -704,7 +704,6 @@ static ssize_t mousedev_write(struct file *file, const char __user *buffer,
mousedev_generate_response(client, c);
spin_unlock_irq(&client->packet_lock);
- cond_resched();
}
kill_fasync(&client->fasync, SIGIO, POLL_IN);
@@ -1582,8 +1582,6 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
for (i = 0; i < ARRAY_SIZE(evt); ++i)
dev_info(smmu->dev, "\t0x%016llx\n",
(unsigned long long)evt[i]);
-
- cond_resched();
}
/*
@@ -81,9 +81,6 @@ static int vpx3220_fp_status(struct v4l2_subdev *sd)
return 0;
udelay(10);
-
- if (need_resched())
- cond_resched();
}
return -1;
@@ -140,7 +140,7 @@ static int cobalt_tx_bytes(struct cobalt_i2c_regs __iomem *regs,
while (status & M00018_SR_BITMAP_TIP_MSK) {
if (time_after(jiffies, start_time + adap->timeout))
return -ETIMEDOUT;
- cond_resched();
+ cond_resched_stall();
status = ioread8(&regs->cr_sr);
}
@@ -199,7 +199,7 @@ static int cobalt_rx_bytes(struct cobalt_i2c_regs __iomem *regs,
while (status & M00018_SR_BITMAP_TIP_MSK) {
if (time_after(jiffies, start_time + adap->timeout))
return -ETIMEDOUT;
- cond_resched();
+ cond_resched_stall();
status = ioread8(&regs->cr_sr);
}
@@ -364,8 +364,7 @@ static inline int bcm_vk_wait(struct bcm_vk *vk, enum pci_barno bar,
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
- cpu_relax();
- cond_resched();
+ cond_resched_stall();
} while ((rd_val & mask) != value);
return 0;
@@ -1295,8 +1295,7 @@ int bcm_vk_release(struct inode *inode, struct file *p_file)
break;
}
dma_cnt = atomic_read(&ctx->dma_cnt);
- cpu_relax();
- cond_resched();
+ cond_resched_stall();
} while (dma_cnt);
dev_dbg(dev, "Draining for [fd-%d] pid %d - delay %d ms\n",
ctx->idx, pid, jiffies_to_msecs(jiffies - start_time));
@@ -1004,7 +1004,6 @@ static int genwqe_health_thread(void *data)
}
cd->last_gfir = gfir;
- cond_resched();
}
return 0;
@@ -1041,7 +1040,7 @@ static int genwqe_health_thread(void *data)
/* genwqe_bus_reset failed(). Now wait for genwqe_remove(). */
while (!kthread_should_stop())
- cond_resched();
+ cond_resched_stall();
return -EIO;
}
@@ -1207,12 +1207,6 @@ static int genwqe_card_thread(void *data)
}
if (should_stop)
break;
-
- /*
- * Avoid soft lockups on heavy loads; we do not want
- * to disable our interrupts.
- */
- cond_resched();
}
return 0;
}
@@ -1322,7 +1322,6 @@ static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd)
genwqe_open_files(cd); i++) {
dev_info(&pci_dev->dev, " %d sec ...", i);
- cond_resched();
msleep(1000);
}
@@ -1340,7 +1339,6 @@ static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd)
genwqe_open_files(cd); i++) {
dev_warn(&pci_dev->dev, " %d sec ...", i);
- cond_resched();
msleep(1000);
}
}
@@ -1158,8 +1158,6 @@ static void vmballoon_inflate(struct vmballoon *b)
vmballoon_split_refused_pages(&ctl);
ctl.page_size--;
}
-
- cond_resched();
}
/*
@@ -1282,8 +1280,6 @@ static unsigned long vmballoon_deflate(struct vmballoon *b, uint64_t n_frames,
break;
ctl.page_size++;
}
-
- cond_resched();
}
return deflated_frames;
@@ -192,9 +192,6 @@ static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout,
if (cp[i] != byte)
return cp[i];
}
-
- /* If we need long timeouts, we may release the CPU */
- cond_resched();
} while (time_is_after_jiffies(start + timeout));
return -ETIMEDOUT;
}
@@ -435,7 +435,6 @@ static int btt_map_init(struct arena_info *arena)
offset += size;
mapsize -= size;
- cond_resched();
}
free:
@@ -479,7 +478,6 @@ static int btt_log_init(struct arena_info *arena)
offset += size;
logsize -= size;
- cond_resched();
}
for (i = 0; i < arena->nfree; i++) {
@@ -432,8 +432,6 @@ static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC,
GFP_KERNEL);
bio->bi_iter.bi_sector = sector;
- /* This may take a while, so be nice to others */
- cond_resched();
}
sector += bdev_zone_sectors(bdev);
}
@@ -1238,7 +1238,6 @@ static size_t parport_ip32_epp_write_addr(struct parport *p, const void *buf,
static unsigned int parport_ip32_fifo_wait_break(struct parport *p,
unsigned long expire)
{
- cond_resched();
if (time_after(jiffies, expire)) {
pr_debug1(PPIP32 "%s: FIFO write timed out\n", p->name);
return 1;
@@ -663,8 +663,6 @@ static size_t parport_pc_fifo_write_block_dma(struct parport *port,
}
/* Is serviceIntr set? */
if (!(inb(ECONTROL(port)) & (1<<2))) {
- cond_resched();
-
goto false_alarm;
}
@@ -674,8 +672,6 @@ static size_t parport_pc_fifo_write_block_dma(struct parport *port,
count = get_dma_residue(port->dma);
release_dma_lock(dmaflag);
- cond_resched(); /* Can't yield the port. */
-
/* Anyone else waiting for the port? */
if (port->waithead) {
printk(KERN_DEBUG "Somebody wants the port\n");
@@ -719,7 +719,6 @@ static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
data[off - init_off + 3] = (val >> 24) & 0xff;
off += 4;
size -= 4;
- cond_resched();
}
if (size >= 2) {
@@ -83,7 +83,6 @@ static ssize_t proc_bus_pci_read(struct file *file, char __user *buf,
buf += 4;
pos += 4;
cnt -= 4;
- cond_resched();
}
if (cnt >= 2) {
@@ -56,7 +56,7 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
ret = -EBUSY;
tm_delta = ktime_us_delta(ktime_get(), tm);
if (tm_delta > OS_MAILBOX_TIMEOUT_AVG_US)
- cond_resched();
+ cond_resched_stall();
continue;
}
ret = 0;
@@ -95,7 +95,7 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
ret = -EBUSY;
tm_delta = ktime_us_delta(ktime_get(), tm);
if (tm_delta > OS_MAILBOX_TIMEOUT_AVG_US)
- cond_resched();
+ cond_resched_stall();
continue;
}
@@ -659,11 +659,6 @@ static int slow_eval_known_fn(struct subchannel *sch, void *data)
rc = css_evaluate_known_subchannel(sch, 1);
if (rc == -EAGAIN)
css_schedule_eval(sch->schid);
- /*
- * The loop might take long time for platforms with lots of
- * known devices. Allow scheduling here.
- */
- cond_resched();
}
return 0;
}
@@ -695,9 +690,6 @@ static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
default:
rc = 0;
}
- /* Allow scheduling here since the containing loop might
- * take a while. */
- cond_resched();
}
return rc;
}
@@ -738,8 +738,6 @@ static void NCR5380_main(struct work_struct *work)
maybe_release_dma_irq(instance);
}
spin_unlock_irq(&hostdata->lock);
- if (!done)
- cond_resched();
} while (!done);
}
@@ -1696,7 +1696,6 @@ __mega_busywait_mbox (adapter_t *adapter)
if (!mbox->m_in.busy)
return 0;
udelay(100);
- cond_resched();
}
return -1; /* give up after 1 second */
}
@@ -1943,7 +1943,6 @@ static int qedi_percpu_io_thread(void *arg)
if (!work->is_solicited)
kfree(work);
}
- cond_resched();
spin_lock_irqsave(&p->p_work_lock, flags);
}
set_current_state(TASK_INTERRUPTIBLE);
@@ -972,7 +972,6 @@ qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
if (ret < 0 || (val & 1) == 0)
return ret;
udelay(10);
- cond_resched();
}
ql_log(ql_log_warn, vha, 0xb00d,
"Timeout reached waiting for write finish.\n");
@@ -1037,7 +1036,6 @@ ql82xx_rom_lock_d(struct qla_hw_data *ha)
while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
udelay(100);
- cond_resched();
loops++;
}
if (loops >= 50000) {
@@ -463,7 +463,6 @@ qla24xx_read_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t *data)
return QLA_SUCCESS;
}
udelay(10);
- cond_resched();
}
ql_log(ql_log_warn, pci_get_drvdata(ha->pdev), 0x7090,
@@ -505,7 +504,6 @@ qla24xx_write_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t data)
if (!(rd_reg_dword(&reg->flash_addr) & FARX_DATA_FLAG))
return QLA_SUCCESS;
udelay(10);
- cond_resched();
}
ql_log(ql_log_warn, pci_get_drvdata(ha->pdev), 0x7090,
@@ -2151,7 +2149,6 @@ qla2x00_poll_flash(struct qla_hw_data *ha, uint32_t addr, uint8_t poll_data,
}
udelay(10);
barrier();
- cond_resched();
}
return status;
}
@@ -2301,7 +2298,6 @@ qla2x00_read_flash_data(struct qla_hw_data *ha, uint8_t *tmp_buf,
if (saddr % 100)
udelay(10);
*tmp_buf = data;
- cond_resched();
}
}
@@ -2589,7 +2585,6 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, void *buf,
rval = QLA_FUNCTION_FAILED;
break;
}
- cond_resched();
}
} while (0);
qla2x00_flash_disable(ha);
@@ -3643,7 +3643,6 @@ qla4_82xx_read_flash_data(struct scsi_qla_host *ha, uint32_t *dwptr,
int loops = 0;
while ((qla4_82xx_rom_lock(ha) != 0) && (loops < 50000)) {
udelay(100);
- cond_resched();
loops++;
}
if (loops >= 50000) {
@@ -442,7 +442,7 @@ static irqreturn_t scsifront_irq_fn(int irq, void *dev_id)
while (scsifront_cmd_done(info, &eoiflag))
/* Yield point for this unbounded loop. */
- cond_resched();
+ cond_resched_stall();
xen_irq_lateeoi(irq, eoiflag);
@@ -775,8 +775,7 @@ static void lantiq_ssc_bussy_work(struct work_struct *work)
spi_finalize_current_transfer(spi->host);
return;
}
-
- cond_resched();
+ cond_resched_stall();
} while (!time_after_eq(jiffies, end));
if (spi->host->cur_msg)
@@ -100,7 +100,7 @@ static int meson_spifc_wait_ready(struct meson_spifc *spifc)
regmap_read(spifc->regmap, REG_SLAVE, &data);
if (data & SLAVE_TRST_DONE)
return 0;
- cond_resched();
+ cond_resched_stall();
} while (!time_after(jiffies, deadline));
return -ETIMEDOUT;
@@ -1808,7 +1808,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
/* Prod the scheduler in case transfer_one() was busy waiting */
if (!ret)
- cond_resched();
+ cond_resched_stall();
return;
out_unlock:
@@ -3775,7 +3775,7 @@ unsigned int send_beacon(struct adapter *padapter)
issue_beacon(padapter, 100);
issue++;
do {
- cond_resched();
+ cond_resched_stall();
rtw_hal_get_hwreg(padapter, HW_VAR_BCN_VALID, (u8 *)(&bxmitok));
poll++;
} while ((poll%10) != 0 && false == bxmitok && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
@@ -576,8 +576,6 @@ void LPS_Leave_check(struct adapter *padapter)
bReady = false;
start_time = jiffies;
- cond_resched();
-
while (1) {
mutex_lock(&pwrpriv->lock);
@@ -581,7 +581,6 @@ static int optee_ffa_yielding_call(struct tee_context *ctx,
* filled in by ffa_mem_ops->sync_send_receive() returning
* above.
*/
- cond_resched();
optee_handle_ffa_rpc(ctx, optee, data->data1, rpc_arg);
cmd = OPTEE_FFA_YIELDING_CALL_RESUME;
data->data0 = cmd;
@@ -943,7 +943,6 @@ static int optee_smc_do_call_with_arg(struct tee_context *ctx,
*/
optee_cq_wait_for_completion(&optee->call_queue, &w);
} else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
- cond_resched();
param.a0 = res.a0;
param.a1 = res.a1;
param.a2 = res.a2;
@@ -538,7 +538,6 @@ static ssize_t hvc_write(struct tty_struct *tty, const u8 *buf, size_t count)
if (count) {
if (hp->n_outbuf > 0)
hvc_flush(hp);
- cond_resched();
}
}
@@ -653,7 +652,7 @@ static int __hvc_poll(struct hvc_struct *hp, bool may_sleep)
if (may_sleep) {
spin_unlock_irqrestore(&hp->lock, flags);
- cond_resched();
+
spin_lock_irqsave(&hp->lock, flags);
}
@@ -725,7 +724,7 @@ static int __hvc_poll(struct hvc_struct *hp, bool may_sleep)
if (may_sleep) {
/* Keep going until the flip is full */
spin_unlock_irqrestore(&hp->lock, flags);
- cond_resched();
+
spin_lock_irqsave(&hp->lock, flags);
goto read_again;
} else if (read_total < HVC_ATOMIC_READ_MAX) {
@@ -802,7 +801,6 @@ static int khvcd(void *unused)
mutex_lock(&hvc_structs_mutex);
list_for_each_entry(hp, &hvc_structs, next) {
poll_mask |= __hvc_poll(hp, true);
- cond_resched();
}
mutex_unlock(&hvc_structs_mutex);
} else
@@ -498,9 +498,6 @@ static void flush_to_ldisc(struct work_struct *work)
lookahead_bufs(port, head);
if (!rcvd)
break;
-
- if (need_resched())
- cond_resched();
}
mutex_unlock(&buf->lock);
@@ -1032,7 +1032,6 @@ static ssize_t iterate_tty_write(struct tty_ldisc *ld, struct tty_struct *tty,
ret = -ERESTARTSYS;
if (signal_pending(current))
break;
- cond_resched();
}
if (written) {
tty_update_time(tty, true);
@@ -451,7 +451,6 @@ static void __max3420_start(struct max3420_udc *udc)
val = spi_rd8(udc, MAX3420_REG_USBIRQ);
if (val & OSCOKIRQ)
break;
- cond_resched();
}
/* Enable PULL-UP only when Vbus detected */
@@ -1294,7 +1294,7 @@ max3421_reset_hcd(struct usb_hcd *hcd)
"timed out waiting for oscillator OK signal");
return 1;
}
- cond_resched();
+ cond_resched_stall();
}
/*
@@ -1086,7 +1086,7 @@ static irqreturn_t xenhcd_int(int irq, void *dev_id)
while (xenhcd_urb_request_done(info, &eoiflag) |
xenhcd_conn_notify(info, &eoiflag))
/* Yield point for this unbounded loop. */
- cond_resched();
+ cond_resched_stall();
xen_irq_lateeoi(irq, eoiflag);
return IRQ_HANDLED;
@@ -457,8 +457,6 @@ static int tce_iommu_clear(struct tce_container *container,
}
}
- cond_resched();
-
direction = DMA_NONE;
oldhpa = 0;
ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry, &oldhpa,
@@ -962,8 +962,6 @@ static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
kfree(entry);
}
- cond_resched();
-
return unlocked;
}
@@ -1029,7 +1027,6 @@ static size_t unmap_unpin_slow(struct vfio_domain *domain,
unmapped >> PAGE_SHIFT,
false);
*iova += unmapped;
- cond_resched();
}
return unmapped;
}
@@ -1062,7 +1059,6 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
list_for_each_entry_continue(d, &iommu->domain_list, next) {
iommu_unmap(d->domain, dma->iova, dma->size);
- cond_resched();
}
iommu_iotlb_gather_init(&iotlb_gather);
@@ -1439,8 +1435,6 @@ static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
GFP_KERNEL);
if (ret)
goto unwind;
-
- cond_resched();
}
return 0;
@@ -1448,7 +1442,6 @@ static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
unwind:
list_for_each_entry_continue_reverse(d, &iommu->domain_list, next) {
iommu_unmap(d->domain, iova, npage << PAGE_SHIFT);
- cond_resched();
}
return ret;
@@ -410,7 +410,6 @@ static bool vhost_worker(void *data)
kcov_remote_start_common(worker->kcov_handle);
work->fn(work);
kcov_remote_stop();
- cond_resched();
}
}
@@ -870,12 +870,10 @@ static int vgacon_do_font_op(struct vgastate *state, char *arg, int set,
if (set)
for (i = 0; i < cmapsz; i++) {
vga_writeb(arg[i], charmap + i);
- cond_resched();
}
else
for (i = 0; i < cmapsz; i++) {
arg[i] = vga_readb(charmap + i);
- cond_resched();
}
/*
@@ -889,12 +887,10 @@ static int vgacon_do_font_op(struct vgastate *state, char *arg, int set,
if (set)
for (i = 0; i < cmapsz; i++) {
vga_writeb(arg[i], charmap + i);
- cond_resched();
}
else
for (i = 0; i < cmapsz; i++) {
arg[i] = vga_readb(charmap + i);
- cond_resched();
}
}
}
@@ -1754,7 +1754,6 @@ static int virtio_mem_sbm_plug_request(struct virtio_mem *vm, uint64_t diff)
rc = virtio_mem_sbm_plug_any_sb(vm, mb_id, &nb_sb);
if (rc || !nb_sb)
goto out_unlock;
- cond_resched();
}
}
@@ -1772,7 +1771,6 @@ static int virtio_mem_sbm_plug_request(struct virtio_mem *vm, uint64_t diff)
rc = virtio_mem_sbm_plug_and_add_mb(vm, mb_id, &nb_sb);
if (rc || !nb_sb)
return rc;
- cond_resched();
}
/* Try to prepare, plug and add new blocks */
@@ -1786,7 +1784,6 @@ static int virtio_mem_sbm_plug_request(struct virtio_mem *vm, uint64_t diff)
rc = virtio_mem_sbm_plug_and_add_mb(vm, mb_id, &nb_sb);
if (rc)
return rc;
- cond_resched();
}
return 0;
@@ -1869,7 +1866,6 @@ static int virtio_mem_bbm_plug_request(struct virtio_mem *vm, uint64_t diff)
nb_bb--;
if (rc || !nb_bb)
return rc;
- cond_resched();
}
/* Try to prepare, plug and add new big blocks */
@@ -1885,7 +1881,6 @@ static int virtio_mem_bbm_plug_request(struct virtio_mem *vm, uint64_t diff)
nb_bb--;
if (rc)
return rc;
- cond_resched();
}
return 0;
@@ -2107,7 +2102,6 @@ static int virtio_mem_sbm_unplug_request(struct virtio_mem *vm, uint64_t diff)
if (rc || !nb_sb)
goto out_unlock;
mutex_unlock(&vm->hotplug_mutex);
- cond_resched();
mutex_lock(&vm->hotplug_mutex);
}
if (!unplug_online && i == 1) {
@@ -2250,8 +2244,6 @@ static int virtio_mem_bbm_unplug_request(struct virtio_mem *vm, uint64_t diff)
*/
for (i = 0; i < 3; i++) {
virtio_mem_bbm_for_each_bb_rev(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED) {
- cond_resched();
-
/*
* As we're holding no locks, these checks are racy,
* but we don't care.
There are broadly three sets of uses of cond_resched():

1. Calls to cond_resched() out of the goodness of our heart, otherwise
   known as avoiding lockup splats.

2. Open coded variants of cond_resched_lock() which call
   cond_resched().

3. Retry or error handling loops, where cond_resched() is used as a
   quick alternative to spinning in a tight loop.

When running under a full preemption model, the cond_resched() reduces
to a NOP (not even a barrier) so removing it obviously cannot matter.

But considering only voluntary preemption models (for, say, code that
has been mostly tested under those), for set-1 and set-2 the scheduler
can now preempt kernel tasks running beyond their time quanta anywhere
they are preemptible() [1]. Which removes any need for these explicitly
placed scheduling points.

The cond_resched() calls in set-3 are a little more difficult. To start
with, given its NOP character under full preemption, it never actually
saved us from a tight loop. With voluntary preemption, it's not a NOP,
but it might as well be -- for most workloads the scheduler does not
have an interminable supply of runnable tasks on the runqueue.

So, cond_resched() is useful to not get softlockup splats, but not
terribly good for error handling. Ideally, these should be replaced
with some kind of timed or event wait. For now we use
cond_resched_stall(), which tries to schedule if possible, and executes
a cpu_relax() if not.

The cond_resched() calls here are of all kinds. Those from set-1 or
set-2 are quite straightforward to handle. There are quite a few from
set-3, where, as noted above, we use cond_resched() as if it were an
amulet. Which I suppose it is, in that it wards off softlockup or RCU
splats. Those are now cond_resched_stall(), but in most cases, given
that the timeouts are in milliseconds, they could easily be timed
waits.

[1] https://lore.kernel.org/lkml/20231107215742.363031-1-ankur.a.arora@oracle.com/

Cc: Oded Gabbay <ogabbay@kernel.org>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: "Jason A. Donenfeld" <Jason@zx2c4.com>
Cc: Amit Shah <amit@kernel.org>
Cc: Gonglei <arei.gonglei@huawei.com>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Jason Wang <jasowang@redhat.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Jonathan Cameron <jonathan.cameron@huawei.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Alison Schofield <alison.schofield@intel.com>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: "Christian König" <christian.koenig@amd.com>
Cc: Andi Shyti <andi.shyti@kernel.org>
Cc: Ray Jui <rjui@broadcom.com>
Cc: Scott Branden <sbranden@broadcom.com>
Cc: Chris Packham <chris.packham@alliedtelesis.co.nz>
Cc: Shawn Guo <shawnguo@kernel.org>
Cc: Sascha Hauer <s.hauer@pengutronix.de>
Cc: Junxian Huang <huangjunxian6@hisilicon.com>
Cc: Dmitry Torokhov <dmitry.torokhov@gmail.com>
Cc: Will Deacon <will@kernel.org>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Mauro Carvalho Chehab <mchehab@kernel.org>
Cc: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Cc: Hans de Goede <hdegoede@redhat.com>
Cc: "Ilpo Järvinen" <ilpo.jarvinen@linux.intel.com>
Cc: Mark Gross <markgross@kernel.org>
Cc: Finn Thain <fthain@linux-m68k.org>
Cc: Michael Schmitz <schmitzmic@gmail.com>
Cc: "James E.J. Bottomley" <jejb@linux.ibm.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: Kashyap Desai <kashyap.desai@broadcom.com>
Cc: Sumit Saxena <sumit.saxena@broadcom.com>
Cc: Shivasharan S <shivasharan.srikanteshwara@broadcom.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Neil Armstrong <neil.armstrong@linaro.org>
Cc: Jens Wiklander <jens.wiklander@linaro.org>
Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Helge Deller <deller@gmx.de>
Cc: David Hildenbrand <david@redhat.com>
Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
---
 drivers/accel/ivpu/ivpu_drv.c | 2 --
 drivers/accel/ivpu/ivpu_gem.c | 1 -
 drivers/accel/ivpu/ivpu_pm.c | 8 ++++++--
 drivers/accel/qaic/qaic_data.c | 2 --
 drivers/auxdisplay/charlcd.c | 11 -----------
 drivers/base/power/domain.c | 1 -
 drivers/block/aoe/aoecmd.c | 3 +--
 drivers/block/brd.c | 1 -
 drivers/block/drbd/drbd_bitmap.c | 4 ----
 drivers/block/drbd/drbd_debugfs.c | 1 -
 drivers/block/loop.c | 3 ---
 drivers/block/xen-blkback/blkback.c | 3 ---
 drivers/block/zram/zram_drv.c | 2 --
 drivers/bluetooth/virtio_bt.c | 1 -
 drivers/char/hw_random/arm_smccc_trng.c | 1 -
 drivers/char/lp.c | 2 --
 drivers/char/mem.c | 4 ----
 drivers/char/mwave/3780i.c | 4 +---
 drivers/char/ppdev.c | 4 ----
 drivers/char/random.c | 2 --
 drivers/char/virtio_console.c | 1 -
 drivers/crypto/virtio/virtio_crypto_core.c | 1 -
 drivers/cxl/pci.c | 1 -
 drivers/dma-buf/selftest.c | 1 -
 drivers/dma-buf/st-dma-fence-chain.c | 1 -
 drivers/fsi/fsi-sbefifo.c | 14 ++++++++++++--
 drivers/i2c/busses/i2c-bcm-iproc.c | 9 +++++++--
 drivers/i2c/busses/i2c-highlander.c | 9 +++++++--
 drivers/i2c/busses/i2c-ibm_iic.c | 11 +++++++----
 drivers/i2c/busses/i2c-mpc.c | 2 +-
 drivers/i2c/busses/i2c-mxs.c | 9 ++++++++-
 drivers/i2c/busses/scx200_acb.c | 9 +++++++--
 drivers/infiniband/core/umem.c | 1 -
 drivers/infiniband/hw/hfi1/driver.c | 1 -
 drivers/infiniband/hw/hfi1/firmware.c | 2 +-
 drivers/infiniband/hw/hfi1/init.c | 1 -
 drivers/infiniband/hw/hfi1/ruc.c | 1 -
 drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 5 ++++-
 drivers/infiniband/hw/qib/qib_init.c | 1 -
 drivers/infiniband/sw/rxe/rxe_qp.c | 3 +--
 drivers/infiniband/sw/rxe/rxe_task.c | 4 ++--
 drivers/input/evdev.c | 1 -
 drivers/input/keyboard/clps711x-keypad.c | 2 +-
 drivers/input/misc/uinput.c | 1 -
 drivers/input/mousedev.c | 1 -
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 2 --
 drivers/media/i2c/vpx3220.c | 3 ---
 drivers/media/pci/cobalt/cobalt-i2c.c | 4 ++--
 drivers/misc/bcm-vk/bcm_vk_dev.c | 3 +--
 drivers/misc/bcm-vk/bcm_vk_msg.c | 3 +--
 drivers/misc/genwqe/card_base.c | 3 +--
 drivers/misc/genwqe/card_ddcb.c | 6 ------
 drivers/misc/genwqe/card_dev.c | 2 --
 drivers/misc/vmw_balloon.c | 4 ----
 drivers/mmc/host/mmc_spi.c | 3 ---
 drivers/nvdimm/btt.c | 2 --
 drivers/nvme/target/zns.c | 2 --
 drivers/parport/parport_ip32.c | 1 -
 drivers/parport/parport_pc.c | 4 ----
 drivers/pci/pci-sysfs.c | 1 -
 drivers/pci/proc.c | 1 -
 .../x86/intel/speed_select_if/isst_if_mbox_pci.c | 4 ++--
 drivers/s390/cio/css.c | 8 --------
 drivers/scsi/NCR5380.c | 2 --
 drivers/scsi/megaraid.c | 1 -
 drivers/scsi/qedi/qedi_main.c | 1 -
 drivers/scsi/qla2xxx/qla_nx.c | 2 --
 drivers/scsi/qla2xxx/qla_sup.c | 5 -----
 drivers/scsi/qla4xxx/ql4_nx.c | 1 -
 drivers/scsi/xen-scsifront.c | 2 +-
 drivers/spi/spi-lantiq-ssc.c | 3 +--
 drivers/spi/spi-meson-spifc.c | 2 +-
 drivers/spi/spi.c | 2 +-
 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c | 2 +-
 drivers/staging/rtl8723bs/core/rtw_pwrctrl.c | 2 --
 drivers/tee/optee/ffa_abi.c | 1 -
 drivers/tee/optee/smc_abi.c | 1 -
 drivers/tty/hvc/hvc_console.c | 6 ++----
 drivers/tty/tty_buffer.c | 3 ---
 drivers/tty/tty_io.c | 1 -
 drivers/usb/gadget/udc/max3420_udc.c | 1 -
 drivers/usb/host/max3421-hcd.c | 2 +-
 drivers/usb/host/xen-hcd.c | 2 +-
 drivers/vfio/vfio_iommu_spapr_tce.c | 2 --
 drivers/vfio/vfio_iommu_type1.c | 7 -------
 drivers/vhost/vhost.c | 1 -
 drivers/video/console/vgacon.c | 4 ----
 drivers/virtio/virtio_mem.c | 8 --------
 88 files changed, 82 insertions(+), 190 deletions(-)
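
Note: several of the comments added above say that, with timeouts in the
millisecond range, these polling loops would arguably be better served by a
timed or event wait than by cond_resched_stall(). As a rough illustrative
sketch (not part of this patch), such a conversion for a hypothetical
memory-mapped status register could use the existing readl_poll_timeout()
helper from <linux/iopoll.h>; the foo_* names, register offset, bit, and
poll/timeout values below are made up for the example:

	#include <linux/iopoll.h>
	#include <linux/io.h>
	#include <linux/bits.h>

	/* Hypothetical register layout, for illustration only. */
	#define FOO_STATUS		0x04
	#define FOO_STATUS_DONE		BIT(0)

	static int foo_wait_done(void __iomem *base)
	{
		u32 sts;

		/*
		 * Sleeps between reads (usleep_range() under the covers)
		 * instead of spinning with cond_resched()/cond_resched_stall();
		 * returns -ETIMEDOUT if DONE is not seen within 20 ms.
		 */
		return readl_poll_timeout(base + FOO_STATUS, sts,
					  sts & FOO_STATUS_DONE,
					  100 /* delay_us */,
					  20000 /* timeout_us */);
	}

Where the hardware can raise an interrupt on completion, an event wait
(e.g. wait_for_completion_timeout() signalled from the IRQ handler) would be
better still; the sketch above only covers the pure polling case.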