@@ -1333,6 +1333,13 @@ static ssize_t scrub_show(struct device *dev,
busy = test_bit(ARS_BUSY, &acpi_desc->scrub_flags)
&& !test_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, busy ? "+\n" : "\n");
+ /* Allow an admin to poll the busy state at a higher rate */
+ if (busy && capable(CAP_SYS_RAWIO) && !test_and_set_bit(ARS_POLL,
+ &acpi_desc->scrub_flags)) {
+ acpi_desc->scrub_tmo = 1;
+ mod_delayed_work(nfit_wq, &acpi_desc->dwork, HZ);
+ }
+
mutex_unlock(&acpi_desc->init_mutex);
device_unlock(dev);
return rc;
@@ -3187,6 +3194,7 @@ static void acpi_nfit_scrub(struct work_struct *work)
else
notify_ars_done(acpi_desc);
memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
+ clear_bit(ARS_POLL, &acpi_desc->scrub_flags);
mutex_unlock(&acpi_desc->init_mutex);
}
@@ -213,6 +213,7 @@ struct nfit_mem {
enum scrub_flags {
ARS_BUSY,
ARS_CANCEL,
+ ARS_POLL,
};
struct acpi_nfit_desc {
The ARS implementation applies exponential back-off to the poll interval to prevent high-frequency access to the DIMM / platform interface. Depending on when the ARS completes, the poll interval may overshoot the completion event by minutes. Allow root to reset the timeout each time it probes the status. A one-second timeout is still enforced, but root can otherwise control the poll interval.

Reported-by: Erwin Tsaur <erwin.tsaur@oracle.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 drivers/acpi/nfit/core.c |    8 ++++++++
 drivers/acpi/nfit/nfit.h |    1 +
 2 files changed, 9 insertions(+)
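
[Not part of the patch] Below is a minimal userspace sketch of how an administrator might exercise the new behavior, assuming only what the patch shows: scrub_show() appends '+' to the scrub count while ARS_BUSY is set, and each read by a CAP_SYS_RAWIO-capable process re-arms the scrub worker on a one-second timeout instead of waiting out the back-off. The sysfs location of the nfit "scrub" attribute is platform dependent, so the sketch takes its path as a command-line argument rather than assuming one.

/*
 * Illustrative only: poll the nfit "scrub" attribute once per second
 * until the trailing '+' (ARS in flight) disappears. Run as root so
 * the read re-arms the scrub worker on the one-second timeout.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	char buf[32];

	if (argc != 2) {
		fprintf(stderr, "usage: %s <path-to-nfit-scrub-attribute>\n",
			argv[0]);
		return 1;
	}

	for (;;) {
		FILE *f = fopen(argv[1], "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		if (!fgets(buf, sizeof(buf), f)) {
			fclose(f);
			return 1;
		}
		fclose(f);

		/* scrub_show() emits "<count>+" while ARS_BUSY is set */
		if (!strchr(buf, '+')) {
			printf("ARS complete, scrub count: %s", buf);
			return 0;
		}
		sleep(1);
	}
}

Each pass through the loop re-opens and re-reads the attribute, which is what trips the CAP_SYS_RAWIO branch added to scrub_show() above; ARS_POLL keeps repeated reads from re-queuing the work more than once per completion.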