@@ -161,13 +161,11 @@ device must complete the update before the driver callback returns.
When the device driver wants to populate a range of virtual addresses, it can
use::
- long hmm_range_fault(struct hmm_range *range, unsigned int flags);
+ long hmm_range_fault(struct hmm_range *range);
-With the HMM_RANGE_SNAPSHOT flag, it will only fetch present CPU page table
-entries and will not trigger a page fault on missing or non-present entries.
-Without that flag, it does trigger a page fault on missing or read-only entries
-if write access is requested (see below). Page faults use the generic mm page
-fault code path just like a CPU page fault.
+It will trigger a page fault on missing or read-only entries if write access is
+requested (see below). Page faults use the generic mm page fault code path just
+like a CPU page fault.
-Both functions copy CPU page table entries into their pfns array argument. Each
-entry in that array corresponds to an address in the virtual range. HMM
+hmm_range_fault() copies CPU page table entries into its pfns array argument.
+Each entry in that array corresponds to an address in the virtual range. HMM
@@ -197,7 +195,7 @@ The usage pattern is::
again:
range.notifier_seq = mmu_interval_read_begin(&interval_sub);
down_read(&mm->mmap_sem);
- ret = hmm_range_fault(&range, HMM_RANGE_SNAPSHOT);
+ ret = hmm_range_fault(&range);
if (ret) {
up_read(&mm->mmap_sem);
if (ret == -EBUSY)
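Since snapshot behavior is no longer selected with a flag argument, the choice
between faulting and pure snapshotting is now expressed entirely through
range.default_flags and range.pfn_flags_mask (in the driver's own pfn-flag
encoding). A rough sketch of the two extremes, assuming the flags[] table
layout of struct hmm_range in this kernel version; the want_write variable is
hypothetical::

  /*
   * Fault-and-populate: ask for every page to become present (and writable
   * if needed); per-page bits in range.pfns[] are ignored because
   * pfn_flags_mask is zero.
   */
  range.default_flags = range.flags[HMM_PFN_VALID];
  if (want_write)
          range.default_flags |= range.flags[HMM_PFN_WRITE];
  range.pfn_flags_mask = 0;

  /*
   * Snapshot only: with no way for the valid bit to be requested, nothing
   * is faulted and the walk simply reports what the CPU page tables
   * currently contain.
   */
  range.default_flags = 0;
  range.pfn_flags_mask = 0;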
@@ -856,7 +856,7 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
range->notifier_seq = mmu_interval_read_begin(&bo->notifier);
down_read(&mm->mmap_sem);
- r = hmm_range_fault(range, 0);
+ r = hmm_range_fault(range);
up_read(&mm->mmap_sem);
if (unlikely(r <= 0)) {
/*
@@ -540,7 +540,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
range.default_flags = 0;
range.pfn_flags_mask = -1UL;
down_read(&mm->mmap_sem);
- ret = hmm_range_fault(&range, 0);
+ ret = hmm_range_fault(&range);
up_read(&mm->mmap_sem);
if (ret <= 0) {
if (ret == 0 || ret == -EBUSY)
@@ -167,13 +167,10 @@ static inline struct page *hmm_device_entry_to_page(const struct hmm_range *rang
return pfn_to_page(entry >> range->pfn_shift);
}
-/* Don't fault in missing PTEs, just snapshot the current state. */
-#define HMM_FAULT_SNAPSHOT (1 << 1)
-
/*
* Please see Documentation/vm/hmm.rst for how to use the range API.
*/
-long hmm_range_fault(struct hmm_range *range, unsigned int flags);
+long hmm_range_fault(struct hmm_range *range);
/*
* HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
@@ -29,7 +29,6 @@
struct hmm_vma_walk {
struct hmm_range *range;
unsigned long last;
- unsigned int flags;
};
enum {
@@ -111,9 +110,6 @@ static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
{
struct hmm_range *range = hmm_vma_walk->range;
- if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT)
- return 0;
-
/*
* So we not only consider the individual per page request we also
* consider the default flags requested for the range. The API can
@@ -146,10 +142,17 @@ hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
const uint64_t *pfns, unsigned long npages,
uint64_t cpu_flags)
{
+ struct hmm_range *range = hmm_vma_walk->range;
unsigned int required_fault = 0;
unsigned long i;
- if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT)
+ /*
+ * If there is no way for valid to be set in hmm_pte_need_fault() then
+ * don't bother to call it.
+ */
+ if (!(((range->flags[HMM_PFN_VALID] & range->pfn_flags_mask) |
+ range->default_flags) &
+ range->flags[HMM_PFN_VALID]))
return 0;
for (i = 0; i < npages; ++i) {
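The guard added above asks whether any per-page request, once masked by
pfn_flags_mask and OR-ed with default_flags, could ever contain the driver's
valid bit; if it cannot, hmm_pte_need_fault() can only return 0 for every
entry, so the per-page loop just above is skipped. A worked trace with an
invented flag encoding (values made up purely to make the bit arithmetic
visible)::

  /* Invented driver encoding, for illustration only. */
  const uint64_t demo_flags[HMM_PFN_FLAG_MAX] = {
          [HMM_PFN_VALID] = 1UL << 0,
          [HMM_PFN_WRITE] = 1UL << 1,
  };

  /* Snapshot-style range: no default request, per-page bits masked off. */
  uint64_t default_flags = 0, pfn_flags_mask = 0;

  /*
   * ((demo_flags[HMM_PFN_VALID] & pfn_flags_mask) | default_flags)
   *                & demo_flags[HMM_PFN_VALID]
   * == ((0x1 & 0) | 0) & 0x1 == 0
   * so hmm_range_need_fault() returns 0 and nothing is faulted.
   *
   * A faulting caller like the nouveau hunk above (default_flags = 0,
   * pfn_flags_mask = -1UL) makes the same expression non-zero, so the
   * per-page loop still runs and hmm_pte_need_fault() decides each entry.
   */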
@@ -559,7 +562,6 @@ static const struct mm_walk_ops hmm_walk_ops = {
/**
* hmm_range_fault - try to fault some address in a virtual address range
* @range: range being faulted
- * @flags: HMM_FAULT_* flags
*
* Return: the number of valid pages in range->pfns[] (from range start
* address), which may be zero. On error one of the following status codes
@@ -583,12 +585,11 @@ static const struct mm_walk_ops hmm_walk_ops = {
* On error, for one virtual address in the range, the function will mark the
* corresponding HMM pfn entry with an error flag.
*/
-long hmm_range_fault(struct hmm_range *range, unsigned int flags)
+long hmm_range_fault(struct hmm_range *range)
{
struct hmm_vma_walk hmm_vma_walk = {
.range = range,
.last = range->start,
- .flags = flags,
};
struct mm_struct *mm = range->notifier->mm;
int ret;
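On success the value returned by hmm_range_fault() is the number of valid
entries filled into range->pfns[] (see the kernel-doc above); each entry can
be decoded with hmm_device_entry_to_page(), the helper visible in the
include/linux/hmm.h hunk. A rough consumer sketch, assuming the range's
flags[] table is already populated and that the caller has revalidated
against mmu_interval_read_retry() as in the documentation's usage pattern;
my_dev_map() is a hypothetical device call::

  unsigned long i, npages = (range->end - range->start) >> PAGE_SHIFT;

  for (i = 0; i < npages; i++) {
          struct page *page;

          /* Entry never became valid (hole, or no fault was requested). */
          if (!(range->pfns[i] & range->flags[HMM_PFN_VALID]))
                  continue;

          page = hmm_device_entry_to_page(range, range->pfns[i]);

          /*
           * Hypothetical: program the device page table for this address,
           * honouring the write permission seen in the CPU page table.
           */
          my_dev_map(dev, range->start + (i << PAGE_SHIFT), page,
                     range->pfns[i] & range->flags[HMM_PFN_WRITE]);
  }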