mirror of
https://github.com/torvalds/linux.git
synced 2026-01-25 07:47:50 +00:00
lib/test_hmm: make dmirror_atomic_map() consume a single page
Patch series "mm: cleanups for device-exclusive entries (hmm)", v2. Some smaller device-exclusive cleanups I have lying around. This patch (of 5): The caller now always passes a single page; let's simplify, and return "0" on success. Link: https://lkml.kernel.org/r/20250226132257.2826043-1-david@redhat.com Link: https://lkml.kernel.org/r/20250226132257.2826043-2-david@redhat.com Signed-off-by: David Hildenbrand <david@redhat.com> Reviewed-by: Alistair Popple <apopple@nvidia.com> Cc: Jason Gunthorpe <jgg@nvidia.com> Cc: Jérôme Glisse <jglisse@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Committed by: Andrew Morton
Parent commit: 173a3dc051
Commit: 66add5e909
@@ -706,34 +706,23 @@ static int dmirror_check_atomic(struct dmirror *dmirror, unsigned long start,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dmirror_atomic_map(unsigned long start, unsigned long end,
|
||||
struct page **pages, struct dmirror *dmirror)
|
||||
static int dmirror_atomic_map(unsigned long addr, struct page *page,
|
||||
struct dmirror *dmirror)
|
||||
{
|
||||
unsigned long pfn, mapped = 0;
|
||||
int i;
|
||||
void *entry;
|
||||
|
||||
/* Map the migrated pages into the device's page tables. */
|
||||
mutex_lock(&dmirror->mutex);
|
||||
|
||||
for (i = 0, pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++, i++) {
|
||||
void *entry;
|
||||
|
||||
if (!pages[i])
|
||||
continue;
|
||||
|
||||
entry = pages[i];
|
||||
entry = xa_tag_pointer(entry, DPT_XA_TAG_ATOMIC);
|
||||
entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
|
||||
if (xa_is_err(entry)) {
|
||||
mutex_unlock(&dmirror->mutex);
|
||||
return xa_err(entry);
|
||||
}
|
||||
|
||||
mapped++;
|
||||
entry = xa_tag_pointer(page, DPT_XA_TAG_ATOMIC);
|
||||
entry = xa_store(&dmirror->pt, addr >> PAGE_SHIFT, entry, GFP_ATOMIC);
|
||||
if (xa_is_err(entry)) {
|
||||
mutex_unlock(&dmirror->mutex);
|
||||
return xa_err(entry);
|
||||
}
|
||||
|
||||
mutex_unlock(&dmirror->mutex);
|
||||
return mapped;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dmirror_migrate_finalize_and_map(struct migrate_vma *args,
|
||||
@@ -803,8 +792,7 @@ static int dmirror_exclusive(struct dmirror *dmirror,
 			break;
 		}

-		ret = dmirror_atomic_map(addr, addr + PAGE_SIZE, &page, dmirror);
-		ret = ret == 1 ? 0 : -EBUSY;
+		ret = dmirror_atomic_map(addr, page, dmirror);
 		folio_unlock(folio);
 		folio_put(folio);
 	}
|
||||
|
||||
Reference in New Issue
Block a user