path: root/include/linux/hmm.h
author      Christoph Hellwig <hch@lst.de>        2019-08-06 19:05:45 +0300
committer   Jason Gunthorpe <jgg@mellanox.com>    2019-08-07 14:58:06 -0300
commit      7f08263d9bc6627382da14f9e81d643d0329d5d1 (patch)
tree        7151f306c17fb5f7e5a8b262c876e7bf0bb9e292 /include/linux/hmm.h
parent      mm/hmm: remove superfluous arguments from hmm_range_register (diff)
download    linux-dev-7f08263d9bc6627382da14f9e81d643d0329d5d1.tar.xz
            linux-dev-7f08263d9bc6627382da14f9e81d643d0329d5d1.zip
mm/hmm: remove the page_shift member from struct hmm_range
All users pass PAGE_SIZE here, and if we wanted to support single entries
for huge pages we should really just add a HMM_FAULT_HUGEPAGE flag instead
that uses the huge page size instead of having the caller calculate that
size once, just for the hmm code to verify it.

Link: https://lore.kernel.org/r/20190806160554.14046-8-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
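The practical effect for callers: with page_shift gone, a range is always expressed in units of the CPU page size, so the arithmetic the removed helpers wrapped reduces to PAGE_SHIFT. A minimal sketch, assuming a struct hmm_range covering [start, end); hmm_range_npages() is a hypothetical helper used only for illustration, not part of the kernel API:

#include <linux/hmm.h>
#include <linux/mm.h>	/* for PAGE_SHIFT */

/*
 * Hypothetical helper, for illustration only: how a driver would size its
 * pfns array for a range after this patch.  Before the patch the same
 * computation would have read
 *	(range->end - range->start) >> hmm_range_page_shift(range)
 * but since every caller set page_shift to PAGE_SHIFT, the two are equal.
 */
static inline unsigned long hmm_range_npages(const struct hmm_range *range)
{
	return (range->end - range->start) >> PAGE_SHIFT;
}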
Diffstat (limited to 'include/linux/hmm.h')
-rw-r--r--  include/linux/hmm.h   22
1 file changed, 0 insertions, 22 deletions
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index c5b51376b453..51e18fbb8953 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -158,7 +158,6 @@ enum hmm_pfn_value_e {
* @values: pfn value for some special case (none, special, error, ...)
* @default_flags: default flags for the range (write, read, ... see hmm doc)
* @pfn_flags_mask: allows to mask pfn flags so that only default_flags matter
- * @page_shift: device virtual address shift value (should be >= PAGE_SHIFT)
* @pfn_shift: pfn shift value (should be <= PAGE_SHIFT)
* @valid: pfns array did not change since it has been fill by an HMM function
*/
@@ -172,32 +171,11 @@ struct hmm_range {
const uint64_t *values;
uint64_t default_flags;
uint64_t pfn_flags_mask;
- uint8_t page_shift;
uint8_t pfn_shift;
bool valid;
};

/*
- * hmm_range_page_shift() - return the page shift for the range
- * @range: range being queried
- * Return: page shift (page size = 1 << page shift) for the range
- */
-static inline unsigned hmm_range_page_shift(const struct hmm_range *range)
-{
- return range->page_shift;
-}
-
-/*
- * hmm_range_page_size() - return the page size for the range
- * @range: range being queried
- * Return: page size for the range in bytes
- */
-static inline unsigned long hmm_range_page_size(const struct hmm_range *range)
-{
- return 1UL << hmm_range_page_shift(range);
-}
-
-/*
* hmm_range_wait_until_valid() - wait for range to be valid
* @range: range affected by invalidation to wait on
* @timeout: time out for wait in ms (ie abort wait after that period of time)