@@ -74,4 +74,13 @@ struct xe_svm_range *xe_svm_range_from_addr(struct xe_svm *svm,
int xe_svm_build_sg(struct hmm_range *range, struct sg_table *st);
int xe_svm_devm_add(struct xe_tile *tile, struct xe_mem_region *mem);
void xe_svm_devm_remove(struct xe_device *xe, struct xe_mem_region *mem);
+
+int xe_devm_alloc_pages(struct xe_tile *tile,
+			unsigned long npages,
+			struct list_head *blocks,
+			unsigned long *pfn);
+
+void xe_devm_free_blocks(struct list_head *blocks);
+void xe_devm_page_free(struct page *page);
#endif
@@ -5,18 +5,162 @@
#include <linux/mm_types.h>
#include <linux/sched/mm.h>
+#include <linux/gfp.h>
+#include <linux/migrate.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-fence.h>
+#include <linux/bitops.h>
+#include <linux/bitmap.h>
+#include <drm/drm_buddy.h>
#include "xe_device_types.h"
#include "xe_trace.h"
+#include "xe_migrate.h"
+#include "xe_ttm_vram_mgr_types.h"
+#include "xe_assert.h"
+
+/**
+ * struct xe_svm_block_meta - svm metadata of a block allocated from drm buddy
+ *
+ * @lru: used to link this block to drm's lru lists. This will be replaced
+ * with struct drm_lru_entity later.
+ * @tile: tile from which we allocated this block
+ * @bitmap: A bitmap with one bit per page in this block. 1 means the page is
+ * used, 0 means the page is idle. When all bits of this block are 0, it is
+ * time to return this block to the drm buddy subsystem.
+ *
+ * SVM uses this data structure to manage each block allocated from drm
+ * buddy. A pointer to it is stored in the drm_buddy_block's private field.
+ */
+struct xe_svm_block_meta {
+	struct list_head lru;
+	struct xe_tile *tile;
+	unsigned long bitmap[];
+};
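+
+/*
+ * Sizing example: a 2MiB buddy block with 4KiB pages holds 512 pages, so its
+ * @bitmap needs BITS_TO_BYTES(512) = 64 bytes on top of
+ * sizeof(struct xe_svm_block_meta); xe_devm_alloc_pages() sizes its kzalloc()
+ * of this struct accordingly.
+ */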
+
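+/*
+ * Translate a buddy block offset into an absolute pfn within the memory
+ * region's host physical address range (mr->hpa_base).
+ */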
+static u64 block_offset_to_pfn(struct xe_mem_region *mr, u64 offset)
+{
+	/* DRM buddy's block offset is 0-based */
+	offset += mr->hpa_base;
+
+	return PHYS_PFN(offset);
+}
+
+/**
+ * xe_devm_alloc_pages() - allocate device pages from buddy allocator
+ *
+ * @tile: which tile to allocate device memory from
+ * @npages: how many pages to allocate
+ * @blocks: used to return the allocated blocks
+ * @pfn: used to return the pfn of all allocated pages. Must be big enough
+ * to hold at least @npages entries.
+ *
+ * This function allocates blocks of memory from the drm buddy allocator, and
+ * performs initialization work: set struct page::zone_device_data to point
+ * to the memory block; set/initialize drm_buddy_block::private field;
+ * lock each allocated page per hmm requirements; add the memory block to the
+ * lru manager's lru list - this is TBD.
+ *
+ * Return: 0 on success, error code otherwise
+ */
+int xe_devm_alloc_pages(struct xe_tile *tile,
+			unsigned long npages,
+			struct list_head *blocks,
+			unsigned long *pfn)
+{
+	struct drm_buddy *mm = &tile->mem.vram_mgr->mm;
+	struct drm_buddy_block *block, *tmp;
+	u64 size = npages << PAGE_SHIFT;
+	int ret = 0, i, j = 0;
+
+	ret = drm_buddy_alloc_blocks(mm, 0, mm->size, size, PAGE_SIZE,
+				     blocks, DRM_BUDDY_TOPDOWN_ALLOCATION);
+	if (unlikely(ret))
+		return ret;
+
+	list_for_each_entry_safe(block, tmp, blocks, link) {
+		struct xe_mem_region *mr = &tile->mem.vram;
+		u64 block_pfn_first, pages_per_block;
+		struct xe_svm_block_meta *meta;
+		u32 meta_size;
+
+		size = drm_buddy_block_size(mm, block);
+		pages_per_block = size >> PAGE_SHIFT;
+		meta_size = BITS_TO_BYTES(pages_per_block) +
+			    sizeof(struct xe_svm_block_meta);
+		/* FIXME: undo the partially initialized blocks on failure */
+		meta = kzalloc(meta_size, GFP_KERNEL);
+		if (unlikely(!meta))
+			return -ENOMEM;
+		bitmap_fill(meta->bitmap, pages_per_block);
+		meta->tile = tile;
+		block->private = meta;
+		block_pfn_first =
+			block_offset_to_pfn(mr, drm_buddy_block_offset(block));
+		for (i = 0; i < pages_per_block; i++) {
+			struct page *page;
+
+			pfn[j++] = block_pfn_first + i;
+			page = pfn_to_page(block_pfn_first + i);
+			/* Lock page per hmm requirement, see hmm.rst. */
+			zone_device_page_init(page);
+			page->zone_device_data = block;
+		}
+	}
+
+	return ret;
+}
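+
+/*
+ * Example (hypothetical caller, for illustration only): a migration path
+ * could allocate device pages for a faulting range and then work with the
+ * returned pfns roughly as follows:
+ *
+ *	LIST_HEAD(blocks);
+ *	unsigned long *pfns = kvcalloc(npages, sizeof(*pfns), GFP_KERNEL);
+ *	int err = xe_devm_alloc_pages(tile, npages, &blocks, pfns);
+ *
+ *	if (!err)
+ *		...migrate data into pfn_to_page(pfns[i]), map to userspace...
+ *
+ * Individual pages are later released through xe_devm_page_free(), while
+ * xe_devm_free_blocks() below returns a whole allocation in one go.
+ */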
+
+/*
+ * FIXME: we locked the pages by calling zone_device_page_init() in
+ * xe_devm_alloc_pages(). Should we unlock pages here?
+ */
+static void free_block(struct drm_buddy_block *block)
+{
+	struct xe_svm_block_meta *meta = block->private;
+	struct xe_tile *tile = meta->tile;
+	struct drm_buddy *mm = &tile->mem.vram_mgr->mm;
+
+	kfree(meta);
+	drm_buddy_free_block(mm, block);
+}
+
+/**
+ * xe_devm_free_blocks() - free all memory blocks
+ *
+ * @blocks: memory blocks list head
+ */
+void xe_devm_free_blocks(struct list_head *blocks)
+{
+	struct drm_buddy_block *block, *tmp;
+
+	list_for_each_entry_safe(block, tmp, blocks, link)
+		free_block(block);
+}
+
static vm_fault_t xe_devm_migrate_to_ram(struct vm_fault *vmf)
{
return 0;
}
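+/**
+ * xe_devm_page_free() - free a device page
+ *
+ * @page: the device page to free
+ *
+ * Called through struct dev_pagemap_ops when the last reference to a device
+ * page is dropped. Clear the page's bit in the owning block's usage bitmap;
+ * once every page of the block is idle, hand the block back to drm buddy via
+ * free_block().
+ */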
-static void xe_devm_page_free(struct page *page)
+void xe_devm_page_free(struct page *page)
{
+	struct drm_buddy_block *block = page->zone_device_data;
+	struct xe_svm_block_meta *meta = block->private;
+	struct xe_tile *tile = meta->tile;
+	struct xe_mem_region *mr = &tile->mem.vram;
+	struct drm_buddy *mm = &tile->mem.vram_mgr->mm;
+	u64 size = drm_buddy_block_size(mm, block);
+	u64 pages_per_block = size >> PAGE_SHIFT;
+	u64 block_pfn_first =
+		block_offset_to_pfn(mr, drm_buddy_block_offset(block));
+	u64 page_pfn = page_to_pfn(page);
+	u64 i = page_pfn - block_pfn_first;
+
+	xe_assert(tile->xe, i < pages_per_block);
+	clear_bit(i, meta->bitmap);
+	if (bitmap_empty(meta->bitmap, pages_per_block))
+		free_block(block);
}
static const struct dev_pagemap_ops xe_devm_pagemap_ops = {