--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -215,6 +215,21 @@ i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
return io_mapping_map_atomic_wc(&obj->mm.region->iomap, offset);
}
+void __iomem *
+i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+ unsigned long n,
+ unsigned long size)
+{
+ resource_size_t offset;
+
+ GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));
+
+ offset = i915_gem_object_get_dma_address(obj, n);
+ offset -= obj->mm.region->region.start;
+
+ return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
+}
+
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
{
struct intel_memory_region *region = obj->mm.region;
@@ -229,6 +244,32 @@ bool i915_gem_object_is_devmem(struct drm_i915_gem_object *obj)
return region && region->is_devmem;
}
+struct drm_i915_gem_object *
+i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915,
+ const void *data, size_t size)
+{
+ struct drm_i915_gem_object *obj;
+ void *map;
+
+ obj = i915_gem_object_create_lmem(i915,
+ round_up(size, PAGE_SIZE),
+ I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj))
+ return obj;
+
+ map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
+ if (IS_ERR(map)) {
+ i915_gem_object_put(obj);
+ return map;
+ }
+
+ memcpy(map, data, size);
+
+ i915_gem_object_unpin_map(obj);
+
+ return obj;
+}
+
struct drm_i915_gem_object *
i915_gem_object_create_lmem(struct drm_i915_private *i915,
resource_size_t size,
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -14,6 +14,10 @@ struct intel_memory_region;
extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
+void __iomem *
+i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+ unsigned long n,
+ unsigned long size);
void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
unsigned long n);
void __iomem *
@@ -23,6 +27,10 @@ i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
bool i915_gem_object_is_devmem(struct drm_i915_gem_object *obj);
+struct drm_i915_gem_object *
+i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915,
+ const void *data, size_t size);
+
struct drm_i915_gem_object *
i915_gem_object_create_lmem(struct drm_i915_private *i915,
resource_size_t size,
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -3,6 +3,7 @@
* Copyright © 2014-2019 Intel Corporation
*/
+#include "gem/i915_gem_lmem.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
@@ -650,7 +651,13 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
u64 flags;
int ret;
- obj = i915_gem_object_create_shmem(gt->i915, size);
+ if (HAS_LMEM(gt->i915))
+ obj = i915_gem_object_create_lmem(gt->i915, size,
+ I915_BO_ALLOC_CPU_CLEAR |
+ I915_BO_ALLOC_CONTIGUOUS);
+ else
+ obj = i915_gem_object_create_shmem(gt->i915, size);
+
if (IS_ERR(obj))
return ERR_CAST(obj);
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -41,7 +41,7 @@ static void guc_prepare_xfer(struct intel_uncore *uncore)
}
/* Copy RSA signature from the fw image to HW for verification */
-static void guc_xfer_rsa(struct intel_uc_fw *guc_fw,
+static int guc_xfer_rsa(struct intel_uc_fw *guc_fw,
struct intel_uncore *uncore)
{
u32 rsa[UOS_RSA_SCRATCH_COUNT];
@@ -49,10 +49,13 @@ static void guc_xfer_rsa(struct intel_uc_fw *guc_fw,
int i;
copied = intel_uc_fw_copy_rsa(guc_fw, rsa, sizeof(rsa));
- GEM_BUG_ON(copied < sizeof(rsa));
+ if (copied < sizeof(rsa))
+ return -ENOMEM;
for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
intel_uncore_write(uncore, UOS_RSA_SCRATCH(i), rsa[i]);
+
+ return 0;
}
/*
@@ -142,7 +145,9 @@ int intel_guc_fw_upload(struct intel_guc *guc)
* by the DMA engine in one operation, whereas the RSA signature is
* loaded via MMIO.
*/
- guc_xfer_rsa(&guc->fw, uncore);
+ ret = guc_xfer_rsa(&guc->fw, uncore);
+ if (ret)
+ goto out;
/*
* Current uCode expects the code to be loaded at 8k; locations below
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -87,17 +87,25 @@ static int intel_huc_rsa_data_create(struct intel_huc *huc)
vma->obj, true));
if (IS_ERR(vaddr)) {
i915_vma_unpin_and_release(&vma, 0);
- return PTR_ERR(vaddr);
+ err = PTR_ERR(vaddr);
+ goto unpin_out;
}
copied = intel_uc_fw_copy_rsa(&huc->fw, vaddr, vma->size);
- GEM_BUG_ON(copied < huc->fw.rsa_size);
-
i915_gem_object_unpin_map(vma->obj);
+ if (copied < huc->fw.rsa_size) {
+ err = -ENOMEM;
+ goto unpin_out;
+ }
+
huc->rsa_data = vma;
return 0;
+
+unpin_out:
+ i915_vma_unpin_and_release(&vma, 0);
+ return err;
}
static void intel_huc_rsa_data_destroy(struct intel_huc *huc)
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -7,6 +7,7 @@
#include <linux/firmware.h>
#include <drm/drm_print.h>
+#include "gem/i915_gem_lmem.h"
#include "intel_uc_fw.h"
#include "intel_uc_fw_abi.h"
#include "i915_drv.h"
@@ -371,7 +372,11 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
uc_fw->private_data_size = css->private_data_size;
- obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
+ if (HAS_LMEM(i915))
+ obj = i915_gem_object_create_lmem_from_data(i915, fw->data, fw->size);
+ else
+ obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
+
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto fail;
@@ -420,14 +425,19 @@ static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
.pages = obj->mm.pages,
.vm = &ggtt->vm,
};
+ u32 pte_flags = 0;
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
GEM_BUG_ON(dummy.node.size > ggtt->uc_fw.size);
/* uc_fw->obj cache domains were not controlled across suspend */
- drm_clflush_sg(dummy.pages);
+ if (i915_gem_object_has_struct_page(obj))
+ drm_clflush_sg(dummy.pages);
+
+ if (i915_gem_object_is_lmem(obj))
+ pte_flags |= PTE_LM;
- ggtt->vm.insert_entries(&ggtt->vm, &dummy, I915_CACHE_NONE, 0);
+ ggtt->vm.insert_entries(&ggtt->vm, &dummy, I915_CACHE_NONE, pte_flags);
}
static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw)
@@ -592,7 +602,24 @@ size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
GEM_BUG_ON(!intel_uc_fw_is_available(uc_fw));
- return sg_pcopy_to_buffer(pages->sgl, pages->nents, dst, size, offset);
+ if (i915_gem_object_is_lmem(uc_fw->obj)) {
+ unsigned long page_idx = offset >> PAGE_SHIFT;
+ unsigned int page_off = offset_in_page(offset);
+ void __iomem *vaddr;
+
+ vaddr = i915_gem_object_lmem_io_map(uc_fw->obj,
+ page_idx,
+ page_off + size);
+ if (!vaddr)
+ return 0;
+
+ memcpy_fromio(dst, vaddr + page_off, size);
+ io_mapping_unmap(vaddr);
+ return size;
+ } else {
+ return sg_pcopy_to_buffer(pages->sgl, pages->nents,
+ dst, size, offset);
+ }
}
/**