@@ -30,28 +30,37 @@
static inline void OUT(struct etnaviv_gem_object *buffer, uint32_t data)
{
u32 *vaddr = (u32 *)buffer->vaddr;
+
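+ /* never write past the end of the ring buffer */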
BUG_ON(buffer->offset >= buffer->base.size / sizeof(*vaddr));
vaddr[buffer->offset++] = data;
}
-static inline void CMD_LOAD_STATE(struct etnaviv_gem_object *buffer, u32 reg, u32 value)
+static inline void CMD_LOAD_STATE(struct etnaviv_gem_object *buffer,
+ u32 reg, u32 value)
{
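+ /* the header wants a 32-bit word index, hence the __SHR shift */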
+ u32 index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR;
+
buffer->offset = ALIGN(buffer->offset, 2);
/* write a register via cmd stream */
- OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE | VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
- VIV_FE_LOAD_STATE_HEADER_OFFSET(reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR));
+ OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
+ VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
+ VIV_FE_LOAD_STATE_HEADER_OFFSET(index));
OUT(buffer, value);
}
-static inline void CMD_LOAD_STATES(struct etnaviv_gem_object *buffer, u32 reg, u16 count, u32 *values)
+static inline void CMD_LOAD_STATES(struct etnaviv_gem_object *buffer,
+ u32 reg, u16 count, u32 *values)
{
+ u32 index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR;
u16 i;
+
buffer->offset = ALIGN(buffer->offset, 2);
- OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE | VIV_FE_LOAD_STATE_HEADER_COUNT(count) |
- VIV_FE_LOAD_STATE_HEADER_OFFSET(reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR));
+ OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
+ VIV_FE_LOAD_STATE_HEADER_COUNT(count) |
+ VIV_FE_LOAD_STATE_HEADER_OFFSET(index));
for (i = 0; i < count; i++)
OUT(buffer, values[i]);
@@ -78,15 +87,18 @@ static inline void CMD_WAIT(struct etnaviv_gem_object *buffer)
OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200);
}
-static inline void CMD_LINK(struct etnaviv_gem_object *buffer, u16 prefetch, u32 address)
+static inline void CMD_LINK(struct etnaviv_gem_object *buffer,
+ u16 prefetch, u32 address)
{
buffer->offset = ALIGN(buffer->offset, 2);
- OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK | VIV_FE_LINK_HEADER_PREFETCH(prefetch));
+ OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
+ VIV_FE_LINK_HEADER_PREFETCH(prefetch));
OUT(buffer, address);
}
-static inline void CMD_STALL(struct etnaviv_gem_object *buffer, u32 from, u32 to)
+static inline void CMD_STALL(struct etnaviv_gem_object *buffer,
+ u32 from, u32 to)
{
buffer->offset = ALIGN(buffer->offset, 2);
@@ -105,14 +117,15 @@ static void etnaviv_cmd_select_pipe(struct etnaviv_gem_object *buffer, u8 pipe)
flush = VIVS_GL_FLUSH_CACHE_TEXTURE;
stall = VIVS_GL_SEMAPHORE_TOKEN_FROM(SYNC_RECIPIENT_FE) |
- VIVS_GL_SEMAPHORE_TOKEN_TO(SYNC_RECIPIENT_PE);
+ VIVS_GL_SEMAPHORE_TOKEN_TO(SYNC_RECIPIENT_PE);
CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN, stall);
CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
- CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT, VIVS_GL_PIPE_SELECT_PIPE(pipe));
+ CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
+ VIVS_GL_PIPE_SELECT_PIPE(pipe));
}
static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
@@ -143,7 +156,8 @@ u32 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
return buffer->offset;
}
-void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event, struct etnaviv_gem_submit *submit)
+void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
+ struct etnaviv_gem_submit *submit)
{
struct etnaviv_gem_object *buffer = to_etnaviv_bo(gpu->buffer);
struct etnaviv_gem_object *cmd;
@@ -170,7 +184,8 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event, struct et
gpu->event[event].ring_pos = buffer->offset;
/* trigger event */
- CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) | VIVS_GL_EVENT_FROM_PE);
+ CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
+ VIVS_GL_EVENT_FROM_PE);
/* append WAIT/LINK to main buffer */
CMD_WAIT(buffer);
@@ -24,11 +24,12 @@
void etnaviv_register_mmu(struct drm_device *dev, struct etnaviv_iommu *mmu)
{
struct etnaviv_drm_private *priv = dev->dev_private;
+
priv->mmu = mmu;
}
#ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING
-static bool reglog = false;
+static bool reglog;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
@@ -64,14 +65,17 @@ void etnaviv_writel(u32 data, void __iomem *addr)
{
if (reglog)
printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
+
writel(data, addr);
}
u32 etnaviv_readl(const void __iomem *addr)
{
u32 val = readl(addr);
+
if (reglog)
printk(KERN_DEBUG "IO:R %p %08x\n", addr, val);
+
return val;
}
@@ -90,6 +94,7 @@ static int etnaviv_unload(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
for (i = 0; i < ETNA_MAX_PIPES; i++) {
struct etnaviv_gpu *g = priv->gpu[i];
+
if (g)
etnaviv_gpu_pm_suspend(g);
}
@@ -114,12 +119,15 @@ static void load_gpu(struct drm_device *dev)
for (i = 0; i < ETNA_MAX_PIPES; i++) {
struct etnaviv_gpu *g = priv->gpu[i];
+
if (g) {
int ret;
+
etnaviv_gpu_pm_resume(g);
ret = etnaviv_gpu_init(g);
if (ret) {
- dev_err(dev->dev, "%s hw init failed: %d\n", g->name, ret);
+ dev_err(dev->dev, "%s hw init failed: %d\n",
+ g->name, ret);
priv->gpu[i] = NULL;
}
}
@@ -370,11 +378,15 @@ static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_etnaviv_gem_new *args = data;
+
return etnaviv_gem_new_handle(dev, file, args->size,
args->flags, &args->handle);
}
-#define TS(t) ((struct timespec){ .tv_sec = (t).tv_sec, .tv_nsec = (t).tv_nsec })
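+/* TS() builds a struct timespec compound literal, so &TS(x) is valid */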
+#define TS(t) ((struct timespec){ \
+ .tv_sec = (t).tv_sec, \
+ .tv_nsec = (t).tv_nsec \
+})
static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
struct drm_file *file)
@@ -437,17 +449,21 @@ static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_etnaviv_wait_fence *args = data;
- return etnaviv_wait_fence_interruptable(dev, args->pipe, args->fence, &TS(args->timeout));
+
+ return etnaviv_wait_fence_interruptable(dev, args->pipe, args->fence,
+ &TS(args->timeout));
}
static const struct drm_ioctl_desc etnaviv_ioctls[] = {
- DRM_IOCTL_DEF_DRV(ETNAVIV_GET_PARAM, etnaviv_ioctl_get_param, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(ETNAVIV_GEM_NEW, etnaviv_ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(ETNAVIV_GEM_INFO, etnaviv_ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(ETNAVIV_GEM_CPU_PREP, etnaviv_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(ETNAVIV_GEM_CPU_FINI, etnaviv_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(ETNAVIV_GEM_SUBMIT, etnaviv_ioctl_gem_submit, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(ETNAVIV_WAIT_FENCE, etnaviv_ioctl_wait_fence, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+#define ETNA_IOCTL(n, func, flags) \
+ DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
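+/*
+ * e.g. ETNA_IOCTL(GET_PARAM, get_param, f) expands to
+ * DRM_IOCTL_DEF_DRV(ETNAVIV_GET_PARAM, etnaviv_ioctl_get_param, f)
+ */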
+ ETNA_IOCTL(GET_PARAM, get_param, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_NEW, gem_new, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_INFO, gem_info, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_SUBMIT, gem_submit, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ ETNA_IOCTL(WAIT_FENCE, wait_fence, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct vm_operations_struct vm_ops = {
@@ -77,9 +77,10 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
-int etnaviv_gem_get_iova_locked(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
- uint32_t *iova);
-int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj, int id, uint32_t *iova);
+int etnaviv_gem_get_iova_locked(struct etnaviv_gpu *gpu,
+ struct drm_gem_object *obj, uint32_t *iova);
+int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
+ int id, uint32_t *iova);
struct page **etnaviv_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
void etnaviv_gem_put_iova(struct drm_gem_object *obj);
@@ -111,7 +112,8 @@ struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
uint32_t size, struct sg_table *sgt);
u32 etnaviv_buffer_init(struct etnaviv_gpu *gpu);
-void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event, struct etnaviv_gem_submit *submit);
+void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
+ struct etnaviv_gem_submit *submit);
#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
@@ -148,6 +150,7 @@ static inline bool fence_completed(struct drm_device *dev, uint32_t fence)
static inline int align_pitch(int width, int bpp)
{
int bytespp = (bpp + 7) / 8;
+
/* adreno needs pitch aligned to 32 pixels: */
return bytespp * ALIGN(width, 32);
}
@@ -55,7 +55,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
*/
if (etnaviv_obj->flags & (ETNA_BO_WC|ETNA_BO_UNCACHED))
dma_map_sg(dev->dev, etnaviv_obj->sgt->sgl,
- etnaviv_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ etnaviv_obj->sgt->nents, DMA_BIDIRECTIONAL);
}
return etnaviv_obj->pages;
@@ -71,7 +71,8 @@ static void put_pages(struct drm_gem_object *obj)
*/
if (etnaviv_obj->flags & (ETNA_BO_WC|ETNA_BO_UNCACHED))
dma_unmap_sg(obj->dev->dev, etnaviv_obj->sgt->sgl,
- etnaviv_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ etnaviv_obj->sgt->nents,
+ DMA_BIDIRECTIONAL);
sg_free_table(etnaviv_obj->sgt);
kfree(etnaviv_obj->sgt);
@@ -85,9 +86,11 @@ struct page **etnaviv_gem_get_pages(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct page **p;
+
mutex_lock(&dev->struct_mutex);
p = get_pages(obj);
mutex_unlock(&dev->struct_mutex);
+
return p;
}
@@ -121,14 +124,17 @@ static int etnaviv_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma)
{
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+ pgprot_t vm_page_prot;
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_MIXEDMAP;
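+ /* derive the base protection once, after the flag updates above */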
+ vm_page_prot = vm_get_page_prot(vma->vm_flags);
+
if (etnaviv_obj->flags & ETNA_BO_WC) {
- vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
- vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+ vma->vm_page_prot = pgprot_noncached(vm_page_prot);
} else {
/*
* Shunt off cached objs to shmem file so they have their own
@@ -140,7 +146,7 @@ static int etnaviv_gem_mmap_obj(struct drm_gem_object *obj,
vma->vm_pgoff = 0;
vma->vm_file = obj->filp;
- vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ vma->vm_page_prot = vm_page_prot;
}
return 0;
@@ -243,9 +249,11 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
uint64_t offset;
+
mutex_lock(&obj->dev->struct_mutex);
offset = mmap_offset(obj);
mutex_unlock(&obj->dev->struct_mutex);
+
return offset;
}
@@ -296,7 +304,8 @@ int etnaviv_gem_get_iova_locked(struct etnaviv_gpu *gpu,
return ret;
}
-int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj, int id, uint32_t *iova)
+int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
+ int id, uint32_t *iova)
{
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
int ret;
@@ -312,6 +321,7 @@ int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj, in
mutex_lock(&obj->dev->struct_mutex);
ret = etnaviv_gem_get_iova_locked(gpu, obj, iova);
mutex_unlock(&obj->dev->struct_mutex);
+
return ret;
}
@@ -361,29 +371,37 @@ fail:
void *etnaviv_gem_vaddr_locked(struct drm_gem_object *obj)
{
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+
if (!etnaviv_obj->vaddr) {
struct page **pages = get_pages(obj);
+
if (IS_ERR(pages))
return ERR_CAST(pages);
+
etnaviv_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}
+
return etnaviv_obj->vaddr;
}
void *msm_gem_vaddr(struct drm_gem_object *obj)
{
void *ret;
+
mutex_lock(&obj->dev->struct_mutex);
ret = etnaviv_gem_vaddr_locked(obj);
mutex_unlock(&obj->dev->struct_mutex);
+
return ret;
}
dma_addr_t etnaviv_gem_paddr_locked(struct drm_gem_object *obj)
{
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
return etnaviv_obj->paddr;
@@ -393,11 +411,14 @@ void etnaviv_gem_move_to_active(struct drm_gem_object *obj,
struct etnaviv_gpu *gpu, bool write, uint32_t fence)
{
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+
etnaviv_obj->gpu = gpu;
+
if (write)
etnaviv_obj->write_fence = fence;
else
etnaviv_obj->read_fence = fence;
+
list_del_init(&etnaviv_obj->mm_list);
list_add_tail(&etnaviv_obj->mm_list, &gpu->active_list);
}
@@ -459,6 +480,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
uint64_t off = drm_vma_node_start(&obj->vma_node);
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %d\n",
etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
etnaviv_obj->read_fence, etnaviv_obj->write_fence,
@@ -474,6 +496,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
list_for_each_entry(etnaviv_obj, list, mm_list) {
struct drm_gem_object *obj = &etnaviv_obj->base;
+
seq_puts(m, " ");
msm_gem_describe(obj, m);
count++;
@@ -504,6 +527,7 @@ static void etnaviv_free_obj(struct drm_gem_object *obj)
if (mmu && etnaviv_obj->iova) {
uint32_t offset = etnaviv_obj->gpu_vram_node->start;
+
etnaviv_iommu_unmap(mmu, offset, etnaviv_obj->sgt, obj->size);
drm_mm_remove_node(etnaviv_obj->gpu_vram_node);
kfree(etnaviv_obj->gpu_vram_node);
@@ -513,7 +537,8 @@ static void etnaviv_free_obj(struct drm_gem_object *obj)
if (obj->import_attach) {
if (etnaviv_obj->vaddr)
- dma_buf_vunmap(obj->import_attach->dmabuf, etnaviv_obj->vaddr);
+ dma_buf_vunmap(obj->import_attach->dmabuf,
+ etnaviv_obj->vaddr);
/* Don't drop the pages for imported dmabuf, as they are not
* ours, just free the array we allocated:
@@ -695,7 +720,8 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
goto fail;
}
- ret = drm_prime_sg_to_page_addr_arrays(sgt, etnaviv_obj->pages, NULL, npages);
+ ret = drm_prime_sg_to_page_addr_arrays(sgt, etnaviv_obj->pages,
+ NULL, npages);
if (ret)
goto fail;
@@ -22,7 +22,9 @@
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+
BUG_ON(!etnaviv_obj->sgt); /* should have already pinned! */
+
return etnaviv_obj->sgt;
}
@@ -92,7 +92,8 @@ static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
*/
obj = idr_find(&file->object_idr, submit_bo.handle);
if (!obj) {
- DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i);
+ DRM_ERROR("invalid handle %u at index %u\n",
+ submit_bo.handle, i);
ret = -EINVAL;
goto out_unlock;
}
@@ -101,7 +102,7 @@ static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
if (!list_empty(&etnaviv_obj->submit_entry)) {
DRM_ERROR("handle %u at index %u already on submit list\n",
- submit_bo.handle, i);
+ submit_bo.handle, i);
ret = -EINVAL;
goto out_unlock;
}
@@ -163,7 +164,8 @@ retry:
/* if locking succeeded, pin bo: */
- ret = etnaviv_gem_get_iova_locked(submit->gpu, &etnaviv_obj->base, &iova);
+ ret = etnaviv_gem_get_iova_locked(submit->gpu,
+ &etnaviv_obj->base, &iova);
/* this would break the logic in the fail path.. there is no
* reason for this to happen, but just to be on the safe side
@@ -197,7 +199,10 @@ fail:
submit_unlock_unpin_bo(submit, slow_locked);
if (ret == -EDEADLK) {
- struct etnaviv_gem_object *etnaviv_obj = submit->bos[contended].obj;
+ struct etnaviv_gem_object *etnaviv_obj;
+
+ etnaviv_obj = submit->bos[contended].obj;
+
/* we lost out in a seqno race, lock and retry.. */
ret = ww_mutex_lock_slow_interruptible(&etnaviv_obj->resv->lock,
&submit->ticket);
@@ -251,7 +256,8 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, struct etnaviv_gem_ob
uint32_t iova, off;
bool valid;
- ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
+ ret = copy_from_user(&submit_reloc, userptr,
+ sizeof(submit_reloc));
if (ret)
return -EFAULT;
@@ -305,6 +311,7 @@ static void submit_cleanup(struct etnaviv_gem_submit *submit, bool fail)
for (i = 0; i < submit->nr_bos; i++) {
struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+
submit_unlock_unpin_bo(submit, i);
list_del_init(&etnaviv_obj->submit_entry);
drm_gem_object_unreference(&etnaviv_obj->base);
@@ -397,7 +404,8 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
if (submit_cmd.size > max_size ||
submit_cmd.submit_offset > max_size - submit_cmd.size) {
- DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
+ DRM_ERROR("invalid cmdstream size: %u\n",
+ submit_cmd.size);
ret = -EINVAL;
goto out;
}
@@ -410,8 +418,9 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
if (submit->valid)
continue;
- ret = submit_reloc(submit, etnaviv_obj, submit_cmd.submit_offset,
- submit_cmd.nr_relocs, submit_cmd.relocs);
+ ret = submit_reloc(submit, etnaviv_obj,
+ submit_cmd.submit_offset,
+ submit_cmd.nr_relocs, submit_cmd.relocs);
if (ret)
goto out;
}
@@ -31,7 +31,8 @@
* Driver functions:
*/
-int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, uint32_t param, uint64_t *value)
+int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, uint32_t param,
+ uint64_t *value)
{
switch (param) {
case ETNAVIV_PARAM_GPU_MODEL:
@@ -112,37 +113,49 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, uint32_t param, uint64_t *val
static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
{
- if (gpu->identity.minor_features0 & chipMinorFeatures0_MORE_MINOR_FEATURES) {
+ if (gpu->identity.minor_features0 &
+ chipMinorFeatures0_MORE_MINOR_FEATURES) {
u32 specs[2];
specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
- gpu->identity.stream_count = (specs[0] & VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK)
+ gpu->identity.stream_count =
+ (specs[0] & VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK)
>> VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT;
- gpu->identity.register_max = (specs[0] & VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK)
+ gpu->identity.register_max =
+ (specs[0] & VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK)
>> VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT;
- gpu->identity.thread_count = (specs[0] & VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK)
+ gpu->identity.thread_count =
+ (specs[0] & VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK)
>> VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT;
- gpu->identity.vertex_cache_size = (specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK)
+ gpu->identity.vertex_cache_size =
+ (specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK)
>> VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT;
- gpu->identity.shader_core_count = (specs[0] & VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK)
+ gpu->identity.shader_core_count =
+ (specs[0] & VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK)
>> VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT;
- gpu->identity.pixel_pipes = (specs[0] & VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK)
+ gpu->identity.pixel_pipes =
+ (specs[0] & VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK)
>> VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT;
- gpu->identity.vertex_output_buffer_size = (specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK)
+ gpu->identity.vertex_output_buffer_size =
+ (specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK)
>> VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT;
- gpu->identity.buffer_size = (specs[1] & VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK)
+ gpu->identity.buffer_size =
+ (specs[1] & VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK)
>> VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT;
- gpu->identity.instruction_count = (specs[1] & VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK)
+ gpu->identity.instruction_count =
+ (specs[1] & VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK)
>> VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT;
- gpu->identity.num_constants = (specs[1] & VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK)
+ gpu->identity.num_constants =
+ (specs[1] & VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK)
>> VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT;
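+ /* these three fields appear to be log2-encoded by the hardware */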
gpu->identity.register_max = 1 << gpu->identity.register_max;
gpu->identity.thread_count = 1 << gpu->identity.thread_count;
- gpu->identity.vertex_output_buffer_size = 1 << gpu->identity.vertex_output_buffer_size;
+ gpu->identity.vertex_output_buffer_size =
+ 1 << gpu->identity.vertex_output_buffer_size;
} else {
dev_err(gpu->dev->dev, "TODO: determine GPU specs based on model\n");
}
@@ -165,16 +178,26 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
break;
}
- dev_info(gpu->dev->dev, "stream_count: %x\n", gpu->identity.stream_count);
- dev_info(gpu->dev->dev, "register_max: %x\n", gpu->identity.register_max);
- dev_info(gpu->dev->dev, "thread_count: %x\n", gpu->identity.thread_count);
- dev_info(gpu->dev->dev, "vertex_cache_size: %x\n", gpu->identity.vertex_cache_size);
- dev_info(gpu->dev->dev, "shader_core_count: %x\n", gpu->identity.shader_core_count);
- dev_info(gpu->dev->dev, "pixel_pipes: %x\n", gpu->identity.pixel_pipes);
- dev_info(gpu->dev->dev, "vertex_output_buffer_size: %x\n", gpu->identity.vertex_output_buffer_size);
- dev_info(gpu->dev->dev, "buffer_size: %x\n", gpu->identity.buffer_size);
- dev_info(gpu->dev->dev, "instruction_count: %x\n", gpu->identity.instruction_count);
- dev_info(gpu->dev->dev, "num_constants: %x\n", gpu->identity.num_constants);
+ dev_info(gpu->dev->dev, "stream_count: %x\n",
+ gpu->identity.stream_count);
+ dev_info(gpu->dev->dev, "register_max: %x\n",
+ gpu->identity.register_max);
+ dev_info(gpu->dev->dev, "thread_count: %x\n",
+ gpu->identity.thread_count);
+ dev_info(gpu->dev->dev, "vertex_cache_size: %x\n",
+ gpu->identity.vertex_cache_size);
+ dev_info(gpu->dev->dev, "shader_core_count: %x\n",
+ gpu->identity.shader_core_count);
+ dev_info(gpu->dev->dev, "pixel_pipes: %x\n",
+ gpu->identity.pixel_pipes);
+ dev_info(gpu->dev->dev, "vertex_output_buffer_size: %x\n",
+ gpu->identity.vertex_output_buffer_size);
+ dev_info(gpu->dev->dev, "buffer_size: %x\n",
+ gpu->identity.buffer_size);
+ dev_info(gpu->dev->dev, "instruction_count: %x\n",
+ gpu->identity.instruction_count);
+ dev_info(gpu->dev->dev, "num_constants: %x\n",
+ gpu->identity.num_constants);
}
static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
@@ -192,23 +215,28 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);
- /* !!!! HACK ALERT !!!! */
- /* Because people change device IDs without letting software know
- ** about it - here is the hack to make it all look the same. Only
- ** for GC400 family. Next time - TELL ME!!! */
- if (((gpu->identity.model & 0xFF00) == 0x0400)
- && (gpu->identity.model != 0x0420)) {
+ /*
+ * !!!! HACK ALERT !!!!
+ * Because people change device IDs without letting software
+ * know about it - here is the hack to make it all look the
+ * same. Only for GC400 family.
+ */
+ if ((gpu->identity.model & 0xff00) == 0x0400 &&
+ gpu->identity.model != 0x0420) {
gpu->identity.model = gpu->identity.model & 0x0400;
}
- /* An other special case */
- if ((gpu->identity.model == 0x300)
- && (gpu->identity.revision == 0x2201)) {
+ /* Another special case */
+ if (gpu->identity.model == 0x300 &&
+ gpu->identity.revision == 0x2201) {
u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);
- if ((chipDate == 0x20080814) && (chipTime == 0x12051100)) {
- /* This IP has an ECO; put the correct revision in it. */
+ if (chipDate == 0x20080814 && chipTime == 0x12051100) {
+ /*
+ * This IP has an ECO; put the correct
+ * revision in it.
+ */
gpu->identity.revision = 0x1051;
}
}
@@ -223,27 +251,38 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
if (gpu->identity.model == 0x700)
gpu->identity.features &= ~BIT(0);
- if (((gpu->identity.model == 0x500) && (gpu->identity.revision < 2))
- || ((gpu->identity.model == 0x300) && (gpu->identity.revision < 0x2000))) {
+ if ((gpu->identity.model == 0x500 && gpu->identity.revision < 2) ||
+ (gpu->identity.model == 0x300 && gpu->identity.revision < 0x2000)) {
- /* GC500 rev 1.x and GC300 rev < 2.0 doesn't have these registers. */
+ /*
+ * GC500 rev 1.x and GC300 rev < 2.0 don't have these
+ * registers.
+ */
gpu->identity.minor_features0 = 0;
gpu->identity.minor_features1 = 0;
gpu->identity.minor_features2 = 0;
gpu->identity.minor_features3 = 0;
} else
- gpu->identity.minor_features0 = gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);
+ gpu->identity.minor_features0 =
+ gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);
if (gpu->identity.minor_features0 & BIT(21)) {
- gpu->identity.minor_features1 = gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
- gpu->identity.minor_features2 = gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
- gpu->identity.minor_features3 = gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
+ gpu->identity.minor_features1 =
+ gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
+ gpu->identity.minor_features2 =
+ gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
+ gpu->identity.minor_features3 =
+ gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
}
- dev_info(gpu->dev->dev, "minor_features: %x\n", gpu->identity.minor_features0);
- dev_info(gpu->dev->dev, "minor_features1: %x\n", gpu->identity.minor_features1);
- dev_info(gpu->dev->dev, "minor_features2: %x\n", gpu->identity.minor_features2);
- dev_info(gpu->dev->dev, "minor_features3: %x\n", gpu->identity.minor_features3);
+ dev_info(gpu->dev->dev, "minor_features: %x\n",
+ gpu->identity.minor_features0);
+ dev_info(gpu->dev->dev, "minor_features1: %x\n",
+ gpu->identity.minor_features1);
+ dev_info(gpu->dev->dev, "minor_features2: %x\n",
+ gpu->identity.minor_features2);
+ dev_info(gpu->dev->dev, "minor_features3: %x\n",
+ gpu->identity.minor_features3);
etnaviv_hw_specs(gpu);
}
@@ -295,7 +334,8 @@ static void etnaviv_hw_reset(struct etnaviv_gpu *gpu)
/* try resetting again if the FE is not idle */
if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
- dev_dbg(gpu->dev->dev, "%s: FE is not idle\n", gpu->name);
+ dev_dbg(gpu->dev->dev, "%s: FE is not idle\n",
+ gpu->name);
continue;
}
@@ -305,7 +345,8 @@ static void etnaviv_hw_reset(struct etnaviv_gpu *gpu)
/* is the GPU idle? */
if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0)
|| ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
- dev_dbg(gpu->dev->dev, "%s: GPU is not idle\n", gpu->name);
+ dev_dbg(gpu->dev->dev, "%s: GPU is not idle\n",
+ gpu->name);
continue;
}
@@ -392,8 +433,11 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
words = ALIGN(words, 2) / 2;
gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
- gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS, etnaviv_gem_paddr_locked(gpu->buffer));
- gpu_write(gpu, VIVS_FE_COMMAND_CONTROL, VIVS_FE_COMMAND_CONTROL_ENABLE | VIVS_FE_COMMAND_CONTROL_PREFETCH(words));
+ gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS,
+ etnaviv_gem_paddr_locked(gpu->buffer));
+ gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
+ VIVS_FE_COMMAND_CONTROL_ENABLE |
+ VIVS_FE_COMMAND_CONTROL_PREFETCH(words));
return 0;
@@ -478,13 +522,13 @@ void etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
seq_puts(m, "\tDMA ");
- if ((debug.address[0] == debug.address[1]) && (debug.state[0] == debug.state[1])) {
+ if (debug.address[0] == debug.address[1] &&
+ debug.state[0] == debug.state[1]) {
seq_puts(m, "seems to be stuck\n");
+ } else if (debug.address[0] == debug.address[1]) {
+ seq_puts(m, "adress is constant\n");
} else {
- if (debug.address[0] == debug.address[1])
- seq_puts(m, "adress is constant\n");
- else
- seq_puts(m, "is runing\n");
+ seq_puts(m, "is runing\n");
}
seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
@@ -509,7 +553,8 @@ static int enable_pwrrail(struct etnaviv_gpu *gpu)
if (gpu->gpu_reg) {
ret = regulator_enable(gpu->gpu_reg);
if (ret) {
- dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
+ dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n",
+ ret);
return ret;
}
}
@@ -517,7 +562,8 @@ static int enable_pwrrail(struct etnaviv_gpu *gpu)
if (gpu->gpu_cx) {
ret = regulator_enable(gpu->gpu_cx);
if (ret) {
- dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
+ dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n",
+ ret);
return ret;
}
}
@@ -619,7 +665,8 @@ int etnaviv_gpu_pm_suspend(struct etnaviv_gpu *gpu)
*/
static void recover_worker(struct work_struct *work)
{
- struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu, recover_work);
+ struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
+ recover_work);
struct drm_device *dev = gpu->dev;
dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
@@ -674,7 +721,8 @@ static unsigned int event_alloc(struct etnaviv_gpu *gpu)
unsigned long ret, flags;
unsigned int i, event = ~0U;
- ret = wait_for_completion_timeout(&gpu->event_free, msecs_to_jiffies(10 * 10000));
+ ret = wait_for_completion_timeout(&gpu->event_free,
+ msecs_to_jiffies(10 * 10000));
if (!ret)
dev_err(gpu->dev->dev, "wait_for_completion_timeout failed");
@@ -701,7 +749,8 @@ static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
spin_lock_irqsave(&gpu->event_spinlock, flags);
if (gpu->event[event].used == false) {
- dev_warn(gpu->dev->dev, "event %u is already marked as free", event);
+ dev_warn(gpu->dev->dev, "event %u is already marked as free\n",
+ event);
spin_unlock_irqrestore(&gpu->event_spinlock, flags);
} else {
gpu->event[event].used = false;
@@ -717,7 +766,8 @@ static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
static void retire_worker(struct work_struct *work)
{
- struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu, retire_work);
+ struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
+ retire_work);
struct drm_device *dev = gpu->dev;
uint32_t fence = gpu->retired_fence;
@@ -749,12 +799,13 @@ static void retire_worker(struct work_struct *work)
void etnaviv_gpu_retire(struct etnaviv_gpu *gpu)
{
struct etnaviv_drm_private *priv = gpu->dev->dev_private;
+
queue_work(priv->wq, &gpu->retire_work);
}
/* add bo's to gpu's ring, and kick gpu: */
-int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, struct etnaviv_gem_submit *submit,
- struct etnaviv_file_private *ctx)
+int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
+ struct etnaviv_gem_submit *submit, struct etnaviv_file_private *ctx)
{
struct drm_device *dev = gpu->dev;
struct etnaviv_drm_private *priv = dev->dev_private;
@@ -798,14 +849,17 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, struct etnaviv_gem_submit *submi
/* ring takes a reference to the bo and iova: */
drm_gem_object_reference(&etnaviv_obj->base);
- etnaviv_gem_get_iova_locked(gpu, &etnaviv_obj->base, &iova);
+ etnaviv_gem_get_iova_locked(gpu, &etnaviv_obj->base,
+ &iova);
}
if (submit->bos[i].flags & ETNA_SUBMIT_BO_READ)
- etnaviv_gem_move_to_active(&etnaviv_obj->base, gpu, false, submit->fence);
+ etnaviv_gem_move_to_active(&etnaviv_obj->base, gpu,
+ false, submit->fence);
if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
- etnaviv_gem_move_to_active(&etnaviv_obj->base, gpu, true, submit->fence);
+ etnaviv_gem_move_to_active(&etnaviv_obj->base, gpu,
+ true, submit->fence);
}
hangcheck_timer_reset(gpu);
@@ -830,6 +884,7 @@ static irqreturn_t irq_handler(int irq, void *data)
dev_err(gpu->dev->dev, "AXI bus error\n");
else {
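+ /* service the highest-numbered pending event first */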
uint8_t event = __fls(intr);
+
dev_dbg(gpu->dev->dev, "event %u\n", event);
gpu->retired_fence = gpu->event[event].fence;
gpu->last_ring_pos = gpu->event[event].ring_pos;
@@ -96,7 +96,7 @@ struct etnaviv_gpu {
/* event management: */
struct etnaviv_event event[30];
struct completion event_free;
- struct spinlock event_spinlock;
+ spinlock_t event_spinlock;
/* list of GEM active objects: */
struct list_head active_list;
@@ -139,7 +139,8 @@ static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
return etnaviv_readl(gpu->mmio + reg);
}
-int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, uint32_t param, uint64_t *value);
+int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, uint32_t param,
+ uint64_t *value);
int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
int etnaviv_gpu_pm_suspend(struct etnaviv_gpu *gpu);
@@ -150,8 +151,8 @@ void etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
#endif
void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
-int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, struct etnaviv_gem_submit *submit,
- struct etnaviv_file_private *ctx);
+int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
+ struct etnaviv_gem_submit *submit, struct etnaviv_file_private *ctx);
extern struct platform_driver etnaviv_gpu_driver;
@@ -122,8 +122,8 @@ static int etnaviv_iommu_map(struct iommu_domain *domain, unsigned long iova,
return 0;
}
-static size_t etnaviv_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size)
+static size_t etnaviv_iommu_unmap(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
{
struct etnaviv_iommu_domain *etnaviv_domain = domain->priv;
@@ -158,6 +158,7 @@ struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu)
{
struct iommu_domain *domain;
struct etnaviv_iommu_domain *etnaviv_domain;
+ uint32_t pgtable;
int ret;
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
@@ -172,12 +173,13 @@ struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu)
/* set page table address in MC */
etnaviv_domain = domain->priv;
+ pgtable = (uint32_t)etnaviv_domain->pgtable.paddr;
- gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, (uint32_t)etnaviv_domain->pgtable.paddr);
- gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, (uint32_t)etnaviv_domain->pgtable.paddr);
- gpu_write(gpu, VIVS_MC_MMU_PE_PAGE_TABLE, (uint32_t)etnaviv_domain->pgtable.paddr);
- gpu_write(gpu, VIVS_MC_MMU_PEZ_PAGE_TABLE, (uint32_t)etnaviv_domain->pgtable.paddr);
- gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, (uint32_t)etnaviv_domain->pgtable.paddr);
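+ /* FE, TX, PE, PEZ and RA all share the same page table */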
+ gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
+ gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
+ gpu_write(gpu, VIVS_MC_MMU_PE_PAGE_TABLE, pgtable);
+ gpu_write(gpu, VIVS_MC_MMU_PEZ_PAGE_TABLE, pgtable);
+ gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
return domain;
@@ -57,6 +57,7 @@ fail:
for_each_sg(sgt->sgl, sg, i, j) {
size_t bytes = sg->length + sg->offset;
+
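+ /* unwind the mappings that succeeded before the failure */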
iommu_unmap(domain, da, bytes);
da += bytes;
}
@@ -95,7 +96,8 @@ void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
kfree(mmu);
}
-struct etnaviv_iommu *etnaviv_iommu_new(struct drm_device *dev, struct iommu_domain *domain)
+struct etnaviv_iommu *etnaviv_iommu_new(struct drm_device *dev,
+ struct iommu_domain *domain)
{
struct etnaviv_iommu *mmu;
@@ -25,13 +25,15 @@ struct etnaviv_iommu {
struct iommu_domain *domain;
};
-int etnaviv_iommu_attach(struct etnaviv_iommu *iommu, const char **names, int cnt);
-int etnaviv_iommu_map(struct etnaviv_iommu *iommu, uint32_t iova, struct sg_table *sgt,
- unsigned len, int prot);
-int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, uint32_t iova, struct sg_table *sgt,
- unsigned len);
+int etnaviv_iommu_attach(struct etnaviv_iommu *iommu, const char **names,
+ int cnt);
+int etnaviv_iommu_map(struct etnaviv_iommu *iommu, uint32_t iova,
+ struct sg_table *sgt, unsigned len, int prot);
+int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, uint32_t iova,
+ struct sg_table *sgt, unsigned len);
void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
-struct etnaviv_iommu *etnaviv_iommu_new(struct drm_device *dev, struct iommu_domain *domain);
+struct etnaviv_iommu *etnaviv_iommu_new(struct drm_device *dev,
+ struct iommu_domain *domain);
#endif /* __ETNAVIV_MMU_H__ */
@@ -131,7 +131,7 @@ struct drm_etnaviv_gem_cpu_fini {
struct drm_etnaviv_gem_submit_reloc {
uint32_t submit_offset; /* in, offset from submit_bo */
uint32_t or; /* in, value OR'd with result */
- int32_t shift; /* in, amount of left shift (can be negative) */
+ int32_t shift; /* in, amount of left shift (can be -ve) */
uint32_t reloc_idx; /* in, index of reloc_bo buffer */
uint64_t reloc_offset; /* in, offset from start of reloc_bo */
};