@@ -1125,6 +1125,9 @@ void gem_require_ring(int fd, int ring_id)
/* prime */
+#ifndef DRM_RDWR
+#define DRM_RDWR O_RDWR
+#endif
/**
* prime_handle_to_fd:
* @fd: open i915 drm file descriptor
@@ -1142,7 +1145,7 @@ int prime_handle_to_fd(int fd, uint32_t handle)
memset(&args, 0, sizeof(args));
args.handle = handle;
- args.flags = DRM_CLOEXEC;
+ args.flags = DRM_CLOEXEC | DRM_RDWR;
args.fd = -1;
do_ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
@@ -22,6 +22,7 @@
*
* Authors:
* Rob Bradford <rob at linux.intel.com>
+ * Tiago Vignatti <tiago.vignatti at intel.com>
*
*/
@@ -66,6 +67,12 @@ fill_bo(uint32_t handle, size_t size)
}
static void
+fill_bo_cpu(char *ptr)
+{
+ memcpy(ptr, pattern, sizeof(pattern));
+}
+
+static void
test_correct(void)
{
int dma_buf_fd;
@@ -180,6 +187,62 @@ test_forked(void)
gem_close(fd, handle);
}
+/* Test CPU writes. This has a rather big implication for the driver, which
+ * must guarantee cache synchronization when the bo is written with the CPU. */
+static void
+test_correct_cpu_write(void)
+{
+ int dma_buf_fd;
+ char *ptr;
+ uint32_t handle;
+
+ handle = gem_create(fd, BO_SIZE);
+
+ dma_buf_fd = prime_handle_to_fd(fd, handle);
+ igt_assert(errno == 0);
+
+	/* Check that the mapping is writable (PROT_WRITE) */
+ ptr = mmap(NULL, BO_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, dma_buf_fd, 0);
+ igt_assert(ptr != MAP_FAILED);
+
+ /* Fill bo using CPU */
+ fill_bo_cpu(ptr);
+
+ /* Check pattern correctness */
+ igt_assert(memcmp(ptr, pattern, sizeof(pattern)) == 0);
+
+ munmap(ptr, BO_SIZE);
+ close(dma_buf_fd);
+ gem_close(fd, handle);
+}
+
+/* map from another process and then write using CPU */
+static void
+test_forked_cpu_write(void)
+{
+ int dma_buf_fd;
+ char *ptr;
+ uint32_t handle;
+
+ handle = gem_create(fd, BO_SIZE);
+
+ dma_buf_fd = prime_handle_to_fd(fd, handle);
+ igt_assert(errno == 0);
+
+ igt_fork(childno, 1) {
+		ptr = mmap(NULL, BO_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, dma_buf_fd, 0);
+ igt_assert(ptr != MAP_FAILED);
+ fill_bo_cpu(ptr);
+
+ igt_assert(memcmp(ptr, pattern, sizeof(pattern)) == 0);
+ munmap(ptr, BO_SIZE);
+ close(dma_buf_fd);
+ }
+ close(dma_buf_fd);
+ igt_waitchildren();
+ gem_close(fd, handle);
+}
+
static void
test_refcounting(void)
{
@@ -346,6 +409,8 @@ igt_main
{ "test_map_unmap", test_map_unmap },
{ "test_reprime", test_reprime },
{ "test_forked", test_forked },
+ { "test_correct_cpu_write", test_correct_cpu_write },
+ { "test_forked_cpu_write", test_forked_cpu_write },
{ "test_refcounting", test_refcounting },
{ "test_dup", test_dup },
{ "test_errors", test_errors },
This patch adds test_correct_cpu_write, which maps the texture buffer through a prime fd and then writes directly to it using the CPU. It stresses the driver, which must guarantee cache synchronization among the different domains.

It also adds test_forked_cpu_write, which creates the GEM bo in one process and passes its prime fd to another process, which in turn uses the fd only to map and write. Roughly speaking, this test simulates the Chrome OS architecture, where the Web content ("unprivileged process") maps and CPU-draws a buffer that was previously allocated in the GPU process ("privileged process").

This requires kernel modifications (Daniel Thompson's "drm: prime: Honour O_RDWR during prime-handle-to-fd").

Signed-off-by: Tiago Vignatti <tiago.vignatti@intel.com>
---
 lib/ioctl_wrappers.c |  5 +++-
 tests/prime_mmap.c   | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 69 insertions(+), 1 deletion(-)
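
For reference, below is a minimal sketch of the flow the new tests rely on, written without the IGT wrappers: export a GEM handle as a writable dma-buf (DRM_CLOEXEC | DRM_RDWR), mmap it, and write with the CPU. The helper names (export_writable_dmabuf, cpu_write_dmabuf) are made up for illustration, error handling is trimmed, and it assumes an already-open DRM fd, an existing GEM handle of at least `size` bytes, and a kernel carrying the O_RDWR prime change mentioned above.

#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <drm/drm.h>

#ifndef DRM_RDWR
#define DRM_RDWR O_RDWR		/* same fallback the patch adds to ioctl_wrappers.c */
#endif

/* Export a GEM handle as a dma-buf fd that can be mmapped for writing. */
static int export_writable_dmabuf(int drm_fd, uint32_t gem_handle)
{
	struct drm_prime_handle args;

	memset(&args, 0, sizeof(args));
	args.handle = gem_handle;
	args.flags = DRM_CLOEXEC | DRM_RDWR;	/* rejected by kernels without the O_RDWR change */
	args.fd = -1;

	if (ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args) < 0)
		return -1;

	return args.fd;
}

/* Map the dma-buf and write to it with the CPU; the driver is expected to
 * keep CPU and GPU caches coherent for the mapped range. */
static int cpu_write_dmabuf(int dma_buf_fd, size_t size, const void *data, size_t len)
{
	void *ptr;

	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, dma_buf_fd, 0);
	if (ptr == MAP_FAILED)
		return -1;

	memcpy(ptr, data, len);

	munmap(ptr, size);
	return 0;
}

Because the dma-buf fd is an ordinary file descriptor, a forked child inherits it and can run cpu_write_dmabuf() directly, which is essentially what test_forked_cpu_write does via igt_fork().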