diff mbox series

[V1,3/3] drivers/fpga/amd: Add remote queue service APIs

Message ID 20241007220128.3023169-3-yidong.zhang@amd.com (mailing list archive)
State New
Headers show
Series Add new driver for AMD Versal PCIe Card | expand

Commit Message

Yidong Zhang Oct. 7, 2024, 10:01 p.m. UTC
From: Yidong Zhang <yidong.zhang@amd.com>

Adds remote queue services including init, fini, and send command.

Co-developed-by: Nishad Saraf <nishads@amd.com>
Signed-off-by: Nishad Saraf <nishads@amd.com>
Co-developed-by: Prapul Krishnamurthy <prapulk@amd.com>
Signed-off-by: Prapul Krishnamurthy <prapulk@amd.com>
Signed-off-by: Yidong Zhang <yidong.zhang@amd.com>
---
 drivers/fpga/amd/vmgmt-rm-queue.c | 342 +++++++++++++++++++++++++++++-
 1 file changed, 341 insertions(+), 1 deletion(-)

Comments

kernel test robot Oct. 9, 2024, 7:13 a.m. UTC | #1
Hi David,

kernel test robot noticed the following build warnings:

[auto build test WARNING on linus/master]
[also build test WARNING on v6.12-rc2 next-20241008]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/David-Zhang/drivers-fpga-amd-Add-communication-with-firmware/20241008-060253
base:   linus/master
patch link:    https://lore.kernel.org/r/20241007220128.3023169-3-yidong.zhang%40amd.com
patch subject: [PATCH V1 3/3] drivers/fpga/amd: Add remote queue service APIs
config: i386-randconfig-054-20241009 (https://download.01.org/0day-ci/archive/20241009/202410091512.rcCqJO6z-lkp@intel.com/config)
compiler: clang version 18.1.8 (https://github.com/llvm/llvm-project 3b5b5c1ec4a3095ab096dd780e84d7ab81f3d7ff)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202410091512.rcCqJO6z-lkp@intel.com/

cocci warnings: (new ones prefixed by >>)
>> drivers/fpga/amd/vmgmt-rm-queue.c:280:2-3: Unneeded semicolon

vim +280 drivers/fpga/amd/vmgmt-rm-queue.c

   254	
   255	static void rm_check_msg(struct work_struct *w)
   256	{
   257		struct rm_device *rdev = to_rdev_msg_monitor(w);
   258		int ret;
   259	
   260		mutex_lock(&rdev->queue);
   261	
   262		ret = rm_queue_get_cidx(rdev, RM_QUEUE_SQ, &rdev->sq.cidx);
   263		if (ret)
   264			goto error;
   265	
   266		ret = rm_queue_get_pidx(rdev, RM_QUEUE_CQ, &rdev->cq.pidx);
   267		if (ret)
   268			goto error;
   269	
   270		while (rdev->cq.cidx < rdev->cq.pidx) {
   271			ret = rm_process_msg(rdev);
   272			if (ret)
   273				break;
   274	
   275			rdev->cq.cidx++;
   276	
   277			ret = rm_queue_set_cidx(rdev, RM_QUEUE_CQ, rdev->cq.cidx);
   278			if (ret)
   279				break;
 > 280		};
   281	
   282	error:
   283		mutex_unlock(&rdev->queue);
   284	}
   285
diff mbox series

Patch

diff --git a/drivers/fpga/amd/vmgmt-rm-queue.c b/drivers/fpga/amd/vmgmt-rm-queue.c
index fe805373ea32..f68439833d51 100644
--- a/drivers/fpga/amd/vmgmt-rm-queue.c
+++ b/drivers/fpga/amd/vmgmt-rm-queue.c
@@ -23,16 +23,356 @@ 
 #include "vmgmt-rm.h"
 #include "vmgmt-rm-queue.h"
 
/* Map the embedded msg_monitor work item back to its owning rm_device. */
static inline struct rm_device *to_rdev_msg_monitor(struct work_struct *w)
{
	return container_of(w, struct rm_device, msg_monitor);
}
+
/* Map the embedded msg_timer back to its owning rm_device. */
static inline struct rm_device *to_rdev_msg_timer(struct timer_list *t)
{
	return container_of(t, struct rm_device, msg_timer);
}
+
/* Write one 32-bit word at @offset (relative to the queue base) in shared memory. */
static inline int rm_queue_write(struct rm_device *rdev, u32 offset, u32 value)
{
	return regmap_write(rdev->shmem_regmap, rdev->queue_base + offset, value);
}
+
/* Read one 32-bit word at @offset (relative to the queue base) from shared memory. */
static inline int rm_queue_read(struct rm_device *rdev, u32 offset, u32 *value)
{
	return regmap_read(rdev->shmem_regmap, rdev->queue_base + offset, value);
}
+
+static inline int rm_queue_bulk_read(struct rm_device *rdev, u32 offset,
+				     u32 *value, u32 size)
+{
+	if (size & 0x3) {
+		vmgmt_err(rdev->vdev, "size %d is not 4 Bytes aligned", size);
+		return -EINVAL;
+	}
+
+	return regmap_bulk_read(rdev->shmem_regmap, rdev->queue_base + offset,
+				value, DIV_ROUND_UP(size, 4));
+}
+
+static inline int rm_queue_bulk_write(struct rm_device *rdev, u32 offset,
+				      u32 *value, u32 size)
+{
+	if (size & 0x3) {
+		vmgmt_err(rdev->vdev, "size %d is not 4 Bytes aligned", size);
+		return -EINVAL;
+	}
+
+	return regmap_bulk_write(rdev->shmem_regmap, rdev->queue_base + offset,
+				 value, DIV_ROUND_UP(size, 4));
+}
+
+static inline int rm_queue_get_cidx(struct rm_device *rdev,
+				    enum rm_queue_type type, u32 *value)
+{
+	u32 off;
+
+	if (type == RM_QUEUE_SQ)
+		off = offsetof(struct rm_queue_header, sq_cidx);
+	else
+		off = offsetof(struct rm_queue_header, cq_cidx);
+
+	return rm_queue_read(rdev, off, value);
+}
+
+static inline int rm_queue_set_cidx(struct rm_device *rdev,
+				    enum rm_queue_type type, u32 value)
+{
+	u32 off;
+
+	if (type == RM_QUEUE_SQ)
+		off = offsetof(struct rm_queue_header, sq_cidx);
+	else
+		off = offsetof(struct rm_queue_header, cq_cidx);
+
+	return rm_queue_write(rdev, off, value);
+}
+
+static inline int rm_queue_get_pidx(struct rm_device *rdev,
+				    enum rm_queue_type type, u32 *value)
+{
+	if (type == RM_QUEUE_SQ)
+		return regmap_read(rdev->io_regmap, RM_IO_SQ_PIDX_OFF, value);
+	else
+		return regmap_read(rdev->io_regmap, RM_IO_CQ_PIDX_OFF, value);
+}
+
+static inline int rm_queue_set_pidx(struct rm_device *rdev,
+				    enum rm_queue_type type, u32 value)
+{
+	if (type == RM_QUEUE_SQ)
+		return regmap_write(rdev->io_regmap, RM_IO_SQ_PIDX_OFF, value);
+	else
+		return regmap_write(rdev->io_regmap, RM_IO_CQ_PIDX_OFF, value);
+}
+
/*
 * Return the byte offset of the next free SQ slot, or RM_INVALID_SLOT
 * when the ring is full (producer a full queue_size ahead of consumer).
 * NOTE(review): callers test the result with !offset, which is correct
 * only if RM_INVALID_SLOT is 0 and a valid slot offset is never 0 --
 * confirm against the macro definition and sq.offset.
 */
static inline u32 rm_queue_get_sq_slot_offset(struct rm_device *rdev)
{
	u32 index;

	if ((rdev->sq.pidx - rdev->sq.cidx) >= rdev->queue_size)
		return RM_INVALID_SLOT;

	/* assumes queue_size is a power of two -- TODO confirm */
	index = rdev->sq.pidx & (rdev->queue_size - 1);
	return rdev->sq.offset + RM_CMD_SQ_SLOT_SIZE * index;
}
+
+static inline u32 rm_queue_get_cq_slot_offset(struct rm_device *rdev)
+{
+	u32 index;
+
+	index = rdev->cq.cidx & (rdev->queue_size - 1);
+	return rdev->cq.offset + RM_CMD_CQ_SLOT_SIZE * index;
+}
+
/*
 * Copy @cmd into the next free SQ slot, publish it by advancing the SQ
 * producer index, and track the command on rdev->submitted_cmds until
 * rm_process_msg() completes it. Returns 0 or a negative error code.
 */
static int rm_queue_submit_cmd(struct rm_cmd *cmd)
{
	struct vmgmt_device *vdev = cmd->rdev->vdev;
	struct rm_device *rdev = cmd->rdev;
	u32 offset;
	int ret;

	mutex_lock(&rdev->queue);

	/*
	 * NOTE(review): rm_queue_get_sq_slot_offset() returns
	 * RM_INVALID_SLOT on a full ring; the !offset test assumes that
	 * value is 0 -- confirm.
	 */
	offset = rm_queue_get_sq_slot_offset(rdev);
	if (!offset) {
		vmgmt_err(vdev, "No SQ slot available");
		ret = -ENOSPC;
		goto exit;
	}

	/* Copy the whole SQ message into the shared-memory slot. */
	ret = rm_queue_bulk_write(rdev, offset, (u32 *)&cmd->sq_msg,
				  sizeof(cmd->sq_msg));
	if (ret) {
		vmgmt_err(vdev, "Failed to write msg to ring, ret %d", ret);
		goto exit;
	}

	/* Publish the message to the firmware by bumping the producer index. */
	ret = rm_queue_set_pidx(rdev, RM_QUEUE_SQ, ++rdev->sq.pidx);
	if (ret) {
		vmgmt_err(vdev, "Failed to update PIDX, ret %d", ret);
		goto exit;
	}

	list_add_tail(&cmd->list, &rdev->submitted_cmds);
exit:
	mutex_unlock(&rdev->queue);
	return ret;
}
+
+static void rm_queue_withdraw_cmd(struct rm_cmd *cmd)
+{
+	mutex_lock(&cmd->rdev->queue);
+	list_del(&cmd->list);
+	mutex_unlock(&cmd->rdev->queue);
+}
+
+static int rm_queue_wait_cmd_timeout(struct rm_cmd *cmd, unsigned long timeout)
+{
+	struct vmgmt_device *vdev = cmd->rdev->vdev;
+	int ret;
+
+	if (wait_for_completion_timeout(&cmd->executed, timeout)) {
+		ret = cmd->cq_msg.data.rcode;
+		if (!ret)
+			return 0;
+
+		vmgmt_err(vdev, "CMD returned with a failure: %d", ret);
+		return ret;
+	}
+
+	/*
+	 * each cmds will be cleaned up by complete before it times out.
+	 * if we reach here, the cmd should be cleared and hot reset should
+	 * be issued.
+	 */
+	vmgmt_err(vdev, "cmd is timedout after, please reset the card");
+	rm_queue_withdraw_cmd(cmd);
+	return -ETIME;
+}
+
 int rm_queue_send_cmd(struct rm_cmd *cmd, unsigned long timeout)
 {
-	return 0;
+	int ret;
+
+	ret = rm_queue_submit_cmd(cmd);
+	if (ret)
+		return ret;
+
+	return rm_queue_wait_cmd_timeout(cmd, timeout);
+}
+
/*
 * Pop one message from the CQ slot at the current consumer index and
 * complete the matching submitted command.
 *
 * Matching is by header ID against rdev->submitted_cmds. On a match the
 * completion payload is copied into cmd->cq_msg.data, the first word of
 * the slot is zeroed (presumably marking the slot reusable for the
 * firmware -- TODO confirm), the command is unlinked, and its waiter is
 * woken via complete().
 *
 * Returns 0 on success, -EINVAL for an invalid slot offset, a regmap
 * error if the header read fails, or -EFAULT when the completion matches
 * no submitted command.
 *
 * Caller must hold rdev->queue.
 */
static int rm_process_msg(struct rm_device *rdev)
{
	struct rm_cmd *cmd, *next;
	struct vmgmt_device *vdev = rdev->vdev;
	struct rm_cmd_cq_hdr header;
	u32 offset;
	int ret;

	offset = rm_queue_get_cq_slot_offset(rdev);
	if (!offset) {
		vmgmt_err(vdev, "Invalid CQ offset");
		return -EINVAL;
	}

	/* Read only the slot header first to learn which command completed. */
	ret = rm_queue_bulk_read(rdev, offset, (u32 *)&header, sizeof(header));
	if (ret) {
		vmgmt_err(vdev, "Failed to read queue msg, %d", ret);
		return ret;
	}

	list_for_each_entry_safe(cmd, next, &rdev->submitted_cmds, list) {
		u32 value = 0;

		if (cmd->sq_msg.hdr.id != header.id)
			continue;

		/* Copy the payload that follows the CQ header in the slot. */
		ret = rm_queue_bulk_read(rdev, offset + sizeof(cmd->cq_msg.hdr),
					 (u32 *)&cmd->cq_msg.data,
					 sizeof(cmd->cq_msg.data));
		if (ret)
			vmgmt_warn(vdev, "Failed to read queue msg, %d", ret);

		/*
		 * Zero the slot's first word. Read/write failures here are
		 * deliberately non-fatal so the waiter is still woken.
		 */
		ret = rm_queue_write(rdev, offset, value);
		if (ret)
			vmgmt_warn(vdev, "Failed to write queue msg, %d", ret);

		list_del(&cmd->list);
		complete(&cmd->executed);
		return 0;
	}

	vmgmt_err(vdev, "Unknown cmd ID %d found in CQ", header.id);
	return -EFAULT;
}
+
+static void rm_check_msg(struct work_struct *w)
+{
+	struct rm_device *rdev = to_rdev_msg_monitor(w);
+	int ret;
+
+	mutex_lock(&rdev->queue);
+
+	ret = rm_queue_get_cidx(rdev, RM_QUEUE_SQ, &rdev->sq.cidx);
+	if (ret)
+		goto error;
+
+	ret = rm_queue_get_pidx(rdev, RM_QUEUE_CQ, &rdev->cq.pidx);
+	if (ret)
+		goto error;
+
+	while (rdev->cq.cidx < rdev->cq.pidx) {
+		ret = rm_process_msg(rdev);
+		if (ret)
+			break;
+
+		rdev->cq.cidx++;
+
+		ret = rm_queue_set_cidx(rdev, RM_QUEUE_CQ, rdev->cq.cidx);
+		if (ret)
+			break;
+	};
+
+error:
+	mutex_unlock(&rdev->queue);
+}
+
+static void rm_sched_work(struct timer_list *t)
+{
+	struct rm_device *rdev = to_rdev_msg_timer(t);
+
+	/* Schedule a work in the general workqueue */
+	schedule_work(&rdev->msg_monitor);
+	/* Periodic timer */
+	mod_timer(&rdev->msg_timer, jiffies + RM_COMPLETION_TIMER);
 }
 
 void rm_queue_fini(struct rm_device *rdev)
 {
+	del_timer_sync(&rdev->msg_timer);
+	cancel_work_sync(&rdev->msg_monitor);
+	mutex_destroy(&rdev->queue);
 }
 
 int rm_queue_init(struct rm_device *rdev)
 {
+	struct vmgmt_device *vdev = rdev->vdev;
+	struct rm_queue_header header = {0};
+	int ret;
+
+	INIT_LIST_HEAD(&rdev->submitted_cmds);
+	mutex_init(&rdev->queue);
+
+	ret = rm_queue_bulk_read(rdev, RM_HDR_OFF, (u32 *)&header,
+				 sizeof(header));
+	if (ret) {
+		vmgmt_err(vdev, "Failed to read RM shared mem, ret %d", ret);
+		goto error;
+	}
+
+	if (header.magic != RM_QUEUE_HDR_MAGIC_NUM) {
+		vmgmt_err(vdev, "Invalid RM queue header");
+		ret = -ENODEV;
+		goto error;
+	}
+
+	if (!header.version) {
+		vmgmt_err(vdev, "Invalid RM queue header");
+		ret = -ENODEV;
+		goto error;
+	}
+
+	sema_init(&rdev->sq.data_lock, 1);
+	sema_init(&rdev->cq.data_lock, 1);
+	rdev->queue_size = header.size;
+	rdev->sq.offset = header.sq_off;
+	rdev->cq.offset = header.cq_off;
+	rdev->sq.type = RM_QUEUE_SQ;
+	rdev->cq.type = RM_QUEUE_CQ;
+	rdev->sq.data_size = rdev->queue_buffer_size - RM_CMD_CQ_BUFFER_SIZE;
+	rdev->cq.data_size = RM_CMD_CQ_BUFFER_SIZE;
+	rdev->sq.data_offset = rdev->queue_buffer_start +
+			       RM_CMD_CQ_BUFFER_OFFSET + RM_CMD_CQ_BUFFER_SIZE;
+	rdev->cq.data_offset = rdev->queue_buffer_start +
+			       RM_CMD_CQ_BUFFER_OFFSET;
+	rdev->sq.cidx = header.sq_cidx;
+	rdev->cq.cidx = header.cq_cidx;
+
+	ret = rm_queue_get_pidx(rdev, RM_QUEUE_SQ, &rdev->sq.pidx);
+	if (ret) {
+		vmgmt_err(vdev, "Failed to read sq.pidx, ret %d", ret);
+		goto error;
+	}
+
+	ret = rm_queue_get_pidx(rdev, RM_QUEUE_CQ, &rdev->cq.pidx);
+	if (ret) {
+		vmgmt_err(vdev, "Failed to read cq.pidx, ret %d", ret);
+		goto error;
+	}
+
+	if (rdev->cq.cidx != rdev->cq.pidx) {
+		vmgmt_warn(vdev, "Clearing stale completions");
+		rdev->cq.cidx = rdev->cq.pidx;
+		ret = rm_queue_set_cidx(rdev, RM_QUEUE_CQ, rdev->cq.cidx);
+		if (ret) {
+			vmgmt_err(vdev, "Failed to cleanup CQ, ret %d", ret);
+			goto error;
+		}
+	}
+
+	/* Create and schedule timer to do recurring work */
+	INIT_WORK(&rdev->msg_monitor, &rm_check_msg);
+	timer_setup(&rdev->msg_timer, &rm_sched_work, 0);
+	mod_timer(&rdev->msg_timer, jiffies + RM_COMPLETION_TIMER);
+
 	return 0;
+error:
+	mutex_destroy(&rdev->queue);
+	return ret;
 }