@@ -50,6 +50,304 @@ static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
return result;
}
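+
+/*
+ * Update the Collection Table entry for @icid: install a mapping to the
+ * redistributor identified by @rdbase when @valid is set, or clear the
+ * entry otherwise. The CTE encoding used here (valid bit at bit 0,
+ * RDbase from bit 1 upwards) is this implementation's IMPDEF layout.
+ */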
+static MemTxResult update_cte(GICv3ITSState *s, uint16_t icid, bool valid,
+ uint64_t rdbase)
+{
+ AddressSpace *as = &s->gicv3->dma_as;
+ uint64_t value;
+ uint64_t l2t_addr;
+ bool valid_l2t;
+ uint32_t l2t_id;
+ uint32_t max_l2_entries;
+ uint64_t cte = 0;
+ MemTxResult res = MEMTX_OK;
+
+ if (!s->ct.valid) {
+ return res;
+ }
+
+ if (valid) {
+ /* add mapping entry to collection table */
+ cte = (valid & TABLE_ENTRY_VALID_MASK) | (rdbase << 1ULL);
+ }
+
+ /*
+ * The specification defines the format of level 1 entries of a
+ * 2-level table, but the format of level 2 entries and the format
+ * of flat-mapped tables is IMPDEF.
+ */
+ if (s->ct.indirect) {
+ l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE);
+
+ value = address_space_ldq_le(as,
+ s->ct.base_addr +
+ (l2t_id * L1TABLE_ENTRY_SIZE),
+ MEMTXATTRS_UNSPECIFIED, &res);
+
+ if (res != MEMTX_OK) {
+ return res;
+ }
+
+ valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
+
+ if (valid_l2t) {
+ max_l2_entries = s->ct.page_sz / s->ct.entry_sz;
+
+ /* bits [51:12] of the L1 entry hold the level 2 table address */
+ l2t_addr = value & MAKE_64BIT_MASK(12, 40);
+
+ address_space_stq_le(as, l2t_addr +
+ ((icid % max_l2_entries) * GITS_CTE_SIZE),
+ cte, MEMTXATTRS_UNSPECIFIED, &res);
+ }
+ } else {
+ /* Flat level table */
+ address_space_stq_le(as, s->ct.base_addr + (icid * GITS_CTE_SIZE),
+ cte, MEMTXATTRS_UNSPECIFIED, &res);
+ }
+ return res;
+}
+
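+/*
+ * MAPC command: all the fields used here are in the third doubleword
+ * (DW2) of the command: ICID in bits [15:0], RDbase starting at bit 16
+ * and the Valid bit at bit 63. RDbase is interpreted as a processor
+ * number, i.e. this model assumes GITS_TYPER.PTA == 0.
+ */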
+static MemTxResult process_mapc(GICv3ITSState *s, uint32_t offset)
+{
+ AddressSpace *as = &s->gicv3->dma_as;
+ uint16_t icid;
+ uint64_t rdbase;
+ bool valid;
+ MemTxResult res = MEMTX_OK;
+ uint64_t value;
+
+ offset += NUM_BYTES_IN_DW;
+ offset += NUM_BYTES_IN_DW;
+
+ value = address_space_ldq_le(as, s->cq.base_addr + offset,
+ MEMTXATTRS_UNSPECIFIED, &res);
+
+ if (res != MEMTX_OK) {
+ return res;
+ }
+
+ icid = value & ICID_MASK;
+
+ rdbase = (value >> R_MAPC_RDBASE_SHIFT) & RDBASE_PROCNUM_MASK;
+
+ valid = (value & CMD_FIELD_VALID_MASK);
+
+ if ((icid > s->ct.maxids.max_collids) || (rdbase >= s->gicv3->num_cpu)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "ITS MAPC: invalid collection table attributes "
+ "icid %d rdbase %lu\n", icid, rdbase);
+ /*
+ * in this implementation, in case of error
+ * we ignore this command and move onto the next
+ * command in the queue
+ */
+ } else {
+ res = update_cte(s, icid, valid, rdbase);
+ }
+
+ return res;
+}
+
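+/*
+ * Update the Device Table entry for @devid. The DTE encoding used here
+ * (valid bit at bit 0, Size in bits [5:1], ITT address starting at
+ * GITS_DTE_ITTADDR_SHIFT) is this implementation's IMPDEF layout; see
+ * the GITS_DTE_* definitions in the header.
+ */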
+static MemTxResult update_dte(GICv3ITSState *s, uint32_t devid, bool valid,
+ uint8_t size, uint64_t itt_addr)
+{
+ AddressSpace *as = &s->gicv3->dma_as;
+ uint64_t value;
+ uint64_t l2t_addr;
+ bool valid_l2t;
+ uint32_t l2t_id;
+ uint32_t max_l2_entries;
+ uint64_t dte = 0;
+ MemTxResult res = MEMTX_OK;
+
+ if (!s->dt.valid) {
+ return res;
+ }
+
+ if (valid) {
+ /* add mapping entry to device table */
+ dte = (valid & TABLE_ENTRY_VALID_MASK) |
+ ((size & SIZE_MASK) << 1U) |
+ (itt_addr << GITS_DTE_ITTADDR_SHIFT);
+ }
+
+ /*
+ * The specification defines the format of level 1 entries of a
+ * 2-level table, but the format of level 2 entries and the format
+ * of flat-mapped tables is IMPDEF.
+ */
+ if (s->dt.indirect) {
+ l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE);
+
+ value = address_space_ldq_le(as,
+ s->dt.base_addr +
+ (l2t_id * L1TABLE_ENTRY_SIZE),
+ MEMTXATTRS_UNSPECIFIED, &res);
+
+ if (res != MEMTX_OK) {
+ return res;
+ }
+
+ valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
+
+ if (valid_l2t) {
+ max_l2_entries = s->dt.page_sz / s->dt.entry_sz;
+
+ /* bits [51:12] of the L1 entry hold the level 2 table address */
+ l2t_addr = value & MAKE_64BIT_MASK(12, 40);
+
+ address_space_stq_le(as, l2t_addr +
+ ((devid % max_l2_entries) * GITS_DTE_SIZE),
+ dte, MEMTXATTRS_UNSPECIFIED, &res);
+ }
+ } else {
+ /* Flat level table */
+ address_space_stq_le(as, s->dt.base_addr + (devid * GITS_DTE_SIZE),
+ dte, MEMTXATTRS_UNSPECIFIED, &res);
+ }
+ return res;
+}
+
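+/*
+ * MAPD command: the DeviceID is in bits [63:32] of DW0 (passed in as
+ * @value), the ITT size (number of EventID bits minus one) is in bits
+ * [4:0] of DW1, and DW2 holds the Valid bit [63] and the ITT address
+ * in bits [51:8].
+ */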
+static MemTxResult process_mapd(GICv3ITSState *s, uint64_t value,
+ uint32_t offset)
+{
+ AddressSpace *as = &s->gicv3->dma_as;
+ uint32_t devid;
+ uint8_t size;
+ uint64_t itt_addr;
+ bool valid;
+ MemTxResult res = MEMTX_OK;
+
+ devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
+
+ offset += NUM_BYTES_IN_DW;
+ value = address_space_ldq_le(as, s->cq.base_addr + offset,
+ MEMTXATTRS_UNSPECIFIED, &res);
+
+ if (res != MEMTX_OK) {
+ return res;
+ }
+
+ size = (value & SIZE_MASK);
+
+ offset += NUM_BYTES_IN_DW;
+ value = address_space_ldq_le(as, s->cq.base_addr + offset,
+ MEMTXATTRS_UNSPECIFIED, &res);
+
+ if (res != MEMTX_OK) {
+ return res;
+ }
+
+ itt_addr = (value & ITTADDR_MASK) >> ITTADDR_SHIFT;
+
+ valid = (value & CMD_FIELD_VALID_MASK);
+
+ if ((devid > s->dt.maxids.max_devids) ||
+ (size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS))) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "ITS MAPD: invalid device table attributes "
+ "devid %d or size %d\n", devid, size);
+ /*
+ * in this implementation, in case of error
+ * we ignore this command and move onto the next
+ * command in the queue
+ */
+ } else {
+ res = update_dte(s, devid, valid, size, itt_addr);
+ }
+
+ return res;
+}
+
+/*
+ * The current implementation is synchronous: it blocks until all the
+ * commands queued between CREADR and CWRITER have been processed.
+ */
+static void process_cmdq(GICv3ITSState *s)
+{
+ uint32_t wr_offset = 0;
+ uint32_t rd_offset = 0;
+ uint32_t cq_offset = 0;
+ uint64_t data;
+ AddressSpace *as = &s->gicv3->dma_as;
+ MemTxResult res = MEMTX_OK;
+ uint8_t cmd;
+
+ if (!(s->ctlr & ITS_CTLR_ENABLED)) {
+ return;
+ }
+
+ wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);
+
+ if (wr_offset >= s->cq.max_entries) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid write offset "
+ "%d\n", __func__, wr_offset);
+ return;
+ }
+
+ rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);
+
+ if (rd_offset >= s->cq.max_entries) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid read offset "
+ "%d\n", __func__, rd_offset);
+ return;
+ }
+
+ while (wr_offset != rd_offset) {
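+ /*
+ * Each command occupies one 32-byte (4 doubleword) slot; only DW0,
+ * which carries the opcode in its low byte, is read here. The
+ * command-specific handlers fetch any further doublewords they need.
+ */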
+ cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);
+ data = address_space_ldq_le(as, s->cq.base_addr + cq_offset,
+ MEMTXATTRS_UNSPECIFIED, &res);
+ cmd = (data & CMD_MASK);
+
+ switch (cmd) {
+ case GITS_CMD_INT:
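+ /*
+ * INT and CLEAR, like the MAPTI/MAPI/DISCARD/INV/INVALL cases
+ * below, are recognised but not processed by this implementation.
+ */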
+ break;
+ case GITS_CMD_CLEAR:
+ break;
+ case GITS_CMD_SYNC:
+ /*
+ * The current implementation processes each command fully and
+ * synchronously before moving on to the next one, so the internal
+ * state is already consistent by the time a SYNC command is
+ * executed. No further processing is therefore required for SYNC.
+ */
+ break;
+ case GITS_CMD_MAPD:
+ res = process_mapd(s, data, cq_offset);
+ break;
+ case GITS_CMD_MAPC:
+ res = process_mapc(s, cq_offset);
+ break;
+ case GITS_CMD_MAPTI:
+ break;
+ case GITS_CMD_MAPI:
+ break;
+ case GITS_CMD_DISCARD:
+ break;
+ case GITS_CMD_INV:
+ case GITS_CMD_INVALL:
+ break;
+ default:
+ break;
+ }
+ if (res == MEMTX_OK) {
+ rd_offset++;
+ rd_offset %= s->cq.max_entries;
+ s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
+ } else {
+ /*
+ * In this implementation, a DMA read/write error while processing
+ * a command stalls further command processing.
+ */
+ s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: %x cmd processing failed\n", __func__, cmd);
+ break;
+ }
+ }
+}
+
/*
* This function extracts the ITS Device and Collection table specific
* parameters (like base_addr, size etc) from GITS_BASER register.
@@ -206,6 +504,7 @@ static MemTxResult its_writel(GICv3ITSState *s, hwaddr offset,
extract_table_params(s);
extract_cmdq_params(s);
s->creadr = 0;
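+ /* drain any commands the guest queued before enabling the ITS */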
+ process_cmdq(s);
}
break;
case GITS_CBASER:
@@ -233,6 +532,9 @@ static MemTxResult its_writel(GICv3ITSState *s, hwaddr offset,
case GITS_CWRITER:
s->cwriter = deposit64(s->cwriter, 0, 32,
(value & ~R_GITS_CWRITER_RETRY_MASK));
+ if (s->cwriter != s->creadr) {
+ process_cmdq(s);
+ }
break;
case GITS_CWRITER + 4:
s->cwriter = deposit64(s->cwriter, 32, 32, value);
@@ -379,6 +681,9 @@ static MemTxResult its_writell(GICv3ITSState *s, hwaddr offset,
break;
case GITS_CWRITER:
s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
+ if (s->cwriter != s->creadr) {
+ process_cmdq(s);
+ }
break;
case GITS_CREADR:
if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
@@ -304,6 +304,43 @@ FIELD(GITS_TYPER, CIL, 36, 1)
#define L1TABLE_ENTRY_SIZE 8
#define GITS_CMDQ_ENTRY_SIZE 32
+#define NUM_BYTES_IN_DW 8
+
+#define CMD_MASK 0xff
+
+/* ITS Commands */
+#define GITS_CMD_CLEAR 0x04
+#define GITS_CMD_DISCARD 0x0F
+#define GITS_CMD_INT 0x03
+#define GITS_CMD_MAPC 0x09
+#define GITS_CMD_MAPD 0x08
+#define GITS_CMD_MAPI 0x0B
+#define GITS_CMD_MAPTI 0x0A
+#define GITS_CMD_INV 0x0C
+#define GITS_CMD_INVALL 0x0D
+#define GITS_CMD_SYNC 0x05
+
+/* MAPC command fields */
+#define ICID_LENGTH 16
+#define ICID_MASK ((1U << ICID_LENGTH) - 1)
+FIELD(MAPC, RDBASE, 16, 32)
+
+#define RDBASE_PROCNUM_LENGTH 16
+#define RDBASE_PROCNUM_MASK ((1ULL << RDBASE_PROCNUM_LENGTH) - 1)
+
+/* MAPD command fields */
+#define ITTADDR_LENGTH 44
+#define ITTADDR_SHIFT 8
+#define ITTADDR_MASK MAKE_64BIT_MASK(ITTADDR_SHIFT, ITTADDR_LENGTH)
+#define SIZE_MASK 0x1f
+
+#define DEVID_SHIFT 32
+#define DEVID_MASK MAKE_64BIT_MASK(DEVID_SHIFT, 32)
+
+#define VALID_SHIFT 63
+#define CMD_FIELD_VALID_MASK (1ULL << VALID_SHIFT)
+#define L2_TABLE_VALID_MASK CMD_FIELD_VALID_MASK
+#define TABLE_ENTRY_VALID_MASK (1ULL << 0)
/**
* Default features advertised by this version of ITS
@@ -338,6 +375,9 @@ FIELD(GITS_TYPER, CIL, 36, 1)
* Valid = 1 bit,ITTAddr = 44 bits,Size = 5 bits
*/
#define GITS_DTE_SIZE (0x8ULL)
+#define GITS_DTE_ITTADDR_SHIFT 6
+#define GITS_DTE_ITTADDR_MASK MAKE_64BIT_MASK(GITS_DTE_ITTADDR_SHIFT, \
+ ITTADDR_LENGTH)
/*
* 8 bytes Collection Table Entry size