@@ -5533,8 +5533,8 @@ static void hpsa_cmd_init(struct ctlr_info *h, int index,
{
dma_addr_t cmd_dma_handle, err_dma_handle;
- /* Zero out all of commandlist except the last field, refcount */
- memset(c, 0, offsetof(struct CommandList, refcount));
+ /* Zero out all of commandlist */
+ memset(c, 0, sizeof(struct CommandList));
c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
c->err_info = h->errinfo_pool + index;
@@ -5556,7 +5556,6 @@ static void hpsa_preinitialize_commands(struct ctlr_info *h)
struct CommandList *c = h->cmd_pool + i;
hpsa_cmd_init(h, i, c);
- atomic_set(&c->refcount, 0);
}
}
@@ -6172,7 +6171,6 @@ static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
return NULL;
}
- atomic_inc(&c->refcount);
hpsa_cmd_partial_init(h, idx, c);
/*
@@ -6186,11 +6184,6 @@ static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
{
- /*
- * Release our reference to the block. We don't need to do anything
- * else to free it, because it is accessed by index.
- */
- (void)atomic_dec(&c->refcount);
c->scsi_cmd = NULL;
}
@@ -454,18 +454,8 @@ struct CommandList {
bool retry_pending;
struct hpsa_scsi_dev_t *device;
- atomic_t refcount; /* Must be last to avoid memset in hpsa_cmd_init() */
} __aligned(COMMANDLIST_ALIGNMENT);
-/*
- * Make sure our embedded atomic variable is aligned. Otherwise we break atomic
- * operations on architectures that don't support unaligned atomics like IA64.
- *
- * The assert guards against reintroductin against unwanted __packed to
- * the struct CommandList.
- */
-static_assert(offsetof(struct CommandList, refcount) % __alignof__(atomic_t) == 0);
-
/* Max S/G elements in I/O accelerator command */
#define IOACCEL1_MAXSGENTRIES 24
#define IOACCEL2_MAXSGENTRIES 28
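
A note on why the memset() change is safe: refcount was deliberately the
last member of struct CommandList, so the old
memset(c, 0, offsetof(struct CommandList, refcount)) zeroed every byte
except the counter, whose value was managed separately from the rest of
the command. With the field removed there is nothing left to preserve,
and sizeof() naturally covers the whole struct. A minimal standalone
sketch of the pattern (hypothetical struct and names, not hpsa code):

	#include <stddef.h>
	#include <string.h>

	struct cmd {
		unsigned long tag;
		void *scsi_cmd;
		/* refcount used to sit here, last so memset could skip it */
	};

	static void cmd_init(struct cmd *c)
	{
		/* before: memset(c, 0, offsetof(struct cmd, refcount)); */
		memset(c, 0, sizeof(*c));	/* whole struct, nothing to skip */
	}

	int main(void)
	{
		struct cmd c;
		cmd_init(&c);
		return 0;
	}

Dropping the static_assert follows from the same change: with no atomic_t
member left in struct CommandList, there is no alignment invariant left to
guard.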