@@ -504,6 +504,32 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
}
/**
+ * dma_get_slave_channel - try to get specific channel exclusively
+ * @chan: target channel
+ */
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
+{
+	/* lock against __dma_request_channel */
+	mutex_lock(&dma_list_mutex);
+
+	if (chan->client_count == 0) {
+		int err = dma_chan_get(chan);
+
+		if (err) {
+			pr_debug("%s: failed to get %s: (%d)\n",
+				 __func__, dma_chan_name(chan), err);
+			chan = NULL;
+		}
+	} else
+		chan = NULL;
+
+	mutex_unlock(&dma_list_mutex);
+
+	return chan;
+}
+EXPORT_SYMBOL_GPL(dma_get_slave_channel);
+
+/**
* dma_request_channel - try to allocate an exclusive channel
* @mask: capabilities that the channel must satisfy
* @fn: optional callback to disposition available channels
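A usage sketch for the new helper (not part of this patch): a driver that already holds a pointer to a specific channel can claim and release it as below; my_claim_channel and the surrounding error handling are illustrative only.

#include <linux/dmaengine.h>

/* Sketch: claim one specific channel through dma_get_slave_channel(),
 * which returns the channel with a client reference held, or NULL if
 * it is already in use; dma_release_channel() drops that reference. */
static int my_claim_channel(struct dma_chan *candidate)
{
	struct dma_chan *chan = dma_get_slave_channel(candidate);

	if (!chan)
		return -EBUSY;

	/* ... prepare and submit descriptors on chan ... */

	dma_release_channel(chan);
	return 0;
}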
@@ -99,9 +99,11 @@ struct k3_dma_dev {
spinlock_t lock;
struct list_head chan_pending;
struct k3_dma_phy *phy;
+ struct k3_dma_chan *chans;
struct dma_pool *pool;
struct clk *clk;
u32 dma_channels;
+ u32 dma_requests;
};
#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)
@@ -599,10 +601,17 @@ static struct of_device_id k3_pdma_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids);
-static struct of_dma_filter_info k3_dma_filter;
-static bool k3_dma_filter_fn(struct dma_chan *chan, void *param)
+static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
+	struct of_dma *ofdma)
{
- return (*(int *)param == chan->chan_id);
+ struct k3_dma_dev *d = ofdma->of_dma_data;
+ unsigned int request = dma_spec->args[0];
+
+ if (request >= d->dma_requests)
+ return NULL;
+
+ return dma_get_slave_channel(&(d->chans[request].vc.chan));
}
static int k3_dma_probe(struct platform_device *op)
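For context, a sketch of the consumer side (not part of the patch): a client driver reaches the xlate callback above through the generic "dmas"/"dma-names" lookup; my_client_probe and the "rx" channel name are illustrative.

#include <linux/dmaengine.h>
#include <linux/platform_device.h>

/* Sketch: the core resolves the "rx" entry of this device's
 * "dmas"/"dma-names" properties, walks to the controller registered
 * via of_dma_controller_register(), and invokes its xlate callback,
 * which here hands back a channel via dma_get_slave_channel(). */
static int my_client_probe(struct platform_device *pdev)
{
	struct dma_chan *chan;

	chan = dma_request_slave_channel(&pdev->dev, "rx");
	if (!chan)
		return -EPROBE_DEFER;

	/* ... configure the channel with dmaengine_slave_config() ... */

	dma_release_channel(chan);
	return 0;
}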
@@ -611,8 +620,6 @@ static int k3_dma_probe(struct platform_device *op)
const struct of_device_id *of_id;
struct resource *iores;
int i, ret, irq = 0;
- int dma_requests = 0;
- struct k3_dma_chan *chans;
iores = platform_get_resource(op, IORESOURCE_MEM, 0);
if (!iores)
@@ -631,7 +638,7 @@ static int k3_dma_probe(struct platform_device *op)
of_property_read_u32((&op->dev)->of_node,
"dma-channels", &d->dma_channels);
of_property_read_u32((&op->dev)->of_node,
- "dma-requests", &dma_requests);
+ "dma-requests", &d->dma_requests);
}
d->clk = devm_clk_get(&op->dev, NULL);
@@ -672,16 +679,16 @@ static int k3_dma_probe(struct platform_device *op)
d->slave.device_issue_pending = k3_dma_issue_pending;
d->slave.device_control = k3_dma_control;
d->slave.copy_align = DMA_ALIGN;
- d->slave.chancnt = dma_requests;
+ d->slave.chancnt = d->dma_requests;
/* init virtual channel */
- chans = devm_kzalloc(&op->dev,
- dma_requests * sizeof(struct k3_dma_chan), GFP_KERNEL);
- if (chans == NULL)
+ d->chans = devm_kzalloc(&op->dev,
+ d->dma_requests * sizeof(struct k3_dma_chan), GFP_KERNEL);
+ if (d->chans == NULL)
return -ENOMEM;
- for (i = 0; i < dma_requests; i++) {
- struct k3_dma_chan *c = &chans[i];
+ for (i = 0; i < d->dma_requests; i++) {
+ struct k3_dma_chan *c = &d->chans[i];
INIT_LIST_HEAD(&c->node);
c->vc.desc_free = k3_dma_free_desc;
@@ -707,9 +714,8 @@ static int k3_dma_probe(struct platform_device *op)
if (ret)
goto of_dma_register_fail;
- k3_dma_filter.dma_cap = d->slave.cap_mask;
- k3_dma_filter.filter_fn = k3_dma_filter_fn;
- ret = of_dma_controller_register((&op->dev)->of_node,
- of_dma_simple_xlate, &k3_dma_filter);
+ ret = of_dma_controller_register((&op->dev)->of_node,
+ k3_of_dma_simple_xlate, d);
if (ret)
goto dma_async_regitster_fail;
@@ -1000,6 +1000,7 @@ int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
struct dma_chan *net_dma_find_channel(void);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
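A sketch of the compat helper in use (assumptions: my_filter, my_param, and the "tx" name are placeholders, not from this patch): it tries the device-tree lookup first and falls back to the classic mask-plus-filter request on non-DT platforms.

#include <linux/dmaengine.h>
#include <linux/platform_device.h>

/* Sketch: request a slave channel in a driver that must run on both
 * DT and legacy board-file platforms. */
static struct dma_chan *my_request_tx_chan(struct platform_device *pdev,
					   dma_filter_fn my_filter,
					   void *my_param)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Tries dma_request_slave_channel(dev, name) first, then falls
	 * back to __dma_request_channel(&mask, fn, fn_param). */
	return dma_request_slave_channel_compat(mask, my_filter, my_param,
						&pdev->dev, "tx");
}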