// SPDX-License-Identifier: GPL-2.0
/*
 * Support for the ASR SPI controller DMA mode
 *
 * Copyright (C) 2019 ASR Micro Limited
 *
 * Tim Wang <timwang@asrmicro.com>
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>

#include "spi-asr.h"

#define MAX_SEG_SIZE SZ_4K

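/*
 * Build a scatter/gather table for one direction of the current transfer
 * and map it for DMA. Buffers allocated with vmalloc() are mapped page by
 * page; a NULL buffer is backed by the preallocated dummy buffer. Returns
 * the number of mapped entries, or a negative errno on failure.
 */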
static int asr_spi_map_dma_buffer(struct spi_driver_data *drv_data,
				  enum dma_data_direction dir)
{
	int i, nents, len = drv_data->len;
	struct scatterlist *sg;
	struct device *dmadev;
	struct sg_table *sgt;
	void *buf, *pbuf;
	int desc_len;
	size_t bytes;
	struct page *vm_page;
	bool vmalloced_buf = false;

	if (dir == DMA_TO_DEVICE) {
		dmadev = drv_data->tx_chan->device->dev;
		sgt = &drv_data->tx_sgt;
		buf = drv_data->tx;
		drv_data->tx_map_len = len;
	} else {
		dmadev = drv_data->rx_chan->device->dev;
		sgt = &drv_data->rx_sgt;
		buf = drv_data->rx;
		drv_data->rx_map_len = len;
	}

	desc_len = MAX_SEG_SIZE;
	if (buf && is_vmalloc_addr(buf)) {
		vmalloced_buf = true;
		desc_len = min_t(unsigned long, MAX_SEG_SIZE, PAGE_SIZE);
		nents = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else {
		nents = DIV_ROUND_UP(len, desc_len);
	}

	if (nents != sgt->nents) {
		int ret;

		sg_free_table(sgt);
		ret = sg_alloc_table(sgt, nents, GFP_ATOMIC);
		if (ret)
			return ret;
	}

	pbuf = buf;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		if (vmalloced_buf) {
			bytes = min_t(size_t, desc_len,
				      min_t(size_t, len,
					    PAGE_SIZE - offset_in_page(pbuf)));
			vm_page = vmalloc_to_page(pbuf);
			if (!vm_page)
				return -ENOMEM;
			sg_set_page(sg, vm_page, bytes, offset_in_page(pbuf));
		} else if (buf) {
			bytes = min_t(size_t, len, desc_len);
			sg_set_buf(sg, pbuf, bytes);
		} else {
			bytes = min_t(size_t, len, desc_len);
			sg_set_buf(sg, drv_data->dummy, bytes);
		}

		pbuf += bytes;
		len -= bytes;
	}

	nents = dma_map_sg(dmadev, sgt->sgl, sgt->nents, dir);
	if (!nents)
		return -ENOMEM;

	return nents;
}

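/* Unmap the scatter/gather table of one transfer direction. */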
static void asr_spi_unmap_dma_buffer(struct spi_driver_data *drv_data,
				     enum dma_data_direction dir)
{
	struct device *dmadev;
	struct sg_table *sgt;

	if (dir == DMA_TO_DEVICE) {
		dmadev = drv_data->tx_chan->device->dev;
		sgt = &drv_data->tx_sgt;
	} else {
		dmadev = drv_data->rx_chan->device->dev;
		sgt = &drv_data->rx_sgt;
	}

	dma_unmap_sg(dmadev, sgt->sgl, sgt->nents, dir);
}

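/* Unmap both directions, but only if the current transfer was DMA mapped. */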
static void asr_spi_unmap_dma_buffers(struct spi_driver_data *drv_data)
{
	if (!drv_data->dma_mapped)
		return;

	asr_spi_unmap_dma_buffer(drv_data, DMA_FROM_DEVICE);
	asr_spi_unmap_dma_buffer(drv_data, DMA_TO_DEVICE);

	drv_data->dma_mapped = 0;
}

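/*
 * Common completion path for both the DMA callback and the ROR error
 * interrupt; @error tells whether the caller already saw a failure.
 */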
static void asr_spi_dma_transfer_complete(struct spi_driver_data *drv_data,
					  bool error)
{
	struct spi_message *msg = drv_data->cur_msg;

	/*
	 * It is possible that one CPU is handling the ROR interrupt while
	 * another just gets the DMA completion. Calling pump_transfers()
	 * twice for the same transfer leads to problems, so we prevent
	 * concurrent calls by using ->dma_running.
	 */
	if (atomic_dec_and_test(&drv_data->dma_running)) {
		/*
		 * If the other CPU is still handling the ROR interrupt we
		 * might not know about the error yet. So we re-check the
		 * ROR bit here before we clear the status register.
		 */
		if (!error) {
			u32 status = asr_spi_read(drv_data, STATUS)
				     & drv_data->mask_sr;
			error = status & STATUS_ROR;
		}

		/* Clear status & disable interrupts */
		asr_spi_write(drv_data, FIFO_CTRL,
			      asr_spi_read(drv_data, FIFO_CTRL)
			      & ~drv_data->dma_fifo_ctrl);
		asr_spi_write(drv_data, TOP_CTRL,
			      asr_spi_read(drv_data, TOP_CTRL)
			      & ~drv_data->dma_top_ctrl);
		asr_spi_write(drv_data, STATUS, drv_data->clear_sr);
		asr_spi_write(drv_data, TO, 0);

		if (drv_data->xfer_way == XFER_SPIMEM) {
			asr_spi_unmap_dma_buffers(drv_data);

			complete(&drv_data->dma_completion);
			return;
		}

		if (!error) {
			asr_spi_unmap_dma_buffers(drv_data);

			drv_data->tx += drv_data->tx_map_len;
			drv_data->rx += drv_data->rx_map_len;

			msg->actual_length += drv_data->len;
			msg->state = asr_spi_next_transfer(drv_data);
		} else {
			/* In case we got an error we disable the SSP now */
			asr_spi_write(drv_data, TOP_CTRL,
				      asr_spi_read(drv_data, TOP_CTRL)
				      & ~TOP_SSE);

			msg->state = ERROR_STATE;
		}
		asr_spi_pump_transfers(drv_data);
	}
}

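/* dmaengine completion callback, attached to the RX descriptor. */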
static void asr_spi_dma_callback(void *data)
{
	asr_spi_dma_transfer_complete(data, false);
}

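/*
 * Configure one DMA channel (TX or RX) for the data register and prepare
 * a slave_sg descriptor over the previously mapped scatter/gather table.
 */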
static struct dma_async_tx_descriptor *
asr_spi_dma_prepare_one(struct spi_driver_data *drv_data,
			enum dma_transfer_direction dir)
{
	struct chip_data *chip = drv_data->cur_chip;
	enum dma_slave_buswidth width;
	struct dma_slave_config cfg;
	struct dma_chan *chan;
	struct sg_table *sgt;
	int nents, ret;

	switch (drv_data->n_bytes) {
	case 1:
		width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	case 2:
		width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	default:
		width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.direction = dir;

	if (dir == DMA_MEM_TO_DEV) {
		cfg.dst_addr = drv_data->ssdr_physical;
		cfg.dst_addr_width = width;
		cfg.dst_maxburst = chip->dma_burst_size;

		sgt = &drv_data->tx_sgt;
		nents = drv_data->tx_nents;
		chan = drv_data->tx_chan;
	} else {
		cfg.src_addr = drv_data->ssdr_physical;
		cfg.src_addr_width = width;
		cfg.src_maxburst = chip->dma_burst_size;

		sgt = &drv_data->rx_sgt;
		nents = drv_data->rx_nents;
		chan = drv_data->rx_chan;
	}

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret) {
		dev_warn(&drv_data->pdev->dev, "DMA slave config failed\n");
		return NULL;
	}

	return dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}

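/* DMA is only attempted for transfers no longer than MAX_DMA_LEN. */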
bool asr_spi_dma_is_possible(size_t len)
{
	return len <= MAX_DMA_LEN;
}

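/*
 * Map both TX and RX buffers for the current transfer. Returns 1 when the
 * DMA mapping succeeded and 0 when DMA will not be used for this transfer.
 */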
int asr_spi_map_dma_buffers(struct spi_driver_data *drv_data)
{
	const struct chip_data *chip = drv_data->cur_chip;
	int ret;

	if (!chip->enable_dma)
		return 0;

	/* Don't bother with DMA if we can't do even a single burst */
	if (drv_data->len < chip->dma_burst_size)
		return 0;

	ret = asr_spi_map_dma_buffer(drv_data, DMA_TO_DEVICE);
	if (ret <= 0) {
		dev_warn(&drv_data->pdev->dev, "failed to DMA map TX\n");
		return 0;
	}

	drv_data->tx_nents = ret;

	ret = asr_spi_map_dma_buffer(drv_data, DMA_FROM_DEVICE);
	if (ret <= 0) {
		asr_spi_unmap_dma_buffer(drv_data, DMA_TO_DEVICE);
		dev_warn(&drv_data->pdev->dev, "failed to DMA map RX\n");
		return 0;
	}

	drv_data->rx_nents = ret;
	return 1;
}

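/*
 * Interrupt handler for DMA mode: on a ROR status both channels are
 * terminated and the transfer is completed with an error.
 */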
irqreturn_t asr_spi_dma_transfer(struct spi_driver_data *drv_data)
{
	u32 status;

	status = asr_spi_read(drv_data, STATUS) & drv_data->mask_sr;

	if (status & STATUS_ROR) {
		dmaengine_terminate_all(drv_data->rx_chan);
		dmaengine_terminate_all(drv_data->tx_chan);
		asr_spi_dma_transfer_complete(drv_data, true);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

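/* Software timeout in slave mode: abort both DMA channels and report an error. */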
void asr_spi_slave_sw_timeout_callback(struct spi_driver_data *drv_data)
{
	dmaengine_terminate_all(drv_data->rx_chan);
	dmaengine_terminate_all(drv_data->tx_chan);
	asr_spi_dma_transfer_complete(drv_data, true);
}

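/*
 * Prepare and submit the TX and RX descriptors for the current transfer.
 * The completion callback is attached to the RX descriptor only, since
 * the transfer is done when RX completes.
 */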
int asr_spi_dma_prepare(struct spi_driver_data *drv_data, u32 dma_burst)
{
	struct dma_async_tx_descriptor *tx_desc, *rx_desc;

	tx_desc = asr_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV);
	if (!tx_desc) {
		dev_err(&drv_data->pdev->dev,
			"failed to get DMA TX descriptor\n");
		return -EBUSY;
	}

	rx_desc = asr_spi_dma_prepare_one(drv_data, DMA_DEV_TO_MEM);
	if (!rx_desc) {
		dev_err(&drv_data->pdev->dev,
			"failed to get DMA RX descriptor\n");
		return -EBUSY;
	}

	/* We are ready when RX completes */
	rx_desc->callback = asr_spi_dma_callback;
	rx_desc->callback_param = drv_data;

	dmaengine_submit(rx_desc);
	dmaengine_submit(tx_desc);
	return 0;
}

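/* Kick off both DMA channels and mark the DMA transfer as running. */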
void asr_spi_dma_start(struct spi_driver_data *drv_data)
{
	if (drv_data->xfer_way == XFER_SPIMEM)
		reinit_completion(&drv_data->dma_completion);

	dma_async_issue_pending(drv_data->rx_chan);
	dma_async_issue_pending(drv_data->tx_chan);

	atomic_set(&drv_data->dma_running, 1);
}

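/*
 * Request the TX and RX DMA channels and allocate the dummy buffer used
 * when a transfer has no TX or RX data buffer.
 */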
int asr_spi_dma_setup(struct spi_driver_data *drv_data)
{
	struct asr_spi_master *pdata = drv_data->master_info;
	struct device *dev = &drv_data->pdev->dev;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	drv_data->dummy = devm_kzalloc(dev, MAX_SEG_SIZE, GFP_KERNEL);
	if (!drv_data->dummy)
		return -ENOMEM;

	drv_data->tx_chan = dma_request_slave_channel_compat(mask,
			pdata->dma_filter, pdata->tx_param, dev, "tx");
	if (!drv_data->tx_chan)
		return -ENODEV;

	drv_data->rx_chan = dma_request_slave_channel_compat(mask,
			pdata->dma_filter, pdata->rx_param, dev, "rx");
	if (!drv_data->rx_chan) {
		dma_release_channel(drv_data->tx_chan);
		drv_data->tx_chan = NULL;
		return -ENODEV;
	}

	return 0;
}

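/* Stop any pending DMA, free the scatter/gather tables and release the channels. */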
void asr_spi_dma_release(struct spi_driver_data *drv_data)
{
	if (drv_data->rx_chan) {
		dmaengine_terminate_all(drv_data->rx_chan);
		dma_release_channel(drv_data->rx_chan);
		sg_free_table(&drv_data->rx_sgt);
		drv_data->rx_chan = NULL;
	}
	if (drv_data->tx_chan) {
		dmaengine_terminate_all(drv_data->tx_chan);
		dma_release_channel(drv_data->tx_chan);
		sg_free_table(&drv_data->tx_sgt);
		drv_data->tx_chan = NULL;
	}
}

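/*
 * Pick a DMA burst size matching the word width (or the chip_info
 * override) and the default FIFO thresholds.
 */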
int asr_spi_set_dma_burst_and_threshold(struct chip_data *chip,
					struct spi_device *spi,
					u8 bits_per_word, u32 *burst_code,
					u32 *threshold)
{
	/*
	 * If the DMA burst size is given in chip_info we use that,
	 * otherwise we set it to half of the FIFO size; the SPI FIFO
	 * has 16 entries, so FIFO size = 16 * bits_per_word / 8.
	 * Also we use the default FIFO thresholds for now.
	 */
	if (chip && chip->dma_burst_size)
		*burst_code = chip->dma_burst_size;
	else if (bits_per_word <= 8)
		*burst_code = 8;
	else if (bits_per_word <= 16)
		*burst_code = 16;
	else
		*burst_code = 32;

	*threshold = FIFO_RxTresh(RX_THRESH_DFLT)
		     | FIFO_TxTresh(TX_THRESH_DFLT);

	return 0;
}