Skip to content

Commit c89e089

Browse files
committed
drivers/dma/dma_stm32_v3.c: add LLI support
Add support for linked list items (LLI) in the STM32 DMA v3 driver. This allows for more flexible DMA configurations, such as scatter-gather and cyclic transfers, and makes it possible to pass both the dma_m2m_cyclic and the dma_m2m_sg tests on the stm32mp257f_ev1 board. Signed-off-by: Youssef Zini <[email protected]>
1 parent 05a8494 commit c89e089

File tree

1 file changed

+92
-19
lines changed

1 file changed

+92
-19
lines changed

drivers/dma/dma_stm32_v3.c

Lines changed: 92 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -16,9 +16,8 @@ LOG_MODULE_REGISTER(dma_stm32_v3, CONFIG_DMA_LOG_LEVEL);
1616
#define DT_DRV_COMPAT st_stm32_dma_v3
1717
#define DMA_STM32_MAX_DATA_ITEMS 0xffff
1818

19-
/* Since at this point we only support cyclic mode , we only need 3 descriptors
20-
* at most to update the source and destination addresses and the update
21-
* registers. TODO: Raise this number for larger linked lists.
19+
/* Since the descriptors pool is allocated statically, we define the number of
20+
 * descriptors per channel to be used for linked list transfers.
2221
*/
2322
#define DMA_STM32_NUM_DESCRIPTORS_PER_CHANNEL 24
2423
#define POLLING_TIMEOUT_US (10 * USEC_PER_MSEC)
@@ -40,6 +39,7 @@ struct dma_stm32_channel {
4039
uint32_t src_size;
4140
uint32_t dst_size;
4241
void *user_data;
42+
uint32_t complete_callback_en;
4343
dma_callback_t dma_callback;
4444
bool cyclic;
4545
int block_count;
@@ -240,8 +240,8 @@ static int dma_stm32_disable_channel(DMA_TypeDef *dma, uint32_t channel)
240240
static int dma_stm32_validate_transfer_sizes(struct dma_config *config)
241241
{
242242
if (config->head_block->block_size > DMA_STM32_MAX_DATA_ITEMS) {
243-
LOG_ERR("Data size exceeds the maximum limit: %d>%d", config->head_block->block_size,
244-
DMA_STM32_MAX_DATA_ITEMS);
243+
LOG_ERR("Data size exceeds the maximum limit: %d>%d",
244+
config->head_block->block_size, DMA_STM32_MAX_DATA_ITEMS);
245245
return -EINVAL;
246246
}
247247

@@ -272,11 +272,65 @@ static int dma_stm32_validate_transfer_sizes(struct dma_config *config)
272272
return 0;
273273
}
274274

275+
/*
 * Build the linked-list descriptor chain for a multi-block transfer.
 *
 * Each descriptor occupies three consecutive words in the per-channel pool:
 *   [0] source address (CSAR), [1] destination address (CDAR),
 *   [2] register-update word, whose low bits also carry the offset of the
 *       next descriptor (CLLR).
 *
 * For a cyclic transfer the last descriptor links back to the first; for a
 * one-shot scatter-gather transfer its CLLR word is left at zero, which
 * stops the channel after the final block.
 */
static void dma_stm32_configure_linked_list(uint32_t channel, struct dma_config *config,
					    uint32_t *linked_list_node, DMA_TypeDef *dma)
{
	struct dma_block_config *block = config->head_block;
	const uint32_t base_addr = (uint32_t)&linked_list_node[0];
	uint32_t idx = 0;
	uint32_t link_offset = 0;

	LL_DMA_SetLinkedListBaseAddr(dma, channel, base_addr);

	for (uint32_t i = 0; i < config->block_count; i++) {
		uint32_t update_word = 0;
		uint32_t next_addr = 0;
		uint32_t have_link = 1;

		LOG_DBG("Configuring block descriptor %d for channel %d", i, channel);

		linked_list_node[idx++] = block->source_address;
		linked_list_node[idx++] = block->dest_address;

		if (i + 1 < config->block_count) {
			/* Chain to the following descriptor in the pool; idx currently
			 * points at this descriptor's update word, so the next
			 * descriptor begins one word past it.
			 */
			update_word = LL_DMA_UPDATE_CSAR | LL_DMA_UPDATE_CDAR | LL_DMA_UPDATE_CLLR;
			block = block->next_block;
			next_addr = (uint32_t)&linked_list_node[idx + 1];
		} else if (config->cyclic) {
			/* Close the ring: the last descriptor points back to the first. */
			LOG_DBG("Last descriptor %d for channel %d, linking to first", i, channel);
			update_word = LL_DMA_UPDATE_CSAR | LL_DMA_UPDATE_CDAR | LL_DMA_UPDATE_CLLR;
			next_addr = base_addr;
		} else {
			/* Terminal descriptor: a zero CLLR ends the transfer. */
			LOG_DBG("Last descriptor %d for channel %d, no link", i, channel);
			have_link = 0;
		}

		if (have_link) {
			/* CLLR only holds bits [15:2] of the next descriptor address. */
			link_offset = next_addr & GENMASK(15, 2);
			update_word |= link_offset;
		}

		linked_list_node[idx++] = update_word;

		if (i == 0) {
			/* Program the channel registers with the first link so the
			 * hardware fetches descriptor 0 when the transfer starts.
			 */
			LL_DMA_ConfigLinkUpdate(dma, channel, update_word, link_offset);
		}
	}
}
329+
275330
int dma_stm32_configure(const struct device *dev, uint32_t id, struct dma_config *config)
276331
{
277332
const struct dma_stm32_config *dev_config;
278333
struct dma_stm32_channel *channel_config;
279-
struct dma_block_config *block_config;
280334
struct dma_stm32_descriptor hwdesc;
281335
uint32_t channel;
282336
DMA_TypeDef *dma;
@@ -326,15 +380,13 @@ int dma_stm32_configure(const struct device *dev, uint32_t id, struct dma_config
326380
return ret;
327381
}
328382

329-
block_config = config->head_block;
330-
331-
ret = dma_stm32_get_src_inc_mode(block_config->source_addr_adj, &src_inc_mode);
383+
ret = dma_stm32_get_src_inc_mode(config->head_block->source_addr_adj, &src_inc_mode);
332384
if (ret < 0) {
333385
return ret;
334386
}
335387
LOG_DBG("Source address increment: %d", src_inc_mode);
336388

337-
ret = dma_stm32_get_dest_inc_mode(block_config->dest_addr_adj, &dest_inc_mode);
389+
ret = dma_stm32_get_dest_inc_mode(config->head_block->dest_addr_adj, &dest_inc_mode);
338390
if (ret < 0) {
339391
return ret;
340392
}
@@ -360,6 +412,7 @@ int dma_stm32_configure(const struct device *dev, uint32_t id, struct dma_config
360412
channel_config->src_size = config->source_data_size;
361413
channel_config->dst_size = config->dest_data_size;
362414
channel_config->cyclic = config->cyclic;
415+
channel_config->complete_callback_en = config->complete_callback_en;
363416

364417
dma_stm32_disable_it(dma, channel);
365418

@@ -371,8 +424,12 @@ int dma_stm32_configure(const struct device *dev, uint32_t id, struct dma_config
371424
if (!linked_list_needed) {
372425
ccr |= LL_DMA_LSM_1LINK_EXECUTION;
373426
} else {
374-
LOG_ERR("Only single block transfers are supported for now");
375-
return -ENOTSUP;
427+
ccr |= LL_DMA_LSM_FULL_EXECUTION;
428+
429+
dma_stm32_configure_linked_list(channel, config,
430+
dev_config->linked_list_buffer +
431+
id * DMA_STM32_NUM_DESCRIPTORS_PER_CHANNEL,
432+
dma);
376433
}
377434

378435
/* TODO: support port specifier from configuration */
@@ -390,7 +447,19 @@ int dma_stm32_configure(const struct device *dev, uint32_t id, struct dma_config
390447
config->dest_burst_length);
391448

392449
hwdesc.channel_tr2 = ll_direction;
393-
hwdesc.channel_tr2 |= LL_DMA_TCEM_BLK_TRANSFER;
450+
451+
if (linked_list_needed) {
452+
if (channel_config->complete_callback_en == 1) {
453+
hwdesc.channel_tr2 |= LL_DMA_TCEM_EACH_LLITEM_TRANSFER;
454+
LOG_DBG("Enabling TC callback at the end of each linked list item");
455+
} else {
456+
hwdesc.channel_tr2 |= LL_DMA_TCEM_LAST_LLITEM_TRANSFER;
457+
LOG_DBG("Enabling TC callback at the end of last linked list item");
458+
}
459+
} else {
460+
hwdesc.channel_tr2 |= LL_DMA_TCEM_BLK_TRANSFER;
461+
LOG_DBG("Enabling TC callback at the end of the block");
462+
}
394463

395464
LL_DMA_ConfigChannelTransfer(dma, channel, hwdesc.channel_tr2);
396465

@@ -691,16 +760,20 @@ static void dma_stm32_irq_handler(const struct device *dev, uint32_t id)
691760
channel_config->dma_callback(dev, channel_config->user_data, callback_arg,
692761
DMA_STATUS_BLOCK);
693762
} else if (dma_stm32_is_tc_irq_active(dma, channel)) {
694-
if (!channel_config->cyclic) {
695-
channel_config->busy = false;
696-
}
697-
698763
if (!channel_config->hal_override) {
699764
LL_DMA_ClearFlag_TC(dma, channel);
700765
}
701766

702-
channel_config->dma_callback(dev, channel_config->user_data, callback_arg,
703-
DMA_STATUS_COMPLETE);
767+
if (channel_config->complete_callback_en == 1) {
768+
channel_config->dma_callback(dev, channel_config->user_data, callback_arg,
769+
DMA_STATUS_BLOCK);
770+
} else {
771+
if (!channel_config->cyclic) {
772+
channel_config->busy = false;
773+
}
774+
channel_config->dma_callback(dev, channel_config->user_data, callback_arg,
775+
DMA_STATUS_COMPLETE);
776+
}
704777
} else {
705778
LOG_ERR("Transfer Error.");
706779
channel_config->busy = false;

0 commit comments

Comments
 (0)