Skip to content

Commit d5f6d4e

Browse files
committed
drivers/dma/dma_stm32_v3.c: add LLI support
Add support for linked list items (LLI) in the STM32 DMA v3 driver. This allows for more flexible DMA configurations, such as scatter-gather and cyclic transfers, and makes both the dma_m2m_cyclic and the dma_m2m_sg tests pass on the stm32mp257f_ev1 board. Signed-off-by: Youssef Zini <[email protected]>
1 parent 742902d commit d5f6d4e

File tree

1 file changed

+73
-12
lines changed

1 file changed

+73
-12
lines changed

drivers/dma/dma_stm32_v3.c

Lines changed: 73 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -17,9 +17,8 @@ LOG_MODULE_REGISTER(dma_stm32_v3, CONFIG_DMA_LOG_LEVEL);
1717
#define DT_DRV_COMPAT st_stm32_dma_v3
1818
#define DMA_STM32_MAX_DATA_ITEMS 0xffff
1919

20-
/* Since at this point we only support cyclic mode , we only need 3 descriptors
21-
* at most to update the source and destinantion addresses and the update
22-
* registers. TODO: Raise this number for larger linked lists.
20+
/* Since the descriptors pool is allocated statically, we define the number of
21+
 * descriptors per channel to be used for linked list transfers.
2322
*/
2423
#define DMA_STM32_NUM_DESCRIPTORS_PER_CHANNEL 24
2524
#define POLLING_TIMEOUT_US 10*USEC_PER_MSEC
@@ -235,8 +234,8 @@ static int dma_stm32_validate_arguments(const struct device *dev, struct dma_con
235234
static int dma_stm32_validate_transfer_sizes(struct dma_config *config)
236235
{
237236
if (config->head_block->block_size > DMA_STM32_MAX_DATA_ITEMS) {
238-
LOG_ERR("Data size exceeds the maximum limit: %d>%d", config->head_block->block_size,
239-
DMA_STM32_MAX_DATA_ITEMS);
237+
LOG_ERR("Data size exceeds the maximum limit: %d>%d",
238+
config->head_block->block_size, DMA_STM32_MAX_DATA_ITEMS);
240239
return -EINVAL;
241240
}
242241

@@ -266,11 +265,71 @@ static int dma_stm32_validate_transfer_sizes(struct dma_config *config)
266265
return 0;
267266
}
268267

268+
static void dma_stm32_configure_linked_list(uint32_t id, struct dma_config *config,
269+
volatile uint32_t *linked_list_node,
270+
const DMA_TypeDef *dma)
271+
{
272+
uint32_t next_desc = 1;
273+
struct dma_block_config *block_config;
274+
275+
uint32_t registers_update = 0;
276+
uint32_t addr_offset = 0;
277+
278+
uint32_t descriptor_index = 0;
279+
uint32_t base_addr = 0;
280+
uint32_t next_desc_addr = 0;
281+
282+
uint32_t channel = dma_stm32_id_to_channel(id);
283+
block_config = config->head_block;
284+
base_addr = (uint32_t)&linked_list_node[descriptor_index];
285+
286+
LL_DMA_SetLinkedListBaseAddr(dma, channel, base_addr);
287+
288+
for (int i = 0; i < config->block_count; i++) {
289+
registers_update = 0;
290+
LOG_DBG("Configuring block descriptor %d for channel %d", i, channel);
291+
292+
linked_list_node[descriptor_index] = block_config->source_address;
293+
descriptor_index++;
294+
linked_list_node[descriptor_index] = block_config->dest_address;
295+
descriptor_index++;
296+
297+
if (i < config->block_count - 1) {
298+
registers_update |=
299+
LL_DMA_UPDATE_CSAR | LL_DMA_UPDATE_CDAR | LL_DMA_UPDATE_CLLR;
300+
block_config = block_config->next_block;
301+
next_desc_addr = (uint32_t)&linked_list_node[descriptor_index + 1];
302+
} else if (config->cyclic) {
303+
LOG_DBG("Last descriptor %d for channel %d, linking to first", i, channel);
304+
registers_update |=
305+
LL_DMA_UPDATE_CSAR | LL_DMA_UPDATE_CDAR | LL_DMA_UPDATE_CLLR;
306+
next_desc_addr = base_addr;
307+
} else {
308+
LOG_DBG("Last descriptor %d for channel %d, no link", i, channel);
309+
registers_update = 0;
310+
next_desc = 0;
311+
}
312+
313+
if (next_desc != 0) {
314+
addr_offset = next_desc_addr & GENMASK(15, 2);
315+
registers_update |= addr_offset;
316+
}
317+
318+
linked_list_node[descriptor_index] = registers_update;
319+
descriptor_index++;
320+
321+
if (i == 0) {
322+
LL_DMA_ConfigLinkUpdate(dma, channel, registers_update, addr_offset);
323+
}
324+
}
325+
326+
LL_DMA_EnableIT_HT(dma, channel);
327+
}
328+
269329
int dma_stm32_configure(const struct device *dev, uint32_t id, struct dma_config *config)
270330
{
271331
const struct dma_stm32_config *dev_config;
272332
struct dma_stm32_channel *channel_config;
273-
struct dma_block_config *block_config;
274333
struct dma_stm32_descriptor hwdesc;
275334
uint32_t channel;
276335
const DMA_TypeDef *dma;
@@ -314,15 +373,13 @@ int dma_stm32_configure(const struct device *dev, uint32_t id, struct dma_config
314373
return ret;
315374
}
316375

317-
block_config = config->head_block;
318-
319-
ret = dma_stm32_get_src_inc_mode(block_config->source_addr_adj, &src_inc_mode);
376+
ret = dma_stm32_get_src_inc_mode(config->head_block->source_addr_adj, &src_inc_mode);
320377
if (ret < 0) {
321378
return ret;
322379
}
323380
LOG_DBG("Source address increment: %d", src_inc_mode);
324381

325-
ret = dma_stm32_get_dest_inc_mode(block_config->dest_addr_adj, &dest_inc_mode);
382+
ret = dma_stm32_get_dest_inc_mode(config->head_block->dest_addr_adj, &dest_inc_mode);
326383
if (ret < 0) {
327384
return ret;
328385
}
@@ -355,8 +412,12 @@ int dma_stm32_configure(const struct device *dev, uint32_t id, struct dma_config
355412
/* DMAx CCR Configuration */
356413
ccr |= LL_DMA_LSM_1LINK_EXECUTION;
357414
} else {
358-
LOG_ERR("Only single block transfers are supported for now");
359-
return -ENOTSUP;
415+
/* DMAx CCR Configuration */
416+
ccr |= LL_DMA_LSM_FULL_EXECUTION;
417+
418+
volatile uint32_t *linked_list_node =
419+
&dev_config->linked_list_buffer[id * DMA_STM32_NUM_DESCRIPTORS_PER_CHANNEL];
420+
dma_stm32_configure_linked_list(id, config, linked_list_node, dma);
360421
}
361422

362423
/* DMAx CCR Configuration */

0 commit comments

Comments
 (0)