Skip to content

Commit 12b871e

Browse files
committed
drivers/dma/dma_stm32_v3.c: add LLI support
Add support for linked list items (LLI) in the STM32 DMA v3 driver. This allows for more flexible DMA configurations, such as scatter-gather and cyclic transfers, and allows passing both the dma_m2m_cyclic and the dma_m2m_sg tests on the stm32mp257f_ev1 board. Signed-off-by: Youssef Zini <[email protected]>
1 parent 42f3530 commit 12b871e

File tree

1 file changed

+73
-13
lines changed

1 file changed

+73
-13
lines changed

drivers/dma/dma_stm32_v3.c

Lines changed: 73 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -16,9 +16,8 @@ LOG_MODULE_REGISTER(dma_stm32_v3, CONFIG_DMA_LOG_LEVEL);
1616
#define DT_DRV_COMPAT st_stm32_dma_v3
1717
#define DMA_STM32_MAX_DATA_ITEMS 0xffff
1818

19-
/* Since at this point we only support cyclic mode , we only need 3 descriptors
20-
* at most to update the source and destinantion addresses and the update
21-
* registers. TODO: Raise this number for larger linked lists.
19+
/* Since the descriptors pool is allocated statically, we define the number of
20+
* descriptors per channel to be used for linked list transfers.
2221
*/
2322
#define DMA_STM32_NUM_DESCRIPTORS_PER_CHANNEL 24
2423

@@ -195,8 +194,8 @@ static int dma_stm32_validate_arguments(const struct device *dev, struct dma_con
195194
static int dma_stm32_validate_transfer_sizes(struct dma_config *config)
196195
{
197196
if (config->head_block->block_size > DMA_STM32_MAX_DATA_ITEMS) {
198-
LOG_ERR("Data size exceeds the maximum limit: %d>%d", config->head_block->block_size,
199-
DMA_STM32_MAX_DATA_ITEMS);
197+
LOG_ERR("Data size exceeds the maximum limit: %d>%d",
198+
config->head_block->block_size, DMA_STM32_MAX_DATA_ITEMS);
200199
return -EINVAL;
201200
}
202201

@@ -226,11 +225,70 @@ static int dma_stm32_validate_transfer_sizes(struct dma_config *config)
226225
return 0;
227226
}
228227

228+
static void dma_stm32_configure_linked_list(uint32_t id, struct dma_config *config,
229+
volatile uint32_t *linked_list_node,
230+
const DMA_TypeDef *dma)
231+
{
232+
uint32_t next_desc = 1;
233+
struct dma_block_config *block_config;
234+
235+
uint32_t registers_update = 0;
236+
uint32_t addr_offset = 0;
237+
238+
uint32_t descriptor_index = 0;
239+
uint32_t base_addr = 0;
240+
uint32_t next_desc_addr = 0;
241+
242+
uint32_t channel = dma_stm32_id_to_channel(id);
243+
block_config = config->head_block;
244+
base_addr = (uint32_t)&linked_list_node[descriptor_index];
245+
LL_DMA_SetLinkedListBaseAddr(dma, channel, base_addr);
246+
247+
for (int i = 0; i < config->block_count; i++) {
248+
registers_update = 0;
249+
LOG_DBG("Configuring block descriptor %d for channel %d", i, channel);
250+
251+
linked_list_node[descriptor_index] = block_config->source_address;
252+
descriptor_index++;
253+
linked_list_node[descriptor_index] = block_config->dest_address;
254+
descriptor_index++;
255+
256+
if (i < config->block_count - 1) {
257+
registers_update |=
258+
LL_DMA_UPDATE_CSAR | LL_DMA_UPDATE_CDAR | LL_DMA_UPDATE_CLLR;
259+
block_config = block_config->next_block;
260+
next_desc_addr = (uint32_t)&linked_list_node[descriptor_index + 1];
261+
} else if (config->cyclic) {
262+
LOG_DBG("Last descriptor %d for channel %d, linking to first", i, channel);
263+
registers_update |=
264+
LL_DMA_UPDATE_CSAR | LL_DMA_UPDATE_CDAR | LL_DMA_UPDATE_CLLR;
265+
next_desc_addr = base_addr;
266+
} else {
267+
LOG_DBG("Last descriptor %d for channel %d, no link", i, channel);
268+
registers_update = 0;
269+
next_desc = 0;
270+
}
271+
272+
if (next_desc != 0) {
273+
addr_offset = next_desc_addr & GENMASK(15, 2);
274+
registers_update |= addr_offset;
275+
}
276+
277+
linked_list_node[descriptor_index] = registers_update;
278+
descriptor_index++;
279+
280+
if (i == 0) {
281+
LL_DMA_ConfigLinkUpdate(dma, channel, registers_update, addr_offset);
282+
}
283+
}
284+
285+
LL_DMA_EnableIT_HT(dma, channel);
286+
}
287+
229288
int dma_stm32_configure(const struct device *dev, uint32_t id, struct dma_config *config)
230289
{
231290
const struct dma_stm32_config *dev_config;
232291
struct dma_stm32_channel *channel_config;
233-
struct dma_block_config *block_config;
234292
struct dma_stm32_descriptor hwdesc;
235293
uint32_t channel;
236294
const DMA_TypeDef *dma;
@@ -274,15 +332,13 @@ int dma_stm32_configure(const struct device *dev, uint32_t id, struct dma_config
274332
return ret;
275333
}
276334

277-
block_config = config->head_block;
278-
279-
ret = dma_stm32_get_src_inc_mode(block_config->source_addr_adj, &src_inc_mode);
335+
ret = dma_stm32_get_src_inc_mode(config->head_block->source_addr_adj, &src_inc_mode);
280336
if (ret < 0) {
281337
return ret;
282338
}
283339
LOG_DBG("Source address increment: %d", src_inc_mode);
284340

285-
ret = dma_stm32_get_dest_inc_mode(block_config->dest_addr_adj, &dest_inc_mode);
341+
ret = dma_stm32_get_dest_inc_mode(config->head_block->dest_addr_adj, &dest_inc_mode);
286342
if (ret < 0) {
287343
return ret;
288344
}
@@ -315,8 +371,12 @@ int dma_stm32_configure(const struct device *dev, uint32_t id, struct dma_config
315371
/* DMAx CCR Configuration */
316372
ccr |= LL_DMA_LSM_1LINK_EXECUTION;
317373
} else {
318-
LOG_ERR("Only single block transfers are supported for now");
319-
return -ENOTSUP;
374+
/* DMAx CCR Configuration */
375+
ccr |= LL_DMA_LSM_FULL_EXECUTION;
376+
377+
volatile uint32_t *linked_list_node =
378+
&dev_config->linked_list_buffer[id * DMA_STM32_NUM_DESCRIPTORS_PER_CHANNEL];
379+
dma_stm32_configure_linked_list(id, config, linked_list_node, dma);
320380
}
321381

322382
/* DMAx CCR Configuration */
@@ -651,7 +711,7 @@ static DEVICE_API(dma, dma_funcs) = {
651711
.enr = __cenr },), \
652712
(/* Nothing if clocks not present */)) .config_irq = \
653713
dma_stm32_config_irq_##index, \
654-
.base = (DMA_TypeDef *) DT_INST_REG_ADDR(index), \
714+
.base = (DMA_TypeDef *)DT_INST_REG_ADDR(index), \
655715
.max_channels = \
656716
DT_INST_PROP_OR(index, dma_channels, DT_NUM_IRQS(DT_DRV_INST(index))), \
657717
.channels = dma_stm32_channels_##index, \

0 commit comments

Comments
 (0)