@@ -16,9 +16,8 @@ LOG_MODULE_REGISTER(dma_stm32_v3, CONFIG_DMA_LOG_LEVEL);
 #define DT_DRV_COMPAT st_stm32_dma_v3
 #define DMA_STM32_MAX_DATA_ITEMS 0xffff
 
-/* Since at this point we only support cyclic mode, we only need 3 descriptors
- * at most to update the source and destinantion addresses and the update
- * registers. TODO: Raise this number for larger linked lists.
+/* Since the descriptor pool is allocated statically, we define the number of
+ * descriptors per channel to be used for linked list transfers.
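+ * Each block consumes three 32-bit words (source address, destination
+ * address and link/update word), so 24 words cover up to eight blocks.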
  */
 #define DMA_STM32_NUM_DESCRIPTORS_PER_CHANNEL 24
 
@@ -195,8 +194,8 @@ static int dma_stm32_validate_arguments(const struct device *dev, struct dma_con
 static int dma_stm32_validate_transfer_sizes(struct dma_config *config)
 {
 	if (config->head_block->block_size > DMA_STM32_MAX_DATA_ITEMS) {
-		LOG_ERR("Data size exceeds the maximum limit: %d>%d", config->head_block->block_size,
-			DMA_STM32_MAX_DATA_ITEMS);
+		LOG_ERR("Data size exceeds the maximum limit: %d>%d",
+			config->head_block->block_size, DMA_STM32_MAX_DATA_ITEMS);
 		return -EINVAL;
 	}
@@ -226,11 +225,70 @@ static int dma_stm32_validate_transfer_sizes(struct dma_config *config)
 	return 0;
 }
 
+static void dma_stm32_configure_linked_list(uint32_t id, struct dma_config *config,
+					    volatile uint32_t *linked_list_node,
+					    const DMA_TypeDef *dma)
+{
+	uint32_t next_desc = 1;
+	struct dma_block_config *block_config;
+
+	uint32_t registers_update = 0;
+	uint32_t addr_offset = 0;
+
+	uint32_t descriptor_index = 0;
+	uint32_t base_addr = 0;
+	uint32_t next_desc_addr = 0;
+
+	uint32_t channel = dma_stm32_id_to_channel(id);
+	block_config = config->head_block;
+	base_addr = (uint32_t)&linked_list_node[descriptor_index];
+	LL_DMA_SetLinkedListBaseAddr(dma, channel, base_addr);
+
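+	/* Each block descriptor occupies three consecutive words in the pool:
+	 * source address (CSAR), destination address (CDAR) and the
+	 * link/update word (CLLR).
+	 */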
+	for (int i = 0; i < config->block_count; i++) {
+		registers_update = 0;
+		LOG_DBG("Configuring block descriptor %d for channel %d", i, channel);
+
+		linked_list_node[descriptor_index] = block_config->source_address;
+		descriptor_index++;
+		linked_list_node[descriptor_index] = block_config->dest_address;
+		descriptor_index++;
+
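+		/* Chain to the next descriptor, loop back to the first one in
+		 * cyclic mode, or terminate the list after the last block.
+		 */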
+		if (i < config->block_count - 1) {
+			registers_update |=
+				LL_DMA_UPDATE_CSAR | LL_DMA_UPDATE_CDAR | LL_DMA_UPDATE_CLLR;
+			block_config = block_config->next_block;
+			next_desc_addr = (uint32_t)&linked_list_node[descriptor_index + 1];
+		} else if (config->cyclic) {
+			LOG_DBG("Last descriptor %d for channel %d, linking to first", i, channel);
+			registers_update |=
+				LL_DMA_UPDATE_CSAR | LL_DMA_UPDATE_CDAR | LL_DMA_UPDATE_CLLR;
+			next_desc_addr = base_addr;
+		} else {
+			LOG_DBG("Last descriptor %d for channel %d, no link", i, channel);
+			registers_update = 0;
+			next_desc = 0;
+		}
+
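+		/* CLLR only encodes bits [15:2] of the next descriptor
+		 * address; the upper bits come from the linked-list base
+		 * address, so descriptors must stay within that 64 KiB region.
+		 */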
+		if (next_desc != 0) {
+			addr_offset = next_desc_addr & GENMASK(15, 2);
+			registers_update |= addr_offset;
+		}
+
+		linked_list_node[descriptor_index] = registers_update;
+		descriptor_index++;
+
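+		/* Mirror the first descriptor's update word into the channel
+		 * registers so the link to the following descriptor is armed
+		 * before the transfer starts.
+		 */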
+		if (i == 0) {
+			LL_DMA_ConfigLinkUpdate(dma, channel, registers_update, addr_offset);
+		}
+	}
+
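+	/* Also report half-transfer events on this channel. */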
+	LL_DMA_EnableIT_HT(dma, channel);
+}
+
 int dma_stm32_configure(const struct device *dev, uint32_t id, struct dma_config *config)
 {
 	const struct dma_stm32_config *dev_config;
 	struct dma_stm32_channel *channel_config;
-	struct dma_block_config *block_config;
 	struct dma_stm32_descriptor hwdesc;
 	uint32_t channel;
 	const DMA_TypeDef *dma;
@@ -274,15 +332,13 @@ int dma_stm32_configure(const struct device *dev, uint32_t id, struct dma_config
 		return ret;
 	}
 
-	block_config = config->head_block;
-
-	ret = dma_stm32_get_src_inc_mode(block_config->source_addr_adj, &src_inc_mode);
+	ret = dma_stm32_get_src_inc_mode(config->head_block->source_addr_adj, &src_inc_mode);
 	if (ret < 0) {
 		return ret;
 	}
 	LOG_DBG("Source address increment: %d", src_inc_mode);
 
-	ret = dma_stm32_get_dest_inc_mode(block_config->dest_addr_adj, &dest_inc_mode);
+	ret = dma_stm32_get_dest_inc_mode(config->head_block->dest_addr_adj, &dest_inc_mode);
 	if (ret < 0) {
 		return ret;
 	}
@@ -315,8 +371,12 @@ int dma_stm32_configure(const struct device *dev, uint32_t id, struct dma_config
 		/* DMAx CCR Configuration */
 		ccr |= LL_DMA_LSM_1LINK_EXECUTION;
 	} else {
-		LOG_ERR("Only single block transfers are supported for now");
-		return -ENOTSUP;
+		/* DMAx CCR Configuration */
+		ccr |= LL_DMA_LSM_FULL_EXECUTION;
+
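+		/* Each channel owns a fixed slice of the statically allocated
+		 * descriptor pool.
+		 */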
+		volatile uint32_t *linked_list_node =
+			&dev_config->linked_list_buffer[id * DMA_STM32_NUM_DESCRIPTORS_PER_CHANNEL];
+		dma_stm32_configure_linked_list(id, config, linked_list_node, dma);
 	}
 
 	/* DMAx CCR Configuration */
@@ -651,7 +711,7 @@ static DEVICE_API(dma, dma_funcs) = {
 			.enr = __cenr },), \
 		(/* Nothing if clocks not present */)) .config_irq = \
 			dma_stm32_config_irq_##index, \
-	.base = (DMA_TypeDef *) DT_INST_REG_ADDR(index), \
+	.base = (DMA_TypeDef *)DT_INST_REG_ADDR(index), \
 	.max_channels = \
 		DT_INST_PROP_OR(index, dma_channels, DT_NUM_IRQS(DT_DRV_INST(index))), \
 	.channels = dma_stm32_channels_##index, \