@@ -16,11 +16,10 @@ LOG_MODULE_REGISTER(dma_stm32_v3, CONFIG_DMA_LOG_LEVEL);
16
16
#define DT_DRV_COMPAT st_stm32_dma_v3
17
17
#define DMA_STM32_MAX_DATA_ITEMS 0xffff
18
18
19
- /* Since at this point we only support cyclic mode , we only need 3 descriptors
20
- * at most to update the source and destinantion addresses and the update
21
- * registers. TODO: Raise this number for larger linked lists.
19
+ /* Since the descriptors pool is allocated statically, we define the number of
20
+ * descriptors per channel to be used for linked list transfers.
22
21
*/
23
- #define CONFIG_DMA_STM32_NUM_DESCRIPTORS_PER_CHANNEL 3
22
+ #define CONFIG_DMA_STM32_NUM_DESCRIPTORS_PER_CHANNEL 24
24
23
25
24
static const uint32_t table_src_size [4 ] = {
26
25
LL_DMA_SRC_DATAWIDTH_BYTE ,
@@ -365,8 +364,57 @@ int dma_stm32_configure(const struct device *dev, uint32_t id, struct dma_config
365
364
/* DMAx CCR Configuration */
366
365
ccr |= LL_DMA_LSM_1LINK_EXECUTION ;
367
366
} else {
368
- LOG_ERR ("Only single block transfers are supported for now" );
369
- return - ENOTSUP ;
367
+ uint32_t next_desc = 1 ;
368
+ volatile uint32_t * linked_list_node = & dev_config -> linked_list_buffer [id * CONFIG_DMA_STM32_NUM_DESCRIPTORS_PER_CHANNEL ];
369
+ uint32_t registers_update = 0 ;
370
+ uint32_t addr_offset = 0 ;
371
+
372
+ uint32_t descriptor_index = 0 ;
373
+ uint32_t base_addr = 0 ;
374
+ uint32_t next_desc_addr = 0 ;
375
+
376
+ /* DMAx CCR Configuration */
377
+ ccr |= LL_DMA_LSM_FULL_EXECUTION ;
378
+ base_addr = (uint32_t )& linked_list_node [descriptor_index ];
379
+ LL_DMA_SetLinkedListBaseAddr (dma , channel , base_addr );
380
+
381
+ for (int i = 0 ; i < config -> block_count ; i ++ ) {
382
+ registers_update = 0 ;
383
+ LOG_DBG ("Configuring block descriptor %d for channel %d" , i , channel );
384
+
385
+ linked_list_node [descriptor_index ] = block_config -> source_address ;
386
+ descriptor_index ++ ;
387
+ linked_list_node [descriptor_index ] = block_config -> dest_address ;
388
+ descriptor_index ++ ;
389
+
390
+ if (i < config -> block_count - 1 ) {
391
+ registers_update |= LL_DMA_UPDATE_CSAR | LL_DMA_UPDATE_CDAR | LL_DMA_UPDATE_CLLR ;
392
+ block_config = block_config -> next_block ;
393
+ next_desc_addr = (uint32_t )& linked_list_node [descriptor_index + 1 ];
394
+ } else if (reload ) {
395
+ LOG_DBG ("Last descriptor %d for channel %d, linking to first" , i , channel );
396
+ registers_update |= LL_DMA_UPDATE_CSAR | LL_DMA_UPDATE_CDAR | LL_DMA_UPDATE_CLLR ;
397
+ next_desc_addr = base_addr ;
398
+ } else {
399
+ LOG_DBG ("Last descriptor %d for channel %d, no link" , i , channel );
400
+ registers_update = 0 ;
401
+ next_desc = 0 ;
402
+ }
403
+
404
+ if (next_desc != 0 ) {
405
+ addr_offset = next_desc_addr & GENMASK (15 , 2 );
406
+ registers_update |= addr_offset ;
407
+ }
408
+
409
+ linked_list_node [descriptor_index ] = registers_update ;
410
+ descriptor_index ++ ;
411
+
412
+ if (i == 0 ) {
413
+ LL_DMA_ConfigLinkUpdate (dma , channel , registers_update , addr_offset );
414
+ }
415
+ }
416
+
417
+ LL_DMA_EnableIT_HT (dma , channel );
370
418
}
371
419
372
420
/* DMAx CCR Configuration */
@@ -695,7 +743,7 @@ static DEVICE_API(dma, dma_funcs) = {
695
743
static volatile uint32_t dma_stm32_linked_list_buffer##index \
696
744
[CONFIG_DMA_STM32_NUM_DESCRIPTORS_PER_CHANNEL * \
697
745
DT_INST_PROP_OR(index, dma_channels, \
698
- DT_NUM_IRQS(DT_DRV_INST(index)))]__nocache_noinit ; \
746
+ DT_NUM_IRQS(DT_DRV_INST(index)))]__nocache ; \
699
747
\
700
748
const struct dma_stm32_config dma_stm32_config_##index = {COND_CODE_1(DT_NODE_HAS_PROP(__node, clocks), \
701
749
(.pclken = { .bus = __bus, \
0 commit comments