@@ -16,9 +16,8 @@ LOG_MODULE_REGISTER(dma_stm32_v3, CONFIG_DMA_LOG_LEVEL);
#define DT_DRV_COMPAT st_stm32_dma_v3
#define DMA_STM32_MAX_DATA_ITEMS 0xffff

- /* Since at this point we only support cyclic mode, we only need 3 descriptors
- * at most to update the source and destinantion addresses and the update
- * registers. TODO: Raise this number for larger linked lists.
+ /* Since the descriptor pool is allocated statically, we define the number of
+ * descriptors per channel to be used for linked-list transfers.
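+ * Each node takes three words (source address, destination address and the
+ * link/update word), so 24 words cover up to 8 blocks per channel.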
*/
#define DMA_STM32_NUM_DESCRIPTORS_PER_CHANNEL 24
#define POLLING_TIMEOUT_US (10 * USEC_PER_MSEC)
@@ -238,8 +237,8 @@ static int dma_stm32_disable_channel(DMA_TypeDef *dma, uint32_t channel)
static int dma_stm32_validate_transfer_sizes(struct dma_config *config)
{
        if (config->head_block->block_size > DMA_STM32_MAX_DATA_ITEMS) {
-               LOG_ERR("Data size exceeds the maximum limit: %d>%d", config->head_block->block_size,
-                       DMA_STM32_MAX_DATA_ITEMS);
+               LOG_ERR("Data size exceeds the maximum limit: %d>%d",
+                       config->head_block->block_size, DMA_STM32_MAX_DATA_ITEMS);
                return -EINVAL;
        }
@@ -270,11 +269,69 @@ static int dma_stm32_validate_transfer_sizes(struct dma_config *config)
        return 0;
}

+static void dma_stm32_configure_linked_list(uint32_t id, struct dma_config *config,
+                                            uint32_t *linked_list_node, DMA_TypeDef *dma)
+{
+        uint32_t next_desc = 1;
+        struct dma_block_config *block_config;
+        uint32_t registers_update = 0;
+        uint32_t addr_offset = 0;
+        uint32_t descriptor_index = 0;
+        uint32_t base_addr = 0;
+        uint32_t next_desc_addr = 0;
+        uint32_t channel;
+
+        block_config = config->head_block;
+        base_addr = (uint32_t)&linked_list_node[descriptor_index];
+
+        channel = dma_stm32_id_to_channel(id);
+        LL_DMA_SetLinkedListBaseAddr(dma, channel, base_addr);
+
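+        /* CLLR offsets are combined with the upper bits of the linked-list base
+         * address programmed above, so every node is assumed to live in the
+         * same 64 KB region as the first one.
+         */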
+        for (int i = 0; i < config->block_count; i++) {
+                registers_update = 0;
+                LOG_DBG("Configuring block descriptor %d for channel %d", i, channel);
+
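+                /* Each node is three consecutive words: source address,
+                 * destination address, then the link word (update flags plus
+                 * the CLLR offset of the next node).
+                 */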
+                linked_list_node[descriptor_index] = block_config->source_address;
+                descriptor_index++;
+                linked_list_node[descriptor_index] = block_config->dest_address;
+                descriptor_index++;
+
+                if (i < config->block_count - 1) {
+                        registers_update |=
+                                LL_DMA_UPDATE_CSAR | LL_DMA_UPDATE_CDAR | LL_DMA_UPDATE_CLLR;
+                        block_config = block_config->next_block;
+                        next_desc_addr = (uint32_t)&linked_list_node[descriptor_index + 1];
+                } else if (config->cyclic) {
+                        LOG_DBG("Last descriptor %d for channel %d, linking to first", i, channel);
+                        registers_update |=
+                                LL_DMA_UPDATE_CSAR | LL_DMA_UPDATE_CDAR | LL_DMA_UPDATE_CLLR;
+                        next_desc_addr = base_addr;
+                } else {
+                        LOG_DBG("Last descriptor %d for channel %d, no link", i, channel);
+                        registers_update = 0;
+                        next_desc = 0;
+                }
+
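+                /* CLLR stores the next node's address as a word-aligned offset
+                 * in bits [15:2], which is what the mask below extracts.
+                 */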
+                if (next_desc != 0) {
+                        addr_offset = next_desc_addr & GENMASK(15, 2);
+                        registers_update |= addr_offset;
+                }
+
+                linked_list_node[descriptor_index] = registers_update;
+                descriptor_index++;
+
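+                /* The first node's link word is also written to the channel
+                 * registers so the controller can fetch the second node once
+                 * the initial block completes.
+                 */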
+                if (i == 0) {
+                        LL_DMA_ConfigLinkUpdate(dma, channel, registers_update, addr_offset);
+                }
+        }
+
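+        /* Report half-transfer events in addition to transfer-complete;
+         * cyclic users typically rely on them as a mid-buffer notification.
+         */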
+        LL_DMA_EnableIT_HT(dma, channel);
+}
+
int dma_stm32_configure(const struct device *dev, uint32_t id, struct dma_config *config)
{
        const struct dma_stm32_config *dev_config;
        struct dma_stm32_channel *channel_config;
-       struct dma_block_config *block_config;
        struct dma_stm32_descriptor hwdesc;
        uint32_t channel;
        DMA_TypeDef *dma;
@@ -318,15 +375,13 @@ int dma_stm32_configure(const struct device *dev, uint32_t id, struct dma_config
                return ret;
        }

-       block_config = config->head_block;
-
-       ret = dma_stm32_get_src_inc_mode(block_config->source_addr_adj, &src_inc_mode);
+       ret = dma_stm32_get_src_inc_mode(config->head_block->source_addr_adj, &src_inc_mode);
        if (ret < 0) {
                return ret;
        }
        LOG_DBG("Source address increment: %d", src_inc_mode);

-       ret = dma_stm32_get_dest_inc_mode(block_config->dest_addr_adj, &dest_inc_mode);
+       ret = dma_stm32_get_dest_inc_mode(config->head_block->dest_addr_adj, &dest_inc_mode);
        if (ret < 0) {
                return ret;
        }
@@ -358,8 +413,12 @@ int dma_stm32_configure(const struct device *dev, uint32_t id, struct dma_config
        if (config->block_count == 1 && !config->cyclic) {
                ccr |= LL_DMA_LSM_1LINK_EXECUTION;
        } else {
-               LOG_ERR("Only single block transfers are supported for now");
-               return -ENOTSUP;
+               ccr |= LL_DMA_LSM_FULL_EXECUTION;
+               dma_stm32_configure_linked_list(id, config,
+                                               dev_config->linked_list_buffer +
+                                                       id * DMA_STM32_NUM_DESCRIPTORS_PER_CHANNEL,
+                                               dma);
        }

        /* TODO: support port specifier from configuration */