@@ -16,9 +16,8 @@ LOG_MODULE_REGISTER(dma_stm32_v3, CONFIG_DMA_LOG_LEVEL);
 #define DT_DRV_COMPAT st_stm32_dma_v3
 #define DMA_STM32_MAX_DATA_ITEMS 0xffff
 
-/* Since at this point we only support cyclic mode, we only need 3 descriptors
- * at most to update the source and destination addresses and the update
- * registers. TODO: Raise this number for larger linked lists.
+/* Since the descriptor pool is allocated statically, we define the number of
+ * descriptors per channel to be used for linked-list transfers.
  */
 #define DMA_STM32_NUM_DESCRIPTORS_PER_CHANNEL 24
 #define POLLING_TIMEOUT_US (10 * USEC_PER_MSEC)
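
The pool sizing follows from the node layout used by `dma_stm32_configure_linked_list()` below: each list item occupies three words (source address, destination address, and one CLLR update word). A sketch of that arithmetic; `WORDS_PER_LL_NODE` and `MAX_BLOCKS_PER_CHANNEL` are hypothetical names, not part of the driver:

```c
#define WORDS_PER_LL_NODE 3 /* CSAR, CDAR, CLLR update word */
#define MAX_BLOCKS_PER_CHANNEL \
        (DMA_STM32_NUM_DESCRIPTORS_PER_CHANNEL / WORDS_PER_LL_NODE) /* = 8 */
```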
@@ -40,6 +39,7 @@ struct dma_stm32_channel {
 	uint32_t src_size;
 	uint32_t dst_size;
 	void *user_data;
+	uint32_t complete_callback_en;
 	dma_callback_t dma_callback;
 	bool cyclic;
 	int block_count;
@@ -240,8 +240,8 @@ static int dma_stm32_disable_channel(DMA_TypeDef *dma, uint32_t channel)
 static int dma_stm32_validate_transfer_sizes(struct dma_config *config)
 {
 	if (config->head_block->block_size > DMA_STM32_MAX_DATA_ITEMS) {
-		LOG_ERR("Data size exceeds the maximum limit: %d>%d", config->head_block->block_size,
-			DMA_STM32_MAX_DATA_ITEMS);
+		LOG_ERR("Data size exceeds the maximum limit: %d>%d",
+			config->head_block->block_size, DMA_STM32_MAX_DATA_ITEMS);
 		return -EINVAL;
 	}
 
@@ -272,11 +272,65 @@ static int dma_stm32_validate_transfer_sizes(struct dma_config *config)
 	return 0;
 }
 
+static void dma_stm32_configure_linked_list(uint32_t channel, struct dma_config *config,
+					    uint32_t *linked_list_node, DMA_TypeDef *dma)
+{
+	uint32_t next_desc = 1;
+	struct dma_block_config *block_config;
+	uint32_t registers_update = 0;
+	uint32_t addr_offset = 0;
+	uint32_t descriptor_index = 0;
+	uint32_t base_addr = 0;
+	uint32_t next_desc_addr = 0;
+
+	block_config = config->head_block;
+	base_addr = (uint32_t)&linked_list_node[descriptor_index];
+
+	LL_DMA_SetLinkedListBaseAddr(dma, channel, base_addr);
+
+	for (uint32_t i = 0; i < config->block_count; i++) {
+		registers_update = 0;
+		LOG_DBG("Configuring block descriptor %d for channel %d", i, channel);
+
+		linked_list_node[descriptor_index] = block_config->source_address;
+		descriptor_index++;
+		linked_list_node[descriptor_index] = block_config->dest_address;
+		descriptor_index++;
+
+		if (i < config->block_count - 1) {
+			registers_update |=
+				LL_DMA_UPDATE_CSAR | LL_DMA_UPDATE_CDAR | LL_DMA_UPDATE_CLLR;
+			block_config = block_config->next_block;
+			next_desc_addr = (uint32_t)&linked_list_node[descriptor_index + 1];
+		} else if (config->cyclic) {
+			LOG_DBG("Last descriptor %d for channel %d, linking to first", i, channel);
+			registers_update |=
+				LL_DMA_UPDATE_CSAR | LL_DMA_UPDATE_CDAR | LL_DMA_UPDATE_CLLR;
+			next_desc_addr = base_addr;
+		} else {
+			LOG_DBG("Last descriptor %d for channel %d, no link", i, channel);
+			registers_update = 0;
+			next_desc = 0;
+		}
+
+		if (next_desc != 0) {
+			addr_offset = next_desc_addr & GENMASK(15, 2);
+			registers_update |= addr_offset;
+		}
+
+		linked_list_node[descriptor_index] = registers_update;
+		descriptor_index++;
+
+		if (i == 0) {
+			LL_DMA_ConfigLinkUpdate(dma, channel, registers_update, addr_offset);
+		}
+	}
+}
+
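
The update word built above packs two things into one register image: which channel registers the GPDMA reloads when a block completes (the `LL_DMA_UPDATE_*` flags), and the low bits of the next node's address. CLLR only holds bits [15:2], addressing within the 64 KB region selected by the linked-list base address, which is why `GENMASK(15, 2)` is applied. A worked example with a hypothetical node placement, assuming the ST LL DMA header is included:

```c
/* Illustration only: link a node at 0x20001000 to the next node 12 bytes later. */
uint32_t base_addr      = 0x20001000u;
uint32_t next_desc_addr = base_addr + 3u * sizeof(uint32_t);  /* 0x2000100C */
uint32_t addr_offset    = next_desc_addr & GENMASK(15, 2);    /* 0x100C     */
uint32_t update_word    = LL_DMA_UPDATE_CSAR | LL_DMA_UPDATE_CDAR |
                          LL_DMA_UPDATE_CLLR | addr_offset;
/* Stored as the node's third word: on block completion the GPDMA reloads
 * CSAR, CDAR and CLLR, then fetches the next node at 0x2000100C. */
```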
 int dma_stm32_configure(const struct device *dev, uint32_t id, struct dma_config *config)
 {
 	const struct dma_stm32_config *dev_config;
 	struct dma_stm32_channel *channel_config;
-	struct dma_block_config *block_config;
 	struct dma_stm32_descriptor hwdesc;
 	uint32_t channel;
 	DMA_TypeDef *dma;
@@ -326,15 +380,13 @@ int dma_stm32_configure(const struct device *dev, uint32_t id, struct dma_config
 		return ret;
 	}
 
-	block_config = config->head_block;
-
-	ret = dma_stm32_get_src_inc_mode(block_config->source_addr_adj, &src_inc_mode);
+	ret = dma_stm32_get_src_inc_mode(config->head_block->source_addr_adj, &src_inc_mode);
 	if (ret < 0) {
 		return ret;
 	}
 	LOG_DBG("Source address increment: %d", src_inc_mode);
 
-	ret = dma_stm32_get_dest_inc_mode(block_config->dest_addr_adj, &dest_inc_mode);
+	ret = dma_stm32_get_dest_inc_mode(config->head_block->dest_addr_adj, &dest_inc_mode);
 	if (ret < 0) {
 		return ret;
 	}
@@ -360,6 +412,7 @@ int dma_stm32_configure(const struct device *dev, uint32_t id, struct dma_config
 	channel_config->src_size = config->source_data_size;
 	channel_config->dst_size = config->dest_data_size;
 	channel_config->cyclic = config->cyclic;
+	channel_config->complete_callback_en = config->complete_callback_en;
 
 	dma_stm32_disable_it(dma, channel);
 
@@ -371,8 +424,12 @@ int dma_stm32_configure(const struct device *dev, uint32_t id, struct dma_config
 	if (!linked_list_needed) {
 		ccr |= LL_DMA_LSM_1LINK_EXECUTION;
 	} else {
-		LOG_ERR("Only single block transfers are supported for now");
-		return -ENOTSUP;
+		ccr |= LL_DMA_LSM_FULL_EXECUTION;
+
+		dma_stm32_configure_linked_list(channel, config,
+						dev_config->linked_list_buffer +
+							id * DMA_STM32_NUM_DESCRIPTORS_PER_CHANNEL,
+						dma);
 	}
 
 	/* TODO: support port specifier from configuration */
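
With this branch in place, multi-block transfers reach the hardware through the generic Zephyr DMA API, each channel using its own slice of the static pool (`id * DMA_STM32_NUM_DESCRIPTORS_PER_CHANNEL` words). A minimal caller sketch; the channel number and buffers are hypothetical:

```c
#include <zephyr/drivers/dma.h>

static uint8_t src_a[64], src_b[64], dst_a[64], dst_b[64];

static void xfer_done(const struct device *dev, void *user_data,
		      uint32_t channel, int status)
{
	/* status is DMA_STATUS_BLOCK per list item when complete_callback_en
	 * is set, or DMA_STATUS_COMPLETE once at the end otherwise.
	 */
}

int start_two_block_transfer(const struct device *dma_dev)
{
	struct dma_block_config blk2 = {
		.source_address = (uint32_t)src_b,
		.dest_address = (uint32_t)dst_b,
		.block_size = sizeof(src_b),
	};
	struct dma_block_config blk1 = {
		.source_address = (uint32_t)src_a,
		.dest_address = (uint32_t)dst_a,
		.block_size = sizeof(src_a),
		.next_block = &blk2,
	};
	struct dma_config cfg = {
		.channel_direction = MEMORY_TO_MEMORY,
		.source_data_size = 1,
		.dest_data_size = 1,
		.block_count = 2,
		.head_block = &blk1,
		.complete_callback_en = 1, /* callback after each list item */
		.dma_callback = xfer_done,
	};
	int ret = dma_config(dma_dev, 0, &cfg);

	return (ret == 0) ? dma_start(dma_dev, 0) : ret;
}
```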
@@ -390,7 +447,19 @@ int dma_stm32_configure(const struct device *dev, uint32_t id, struct dma_config
 			  config->dest_burst_length);
 
 	hwdesc.channel_tr2 = ll_direction;
-	hwdesc.channel_tr2 |= LL_DMA_TCEM_BLK_TRANSFER;
+
+	if (linked_list_needed) {
+		if (channel_config->complete_callback_en == 1) {
+			hwdesc.channel_tr2 |= LL_DMA_TCEM_EACH_LLITEM_TRANSFER;
+			LOG_DBG("Enabling TC callback at the end of each linked list item");
+		} else {
+			hwdesc.channel_tr2 |= LL_DMA_TCEM_LAST_LLITEM_TRANSFER;
+			LOG_DBG("Enabling TC callback at the end of last linked list item");
+		}
+	} else {
+		hwdesc.channel_tr2 |= LL_DMA_TCEM_BLK_TRANSFER;
+		LOG_DBG("Enabling TC callback at the end of the block");
+	}
 
 	LL_DMA_ConfigChannelTransfer(dma, channel, hwdesc.channel_tr2);
 
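
The transfer-complete event mode (TCEM) chosen here determines how often the IRQ handler below fires. The same decision, factored as a standalone helper for clarity (a sketch; `pick_tcem()` is not part of the patch):

```c
static uint32_t pick_tcem(bool linked_list_needed, bool per_item_callback)
{
	if (!linked_list_needed) {
		return LL_DMA_TCEM_BLK_TRANSFER; /* TC once per block */
	}
	return per_item_callback ? LL_DMA_TCEM_EACH_LLITEM_TRANSFER /* TC per item */
				 : LL_DMA_TCEM_LAST_LLITEM_TRANSFER; /* TC at end  */
}
```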
@@ -691,16 +760,20 @@ static void dma_stm32_irq_handler(const struct device *dev, uint32_t id)
 		channel_config->dma_callback(dev, channel_config->user_data, callback_arg,
 					     DMA_STATUS_BLOCK);
 	} else if (dma_stm32_is_tc_irq_active(dma, channel)) {
-		if (!channel_config->cyclic) {
-			channel_config->busy = false;
-		}
-
 		if (!channel_config->hal_override) {
 			LL_DMA_ClearFlag_TC(dma, channel);
 		}
 
-		channel_config->dma_callback(dev, channel_config->user_data, callback_arg,
-					     DMA_STATUS_COMPLETE);
+		if (channel_config->complete_callback_en == 1) {
+			channel_config->dma_callback(dev, channel_config->user_data, callback_arg,
+						     DMA_STATUS_BLOCK);
+		} else {
+			if (!channel_config->cyclic) {
+				channel_config->busy = false;
+			}
+			channel_config->dma_callback(dev, channel_config->user_data, callback_arg,
+						     DMA_STATUS_COMPLETE);
+		}
 	} else {
 		LOG_ERR("Transfer Error.");
 		channel_config->busy = false;