@@ -17,9 +17,8 @@ LOG_MODULE_REGISTER(dma_stm32_v3, CONFIG_DMA_LOG_LEVEL);
 #define DT_DRV_COMPAT st_stm32_dma_v3
 #define DMA_STM32_MAX_DATA_ITEMS 0xffff
 
-/* Since at this point we only support cyclic mode, we only need 3 descriptors
- * at most to update the source and destinantion addresses and the update
- * registers. TODO: Raise this number for larger linked lists.
+/* Since the descriptor pool is allocated statically, we define the number of
+ * descriptors per channel to be used for linked-list transfers.
  */
 #define DMA_STM32_NUM_DESCRIPTORS_PER_CHANNEL 24
 
 #define POLLING_TIMEOUT_US 10*USEC_PER_MSEC
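Review note: this define sizes each channel's slice of the statically allocated descriptor pool. As the loop below shows, every block descriptor occupies three uint32_t words (source address, destination address, link/update word), so 24 words per channel cover up to 8 linked blocks. A minimal sketch of such a pool, assuming a hypothetical DMA_STM32_MAX_CHANNELS macro (the driver's actual buffer is dev_config->linked_list_buffer, not this one):

#define DMA_STM32_MAX_CHANNELS 16 /* assumed name and value, not the driver's */

static volatile uint32_t linked_list_pool[DMA_STM32_MAX_CHANNELS *
					  DMA_STM32_NUM_DESCRIPTORS_PER_CHANNEL];

/* Channel `id` owns a contiguous slice, mirroring the indexing used in
 * dma_stm32_configure() below. */
#define DMA_STM32_CHANNEL_SLICE(id) \
	(&linked_list_pool[(id) * DMA_STM32_NUM_DESCRIPTORS_PER_CHANNEL])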
@@ -235,8 +234,8 @@ static int dma_stm32_validate_arguments(const struct device *dev, struct dma_con
 static int dma_stm32_validate_transfer_sizes(struct dma_config *config)
 {
 	if (config->head_block->block_size > DMA_STM32_MAX_DATA_ITEMS) {
-		LOG_ERR("Data size exceeds the maximum limit: %d>%d", config->head_block->block_size,
-			DMA_STM32_MAX_DATA_ITEMS);
+		LOG_ERR("Data size exceeds the maximum limit: %d>%d",
+			config->head_block->block_size, DMA_STM32_MAX_DATA_ITEMS);
 		return -EINVAL;
 	}
 
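Review note: DMA_STM32_MAX_DATA_ITEMS (0xffff) caps a single block. With the linked-list path added below, a transfer larger than the cap can in principle be expressed by the caller as several chained blocks that each respect it, instead of being rejected outright. A hedged caller-side sketch; the helper is hypothetical, not driver code, and nblocks is assumed to be ceil(total / DMA_STM32_MAX_DATA_ITEMS):

static void split_into_blocks(struct dma_block_config *blocks, size_t nblocks,
			      uint32_t src, uint32_t dst, uint32_t total)
{
	for (size_t i = 0; i < nblocks; i++) {
		/* Each block carries at most the per-block limit. */
		uint32_t chunk = MIN(total, DMA_STM32_MAX_DATA_ITEMS);

		blocks[i].source_address = src;
		blocks[i].dest_address = dst;
		blocks[i].block_size = chunk;
		blocks[i].next_block = (i + 1 < nblocks) ? &blocks[i + 1] : NULL;
		src += chunk;
		dst += chunk;
		total -= chunk;
	}
}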
@@ -266,11 +265,71 @@ static int dma_stm32_validate_transfer_sizes(struct dma_config *config)
 	return 0;
 }
 
+static void dma_stm32_configure_linked_list(uint32_t id, struct dma_config *config,
+					    volatile uint32_t *linked_list_node,
+					    const DMA_TypeDef *dma)
+{
+	uint32_t next_desc = 1;
+	struct dma_block_config *block_config;
+
+	uint32_t registers_update = 0;
+	uint32_t addr_offset = 0;
+
+	uint32_t descriptor_index = 0;
+	uint32_t base_addr = 0;
+	uint32_t next_desc_addr = 0;
+
+	uint32_t channel = dma_stm32_id_to_channel(id);
+	block_config = config->head_block;
+	base_addr = (uint32_t)&linked_list_node[descriptor_index];
+
+	LL_DMA_SetLinkedListBaseAddr(dma, channel, base_addr);
+
+	for (int i = 0; i < config->block_count; i++) {
+		registers_update = 0;
+		LOG_DBG("Configuring block descriptor %d for channel %d", i, channel);
+
+		linked_list_node[descriptor_index] = block_config->source_address;
+		descriptor_index++;
+		linked_list_node[descriptor_index] = block_config->dest_address;
+		descriptor_index++;
+
+		if (i < config->block_count - 1) {
+			registers_update |=
+				LL_DMA_UPDATE_CSAR | LL_DMA_UPDATE_CDAR | LL_DMA_UPDATE_CLLR;
+			block_config = block_config->next_block;
+			next_desc_addr = (uint32_t)&linked_list_node[descriptor_index + 1];
+		} else if (config->cyclic) {
+			LOG_DBG("Last descriptor %d for channel %d, linking to first", i, channel);
+			registers_update |=
+				LL_DMA_UPDATE_CSAR | LL_DMA_UPDATE_CDAR | LL_DMA_UPDATE_CLLR;
+			next_desc_addr = base_addr;
+		} else {
+			LOG_DBG("Last descriptor %d for channel %d, no link", i, channel);
+			registers_update = 0;
+			next_desc = 0;
+		}
+
+		if (next_desc != 0) {
+			addr_offset = next_desc_addr & GENMASK(15, 2);
+			registers_update |= addr_offset;
+		}
+
+		linked_list_node[descriptor_index] = registers_update;
+		descriptor_index++;
+
+		if (i == 0) {
+			LL_DMA_ConfigLinkUpdate(dma, channel, registers_update, addr_offset);
+		}
+	}
+
+	LL_DMA_EnableIT_HT(dma, channel);
+}
+
 int dma_stm32_configure(const struct device *dev, uint32_t id, struct dma_config *config)
 {
 	const struct dma_stm32_config *dev_config;
 	struct dma_stm32_channel *channel_config;
-	struct dma_block_config *block_config;
 	struct dma_stm32_descriptor hwdesc;
 	uint32_t channel;
 	const DMA_TypeDef *dma;
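Review note on the link word above: the LL_DMA_UPDATE_CSAR/CDAR/CLLR flags select which channel registers the hardware reloads from each descriptor, and the low bits carry the next descriptor's address masked with GENMASK(15, 2). Only bits [15:2] fit in CLLR; the remaining upper address bits come from the base programmed via LL_DMA_SetLinkedListBaseAddr(), which is why all descriptors for a channel must sit in that same 64 KiB region. An illustrative helper (not part of the driver) for the encoding the loop performs inline:

#include <zephyr/sys/util.h>

static inline uint32_t dma_stm32_link_word(uint32_t update_flags,
					   uint32_t next_desc_addr)
{
	/* update_flags: mask of LL_DMA_UPDATE_* bits; next_desc_addr must
	 * lie within the 64 KiB linked-list base region. */
	return update_flags | (next_desc_addr & GENMASK(15, 2));
}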
@@ -314,15 +373,13 @@ int dma_stm32_configure(const struct device *dev, uint32_t id, struct dma_config
 		return ret;
 	}
 
-	block_config = config->head_block;
-
-	ret = dma_stm32_get_src_inc_mode(block_config->source_addr_adj, &src_inc_mode);
+	ret = dma_stm32_get_src_inc_mode(config->head_block->source_addr_adj, &src_inc_mode);
 	if (ret < 0) {
 		return ret;
 	}
 	LOG_DBG("Source address increment: %d", src_inc_mode);
 
-	ret = dma_stm32_get_dest_inc_mode(block_config->dest_addr_adj, &dest_inc_mode);
+	ret = dma_stm32_get_dest_inc_mode(config->head_block->dest_addr_adj, &dest_inc_mode);
 	if (ret < 0) {
 		return ret;
 	}
@@ -355,8 +412,12 @@ int dma_stm32_configure(const struct device *dev, uint32_t id, struct dma_config
 		/* DMAx CCR Configuration */
 		ccr |= LL_DMA_LSM_1LINK_EXECUTION;
 	} else {
-		LOG_ERR("Only single block transfers are supported for now");
-		return -ENOTSUP;
+		/* DMAx CCR Configuration */
+		ccr |= LL_DMA_LSM_FULL_EXECUTION;
+
+		volatile uint32_t *linked_list_node =
+			&dev_config->linked_list_buffer[id * DMA_STM32_NUM_DESCRIPTORS_PER_CHANNEL];
+		dma_stm32_configure_linked_list(id, config, linked_list_node, dma);
 	}
 
 	/* DMAx CCR Configuration */
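Review note: with the -ENOTSUP branch removed, multi-block requests now take the linked-list path (LL_DMA_LSM_FULL_EXECUTION runs the whole list rather than a single link). A minimal usage sketch using the standard Zephyr dma_config()/struct dma_block_config API; the device pointer, channel id, and buffers are placeholders:

struct dma_block_config blk2 = {
	.source_address = (uint32_t)src2, /* placeholder buffers */
	.dest_address = (uint32_t)dst2,
	.block_size = 64,
};
struct dma_block_config blk1 = {
	.source_address = (uint32_t)src1,
	.dest_address = (uint32_t)dst1,
	.block_size = 64,
	.next_block = &blk2,
};
struct dma_config cfg = {
	.block_count = 2,
	.head_block = &blk1,
	/* .cyclic left at 0: the last descriptor gets no link and the
	 * transfer stops after blk2. */
};

dma_config(dev, channel_id, &cfg); /* dev and channel_id assumed in scope */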