@@ -89,9 +89,12 @@ static int dma_silabs_get_blocksize(uint32_t src_blen, uint32_t dst_blen, uint32

static int dma_silabs_block_to_descriptor(struct dma_config *config,
					  struct dma_silabs_channel *chan_conf,
-					  struct dma_block_config *block, LDMA_Descriptor_t *desc)
+					  struct dma_block_config *block, LDMA_Descriptor_t *desc,
+					  int *offset)
{
-	int ret, src_size, xfer_count;
+	int ret, src_size, xfer_count, loc_offset, mod, rem_bsize;
+
+	loc_offset = *offset;

	if (block->dest_scatter_count || block->source_gather_count ||
	    block->source_gather_interval || block->dest_scatter_interval ||
@@ -123,17 +126,27 @@ static int dma_silabs_block_to_descriptor(struct dma_config *config,
	src_size = config->source_data_size;
	desc->xfer.size = LOG2(src_size);

-	if (block->block_size % config->source_data_size) {
-		xfer_count = block->block_size / config->source_data_size;
+	if (loc_offset) {
+		rem_bsize = block->block_size - loc_offset * config->source_data_size;
	} else {
-		xfer_count = block->block_size / config->source_data_size - 1;
+		rem_bsize = block->block_size;
	}

+	xfer_count = rem_bsize / config->source_data_size;
+	mod = rem_bsize % config->source_data_size;
+
	if (xfer_count > LDMA_DESCRIPTOR_MAX_XFER_SIZE) {
-		return -ENOTSUP;
-	}
+		desc->xfer.xferCnt = LDMA_DESCRIPTOR_MAX_XFER_SIZE - 1;
+		*offset = loc_offset + LDMA_DESCRIPTOR_MAX_XFER_SIZE;

-	desc->xfer.xferCnt = xfer_count;
+	} else {
+		if (!mod || xfer_count == LDMA_DESCRIPTOR_MAX_XFER_SIZE) {
+			xfer_count--;
+		}
+
+		desc->xfer.xferCnt = xfer_count;
+		*offset = 0;
+	}

	/* Warning: High LDMA blockSize (high burst) mean a large transfer
	 * without LDMA controller re-arbitration.
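/*
 * Editor's sketch of the splitting logic in the hunk above (standalone and
 * hypothetical: MAX_XFER is a stand-in for LDMA_DESCRIPTOR_MAX_XFER_SIZE and
 * the block size is assumed to be a multiple of the unit size, so the `mod`
 * corner case is left out). Each loop pass models one descriptor: a full
 * descriptor programs MAX_XFER units and advances the offset, the last one
 * programs the remainder and resets the offset (xferCnt is "transfers
 * minus one").
 */
#include <stdio.h>

#define MAX_XFER 2048U /* stand-in value, not the real HAL constant */

int main(void)
{
	unsigned int block_size = 10000; /* bytes requested by the user */
	unsigned int unit = 2;           /* source_data_size in bytes   */
	unsigned int offset = 0;         /* units already programmed    */
	int i = 0;

	do {
		unsigned int count = (block_size - offset * unit) / unit;

		if (count > MAX_XFER) {
			printf("desc %d: xferCnt=%u, continue\n", i, MAX_XFER - 1);
			offset += MAX_XFER;
		} else {
			printf("desc %d: xferCnt=%u, done\n", i, count - 1);
			offset = 0;
		}
		i++;
	} while (offset != 0);

	return 0;
}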
@@ -195,8 +208,8 @@ static int dma_silabs_block_to_descriptor(struct dma_config *config,
		LOG_WRN("dest_buffer address is null.");
	}

-	desc->xfer.srcAddr = block->source_address;
-	desc->xfer.dstAddr = block->dest_address;
+	desc->xfer.srcAddr = block->source_address + loc_offset * config->source_data_size;
+	desc->xfer.dstAddr = block->dest_address + loc_offset * config->dest_data_size;

	return 0;
}
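/*
 * Editor's worked example for the address adjustment above (standalone,
 * illustrative addresses only): a continuation descriptor must start where
 * the previous one stopped, so both buffer addresses are advanced by
 * loc_offset units, scaled by the data size configured for each side.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t source_address = 0x20000000; /* first descriptor reads here  */
	uint32_t dest_address = 0x20008000;   /* first descriptor writes here */
	uint32_t source_data_size = 2;        /* bytes per source transfer    */
	uint32_t dest_data_size = 2;          /* bytes per dest transfer      */
	uint32_t loc_offset = 2048;           /* units covered so far         */

	/* The second descriptor resumes 2048 * 2 = 4096 bytes into each buffer. */
	printf("srcAddr=0x%08" PRIx32 " dstAddr=0x%08" PRIx32 "\n",
	       source_address + loc_offset * source_data_size,
	       dest_address + loc_offset * dest_data_size);

	return 0;
}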
@@ -229,21 +242,22 @@ static int dma_silabs_configure_descriptor(struct dma_config *config, struct dma
	struct dma_block_config *head_block = config->head_block;
	struct dma_block_config *block = config->head_block;
	LDMA_Descriptor_t *desc, *prev_desc;
-	int ret;
+	int ret, offset;

	/* Descriptors configuration
	 * block refers to user configured block (dma_block_config structure from dma.h)
	 * desc refers to driver configured block (LDMA_Descriptor_t structure from silabs
	 * hal)
	 */
	prev_desc = NULL;
+	offset = 0;
	while (block) {
		ret = sys_mem_blocks_alloc(data->dma_desc_pool, 1, (void **)&desc);
		if (ret) {
			goto err;
		}

-		ret = dma_silabs_block_to_descriptor(config, chan_conf, block, desc);
+		ret = dma_silabs_block_to_descriptor(config, chan_conf, block, desc, &offset);
		if (ret) {
			goto err;
		}
@@ -257,13 +271,15 @@ static int dma_silabs_configure_descriptor(struct dma_config *config, struct dma
		}

		prev_desc = desc;
-		block = block->next_block;
-		if (block == head_block) {
-			block = NULL;
-			prev_desc->xfer.linkAddr =
-				LDMA_DESCRIPTOR_LINKABS_ADDR_TO_LINKADDR(chan_conf->desc);
-			prev_desc->xfer.linkMode = ldmaLinkModeAbs;
-			prev_desc->xfer.link = 1;
+		if (!offset) {
+			block = block->next_block;
+			if (block == head_block) {
+				block = NULL;
+				prev_desc->xfer.linkAddr =
+					LDMA_DESCRIPTOR_LINKABS_ADDR_TO_LINKADDR(chan_conf->desc);
+				prev_desc->xfer.linkMode = ldmaLinkModeAbs;
+				prev_desc->xfer.link = 1;
+			}
		}
	}

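/*
 * Editor's sketch of the descriptor walk above (standalone, simplified
 * types): one user block may now need several descriptors, so the loop only
 * moves on to block->next_block once the conversion reports offset == 0,
 * and a cyclic configuration stops after one full lap over the block list.
 * convert() is a hypothetical stand-in for dma_silabs_block_to_descriptor()
 * that consumes at most 2048 units per call.
 */
struct blk {
	unsigned int size;	/* block size in transfer units       */
	struct blk *next;	/* next block, may point back to head */
};

static int convert(const struct blk *b, unsigned int *offset)
{
	unsigned int remaining = b->size - *offset;

	if (remaining > 2048) {
		*offset += 2048;	/* descriptor full, resume here later      */
	} else {
		*offset = 0;		/* block fully covered by this descriptor  */
	}
	return 0;
}

static int count_descriptors(struct blk *head)
{
	struct blk *b = head;
	unsigned int offset = 0;
	int used = 0;

	while (b) {
		if (convert(b, &offset)) {
			return -1;	/* conversion error                   */
		}
		used++;			/* one descriptor allocated per pass  */

		if (!offset) {
			b = b->next;
			if (b == head) {
				b = NULL;	/* cyclic list: one lap done  */
			}
		}
	}
	return used;
}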
@@ -525,7 +541,9 @@ int silabs_ldma_append_block(const struct device *dev, uint32_t channel, struct
	struct dma_block_config *block_config = config->head_block;
	LDMA_Descriptor_t *desc = data->dma_chan_table[channel].desc;
	unsigned int key;
-	int ret;
+	int ret, offset;
+
+	offset = 0;

	__ASSERT(!((uintptr_t)desc & ~_LDMA_CH_LINK_LINKADDR_MASK),
		 "DMA Descriptor is not 32 bits aligned");
@@ -552,9 +570,15 @@ int silabs_ldma_append_block(const struct device *dev, uint32_t channel, struct
		return -EINVAL;
	}

-	ret = dma_silabs_block_to_descriptor(config, chan_conf, block_config, desc);
+	ret = dma_silabs_block_to_descriptor(config, chan_conf, block_config, desc, &offset);
	if (ret) {
		return ret;
+	} else if (offset) {
+		/* If the offset is not 0, it means that the block size is larger than the transfer
+		 * capacity of a single hardware LDMA descriptor. It is not supported with the
+		 * append function.
+		 */
+		return -EINVAL;
	}

	key = irq_lock();
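/*
 * Editor's caller-side sketch (assumption: as in the hunk above,
 * silabs_ldma_append_block() rejects any block that needs more than one
 * hardware descriptor). The 2048 limit is a stand-in for
 * LDMA_DESCRIPTOR_MAX_XFER_SIZE; checking up front avoids relying on the
 * -EINVAL return at runtime.
 */
#include <stdbool.h>
#include <zephyr/drivers/dma.h>

static bool fits_single_descriptor(const struct dma_config *cfg)
{
	uint32_t units = cfg->head_block->block_size / cfg->source_data_size;

	return units <= 2048;
}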