|
  */
 
 #include <inttypes.h>
+#include <sys/param.h> // for MIN/MAX
 #include "esp_private/sdmmc_common.h"
 
+// the maximum size, in blocks, of the chunks an SDMMC write/read will be split into
+#define MAX_NUM_BLOCKS_PER_MULTI_BLOCK_RW (16u)
+
 static const char* TAG = "sdmmc_cmd";
 
 
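With the usual 512-byte SD sector size, the new cap bounds the temporary DMA bounce buffer used by the fallback paths below at 16 * 512 = 8 KiB per chunk. A minimal compile-time restatement of that arithmetic (the 512-byte figure is the typical sector size, assumed here rather than read from the card):

```c
/* Compile-time sketch of the bounce-buffer bound implied by the new constant:
 * at most 16 blocks of (typically) 512 bytes are staged per transfer chunk,
 * i.e. an 8 KiB DMA-capable allocation. */
#include <assert.h>

#define MAX_NUM_BLOCKS_PER_MULTI_BLOCK_RW (16u)
#define TYPICAL_SD_SECTOR_SIZE            (512u)

static_assert(MAX_NUM_BLOCKS_PER_MULTI_BLOCK_RW * TYPICAL_SD_SECTOR_SIZE == 8 * 1024u,
              "a full chunk needs an 8 KiB bounce buffer");
```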
|
@@ -462,27 +466,30 @@ esp_err_t sdmmc_write_sectors(sdmmc_card_t* card, const void* src,
         err = sdmmc_write_sectors_dma(card, src, start_block, block_count, block_size * block_count);
     } else {
         // SDMMC peripheral needs DMA-capable buffers. Split the write into
-        // separate single block writes, if needed, and allocate a temporary
+        // separate (multi) block writes, if needed, and allocate a temporary
         // DMA-capable buffer.
+        size_t blocks_per_write = MIN(MAX_NUM_BLOCKS_PER_MULTI_BLOCK_RW, block_count);
         void *tmp_buf = NULL;
         size_t actual_size = 0;
         // We don't want to force the allocation into SPIRAM, the allocator
         // will decide based on the buffer size and memory availability.
-        tmp_buf = heap_caps_malloc(block_size, MALLOC_CAP_DMA);
+        tmp_buf = heap_caps_malloc(block_size * blocks_per_write, MALLOC_CAP_DMA);
         if (!tmp_buf) {
             ESP_LOGE(TAG, "%s: not enough mem, err=0x%x", __func__, ESP_ERR_NO_MEM);
             return ESP_ERR_NO_MEM;
         }
         actual_size = heap_caps_get_allocated_size(tmp_buf);
 
         const uint8_t* cur_src = (const uint8_t*) src;
-        for (size_t i = 0; i < block_count; ++i) {
-            memcpy(tmp_buf, cur_src, block_size);
-            cur_src += block_size;
-            err = sdmmc_write_sectors_dma(card, tmp_buf, start_block + i, 1, actual_size);
+        for (size_t i = 0; i < block_count; i += blocks_per_write) {
+            // make sure not to write more than the remaining blocks, i.e. block_count - i
+            blocks_per_write = MIN(blocks_per_write, (block_count - i));
+            memcpy(tmp_buf, cur_src, block_size * blocks_per_write);
+            cur_src += block_size * blocks_per_write;
+            err = sdmmc_write_sectors_dma(card, tmp_buf, start_block + i, blocks_per_write, actual_size);
             if (err != ESP_OK) {
-                ESP_LOGD(TAG, "%s: error 0x%x writing block %d+%d",
-                         __func__, err, start_block, i);
+                ESP_LOGD(TAG, "%s: error 0x%x writing blocks %zu+[%zu..%zu]",
+                         __func__, err, start_block, i, i + blocks_per_write - 1);
                 break;
             }
         }
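The same chunk-walking pattern drives both the write and the read path. A standalone, host-compilable sketch of just that arithmetic (the 512-byte sector size and the 37-block transfer are arbitrary example values; MIN is defined inline instead of relying on <sys/param.h>):

```c
/* Host-side sketch of the chunking loop introduced above: a block_count-block
 * transfer is issued in chunks of at most MAX_NUM_BLOCKS_PER_MULTI_BLOCK_RW
 * blocks, and the final chunk is clamped to whatever remains. */
#include <stdio.h>
#include <stddef.h>

#define MAX_NUM_BLOCKS_PER_MULTI_BLOCK_RW (16u)
#define MIN(a, b) ((a) < (b) ? (a) : (b))   /* normally pulled in from <sys/param.h> */

int main(void)
{
    const size_t block_size  = 512;  /* typical SD sector size */
    const size_t block_count = 37;   /* arbitrary example transfer length */

    size_t blocks_per_write = MIN(MAX_NUM_BLOCKS_PER_MULTI_BLOCK_RW, block_count);
    printf("bounce buffer: %zu bytes\n", block_size * blocks_per_write);

    for (size_t i = 0; i < block_count; i += blocks_per_write) {
        /* clamp the last chunk to the remaining blocks, i.e. block_count - i */
        blocks_per_write = MIN(blocks_per_write, block_count - i);
        printf("chunk: blocks [%zu..%zu] (%zu blocks)\n",
               i, i + blocks_per_write - 1, blocks_per_write);
    }
    return 0;
}
```

This prints one 8192-byte buffer size and the chunks [0..15], [16..31], [32..36], i.e. three multi-block transfers instead of 37 single-block ones.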
@@ -600,27 +607,32 @@ esp_err_t sdmmc_read_sectors(sdmmc_card_t* card, void* dst,
         err = sdmmc_read_sectors_dma(card, dst, start_block, block_count, block_size * block_count);
     } else {
         // SDMMC peripheral needs DMA-capable buffers. Split the read into
-        // separate single block reads, if needed, and allocate a temporary
+        // separate (multi) block reads, if needed, and allocate a temporary
         // DMA-capable buffer.
+        size_t blocks_per_read = MIN(MAX_NUM_BLOCKS_PER_MULTI_BLOCK_RW, block_count);
         void *tmp_buf = NULL;
         size_t actual_size = 0;
-        tmp_buf = heap_caps_malloc(block_size, MALLOC_CAP_DMA);
+        // We don't want to force the allocation into SPIRAM, the allocator
+        // will decide based on the buffer size and memory availability.
+        tmp_buf = heap_caps_malloc(block_size * blocks_per_read, MALLOC_CAP_DMA);
         if (!tmp_buf) {
             ESP_LOGE(TAG, "%s: not enough mem, err=0x%x", __func__, ESP_ERR_NO_MEM);
             return ESP_ERR_NO_MEM;
         }
         actual_size = heap_caps_get_allocated_size(tmp_buf);
 
         uint8_t* cur_dst = (uint8_t*) dst;
-        for (size_t i = 0; i < block_count; ++i) {
-            err = sdmmc_read_sectors_dma(card, tmp_buf, start_block + i, 1, actual_size);
+        for (size_t i = 0; i < block_count; i += blocks_per_read) {
+            // make sure not to read more than the remaining blocks, i.e. block_count - i
+            blocks_per_read = MIN(blocks_per_read, (block_count - i));
+            err = sdmmc_read_sectors_dma(card, tmp_buf, start_block + i, blocks_per_read, actual_size);
             if (err != ESP_OK) {
-                ESP_LOGD(TAG, "%s: error 0x%x writing block %d+%d",
-                         __func__, err, start_block, i);
+                ESP_LOGD(TAG, "%s: error 0x%x reading blocks %zu+[%zu..%zu]",
+                         __func__, err, start_block, i, i + blocks_per_read - 1);
                 break;
             }
-            memcpy(cur_dst, tmp_buf, block_size);
-            cur_dst += block_size;
+            memcpy(cur_dst, tmp_buf, block_size * blocks_per_read);
+            cur_dst += block_size * blocks_per_read;
         }
         free(tmp_buf);
     }
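For context, a sketch of the kind of caller that lands on this fallback path: a buffer allocated in external PSRAM is, on most targets, not usable directly by the SDMMC DMA, so the driver stages the data through its internal bounce buffer in chunks of up to MAX_NUM_BLOCKS_PER_MULTI_BLOCK_RW blocks rather than one block at a time. The helper name copy_blocks_via_psram and the 64-sector offset are made up for illustration, and card initialization (host setup, sdmmc_card_init(), PSRAM enabled in the build) is assumed to have happened elsewhere.

```c
/* Hypothetical caller exercising the chunked fallback path: the buffer lives
 * in PSRAM (MALLOC_CAP_SPIRAM), which the SDMMC peripheral generally cannot
 * DMA to or from directly, so sdmmc_read_sectors()/sdmmc_write_sectors()
 * fall back to the bounce-buffer loop patched above. */
#include <stdint.h>
#include <stddef.h>
#include "esp_err.h"
#include "esp_heap_caps.h"
#include "sdmmc_cmd.h"

esp_err_t copy_blocks_via_psram(sdmmc_card_t *card, size_t start_block, size_t block_count)
{
    const size_t block_size = card->csd.sector_size;  /* typically 512 bytes */
    uint8_t *buf = heap_caps_malloc(block_size * block_count, MALLOC_CAP_SPIRAM);
    if (buf == NULL) {
        return ESP_ERR_NO_MEM;
    }

    /* Read a run of sectors into the PSRAM buffer, then write it back out
     * 64 sectors further on; both calls go through the chunked path above. */
    esp_err_t err = sdmmc_read_sectors(card, buf, start_block, block_count);
    if (err == ESP_OK) {
        err = sdmmc_write_sectors(card, buf, start_block + 64, block_count);
    }
    free(buf);
    return err;
}
```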
|