diff --git a/.config b/.config
index 8003bc4..a01e8bd 100644
--- a/.config
+++ b/.config
@@ -128,7 +128,7 @@ CONFIG_DFS_USING_WORKDIR=y
CONFIG_DFS_FD_MAX=16
CONFIG_RT_USING_DFS_V1=y
# CONFIG_RT_USING_DFS_V2 is not set
-CONFIG_DFS_FILESYSTEMS_MAX=4
+CONFIG_DFS_FILESYSTEMS_MAX=6
CONFIG_DFS_FILESYSTEM_TYPES_MAX=4
CONFIG_RT_USING_DFS_ELMFAT=y
@@ -164,7 +164,12 @@ CONFIG_RT_USING_DFS_TMPFS=y
# CONFIG_RT_USING_DFS_MQUEUE is not set
# end of DFS: device virtual file system
-# CONFIG_RT_USING_FAL is not set
+CONFIG_RT_USING_FAL=y
+# CONFIG_FAL_DEBUG_CONFIG is not set
+CONFIG_FAL_DEBUG=0
+CONFIG_FAL_PART_HAS_TABLE_CFG=y
+CONFIG_FAL_USING_SFUD_PORT=y
+CONFIG_FAL_USING_NOR_FLASH_DEV_NAME="W25Q128"
#
# Device Drivers
@@ -188,7 +193,7 @@ CONFIG_RT_SERIAL_RB_BUFSZ=512
# CONFIG_RT_USING_ZERO is not set
# CONFIG_RT_USING_RANDOM is not set
# CONFIG_RT_USING_PWM is not set
-# CONFIG_RT_USING_MTD_NOR is not set
+CONFIG_RT_USING_MTD_NOR=y
# CONFIG_RT_USING_MTD_NAND is not set
# CONFIG_RT_USING_PM is not set
CONFIG_RT_USING_RTC=y
@@ -205,7 +210,12 @@ CONFIG_RT_USING_SPI=y
# CONFIG_RT_USING_SPI_BITOPS is not set
CONFIG_RT_USING_QSPI=y
CONFIG_RT_USING_SPI_MSD=y
-# CONFIG_RT_USING_SFUD is not set
+CONFIG_RT_USING_SFUD=y
+CONFIG_RT_SFUD_USING_SFDP=y
+CONFIG_RT_SFUD_USING_FLASH_INFO_TABLE=y
+CONFIG_RT_SFUD_USING_QSPI=y
+CONFIG_RT_SFUD_SPI_MAX_HZ=50000000
+# CONFIG_RT_DEBUG_SFUD is not set
# CONFIG_RT_USING_ENC28J60 is not set
# CONFIG_RT_USING_SPI_WIFI is not set
# CONFIG_RT_USING_WDT is not set
@@ -660,7 +670,31 @@ CONFIG_PKG_SQLITE_DB_NAME_MAX_LEN=64
CONFIG_PKG_USING_SQLITE_V3193=y
# CONFIG_PKG_USING_RTI is not set
# CONFIG_PKG_USING_DFS_YAFFS is not set
-# CONFIG_PKG_USING_LITTLEFS is not set
+CONFIG_PKG_USING_LITTLEFS=y
+CONFIG_PKG_LITTLEFS_PATH="/packages/system/littlefs"
+# CONFIG_PKG_USING_LITTLEFS_V090 is not set
+# CONFIG_PKG_USING_LITTLEFS_V170 is not set
+# CONFIG_PKG_USING_LITTLEFS_V172 is not set
+# CONFIG_PKG_USING_LITTLEFS_V201 is not set
+# CONFIG_PKG_USING_LITTLEFS_V205 is not set
+# CONFIG_PKG_USING_LITTLEFS_V214 is not set
+# CONFIG_PKG_USING_LITTLEFS_V220 is not set
+# CONFIG_PKG_USING_LITTLEFS_V221 is not set
+# CONFIG_PKG_USING_LITTLEFS_V230 is not set
+# CONFIG_PKG_USING_LITTLEFS_V250 is not set
+# CONFIG_PKG_USING_LITTLEFS_V293 is not set
+CONFIG_PKG_USING_LITTLEFS_V2112=y
+# CONFIG_PKG_USING_LITTLEFS_LATEST_VERSION is not set
+CONFIG_LFS_READ_SIZE=256
+CONFIG_LFS_PROG_SIZE=256
+CONFIG_LFS_BLOCK_SIZE=4096
+CONFIG_LFS_CACHE_SIZE=256
+CONFIG_LFS_BLOCK_CYCLES=500
+# CONFIG_DFS_LFS_READONLY is not set
+CONFIG_LFS_THREADSAFE=y
+CONFIG_LFS_LOOKAHEAD_MAX=128
+CONFIG_RT_DEF_LFS_DRIVERS=2
+CONFIG_PKG_LITTLEFS_VER="v2.11.2"
# CONFIG_PKG_USING_DFS_JFFS2 is not set
# CONFIG_PKG_USING_DFS_UFFS is not set
# CONFIG_PKG_USING_LWEXT4 is not set
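
Note on the configuration block above: the options form a dependency chain. RT_USING_MTD_NOR and RT_USING_SFUD provide the NOR flash access layer, RT_USING_FAL adds a partition table on top of it, and the littlefs package supplies the filesystem that is mounted on the FAL-backed MTD device. The littlefs geometry matches the W25Q128: LFS_READ_SIZE/LFS_PROG_SIZE of 256 bytes equal the chip's page size, and LFS_BLOCK_SIZE of 4096 equals its sector-erase size. DFS_FILESYSTEMS_MAX is raised to 6 because elm (SD card), tmpfs (/run) and lfs (/sys) are now mounted at the same time. A minimal runtime sanity check is sketched below; it assumes the SFUD accessor rt_sfud_flash_find_by_dev_name() and the sfud_flash chip fields are available in this build:

#include <rtthread.h>
#include <spi_flash_sfud.h>

/* sketch: warn if the probed chip geometry disagrees with the littlefs config */
static int lfs_geometry_check(void)
{
    sfud_flash_t flash = rt_sfud_flash_find_by_dev_name("W25Q128");
    if (flash == RT_NULL)
        return -RT_ERROR;
    if (flash->chip.erase_gran != LFS_BLOCK_SIZE)
        rt_kprintf("warning: erase_gran %d != LFS_BLOCK_SIZE %d\n",
                   (int)flash->chip.erase_gran, LFS_BLOCK_SIZE);
    return RT_EOK;
}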
diff --git a/.cproject b/.cproject
index 63df00c..e42c2d4 100644
--- a/.cproject
+++ b/.cproject
@@ -1,248 +1,255 @@
[.cproject XML hunk body not preserved in this extract]
diff --git a/.project b/.project
index f059ea4..350c330 100644
--- a/.project
+++ b/.project
@@ -1,6 +1,6 @@
-	<name>828F</name>
+	<name>project</name>
diff --git a/.settings/.rtmenus b/.settings/.rtmenus
index 27d9f46..b5c2f3e 100644
Binary files a/.settings/.rtmenus and b/.settings/.rtmenus differ
diff --git a/.settings/language.settings.xml b/.settings/language.settings.xml
index fa957bb..2071896 100644
--- a/.settings/language.settings.xml
+++ b/.settings/language.settings.xml
@@ -5,7 +5,7 @@
[XML hunk body not preserved in this extract]
diff --git a/applications/QSPI_elmfatfs.c b/applications/QSPI_elmfatfs.c
new file mode 100644
index 0000000..1844323
--- /dev/null
+++ b/applications/QSPI_elmfatfs.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2006-2021, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date Author Notes
+ * 2025-11-22 Administrator the first version
+ */
+#include <rtthread.h>
+#include <rtdevice.h>
+#include <dfs_posix.h>
+#include "fal_cfg.h"
+#include "spi_flash.h"
+#include "spi_flash_sfud.h"
+#include "drv_qspi.h"
+#include "dfs_fs.h"
+#include "fal.h"
+
+/* debug log header */
+#include <rtdbg.h>
+
+char w25qxx_read_status_register2(struct rt_qspi_device *device)
+{
+ /* 0x35 read status register2 */
+ char instruction = 0x35, status;
+ rt_qspi_send_then_recv(device, &instruction, 1, &status, 1);
+ return status;
+}
+
+void w25qxx_write_enable(struct rt_qspi_device *device)
+{
+ /* 0x06 write enable */
+ char instruction = 0x06;
+ rt_qspi_send(device, &instruction, 1);
+}
+
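+/*
+ * The W25Qxx QE (Quad Enable) bit is bit 1 of status register 2. The sequence
+ * below only acts when QE is still clear: issue write-enable (0x06), write
+ * status register 2 back with QE set (0x31), then send 0x38 to enter QSPI mode.
+ */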
+void w25qxx_enter_qspi_mode(struct rt_qspi_device *device)
+{
+ char status = 0;
+ /* 0x38 enter qspi mode */
+ char instruction = 0x38;
+ char write_status2_buf[2] = {0};
+ /* 0x31 write status register2 */
+ write_status2_buf[0] = 0x31;
+ status = w25qxx_read_status_register2(device);
+ if (!(status & 0x02))
+ {
+ status |= 1 << 1;
+ w25qxx_write_enable(device);
+ write_status2_buf[1] = status;
+ rt_qspi_send(device, write_status2_buf, 2);
+ rt_qspi_send(device, &instruction, 1);
+ LOG_D("flash entered QSPI mode");
+ rt_thread_mdelay(10);
+ }
+}
+
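+/*
+ * Bring-up chain (device init stage): attach a QSPI device on bus "qspi1"
+ * with 4 data lines and the QE-enable hook above, probe the chip through
+ * SFUD, initialize the FAL partition table, then expose the "sysdata"
+ * partition as an MTD NOR device for the filesystem to mount.
+ */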
+int spi_flash_init(void)
+{
+ rt_err_t ret = stm32_qspi_bus_attach_device("qspi1", "qspi10", RT_NULL, 4, w25qxx_enter_qspi_mode, RT_NULL);
+ if (ret != RT_EOK)
+ {
+ LOG_E("qspi attach device failed\n");
+ return -RT_ERROR;
+ }
+ /* probe the flash chip: the device name must match FAL_USING_NOR_FLASH_DEV_NAME
+ from the configuration; "qspi10" is the attached QSPI device */
+ if(rt_sfud_flash_probe(FAL_USING_NOR_FLASH_DEV_NAME, "qspi10") == RT_NULL)
+ {
+ LOG_E("rt sfud flash error");
+ return -1;
+ }
+ LOG_I("SFUD flash '%s' probed successfully.", FAL_USING_NOR_FLASH_DEV_NAME);
+ /* initialize the FAL (Flash Abstraction Layer) */
+ int part_num = fal_init();
+ if (part_num > 0)
+ {
+ LOG_I("FAL number of partitions = %d", part_num);
+ }
+ else
+ {
+ LOG_E("fal_init failed");
+ return -1;
+ }
+ /* create an MTD NOR device on the "sysdata" partition */
+ if (fal_mtd_nor_device_create("sysdata") == RT_NULL)
+ {
+ LOG_E("Failed to create MTD device for sysdata!");
+ return -1;
+ }
+ LOG_I("MTD device 'sysdata' created.");
+ return RT_EOK;
+}
+INIT_DEVICE_EXPORT(spi_flash_init);
+
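+/*
+ * Application init stage: by now spi_flash_init() has created the "sysdata"
+ * MTD device, so mount littlefs at /sys, formatting the partition on the
+ * first boot (or after corruption).
+ */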
+int qspi_fal(void)
+{
+ LOG_I("Create file path /sys");
+ mkdir("/sys", 0777);
+
+ /* 挂载 littlefs */
+ if (dfs_mount("sysdata", "/sys", "lfs", 0, NULL) == RT_EOK)
+ {
+ LOG_I("Filesystem initialized!");
+ }
+ else
+ {
+ LOG_E("Format memory sysdata");
+ /* 格式化文件系统 */
+ dfs_mkfs("lfs", "sysdata");
+ /* 挂载 littlefs */
+ if (dfs_mount("sysdata", "/sys", "lfs", 0, NULL) == RT_EOK)
+ {
+ LOG_I("Filesystem initialized!");
+ }
+ else
+ {
+ LOG_E("Failed to initialize filesystem!");
+ }
+ }
+ return RT_EOK;
+}
+INIT_APP_EXPORT(qspi_fal);
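
A quick way to exercise the new mount once the board is up — a minimal msh smoke test, assuming the DFS v1 POSIX wrappers from dfs_posix.h (the same API family used elsewhere in this project):

#include <rtthread.h>
#include <dfs_posix.h>

/* sketch: write a file on /sys and read it back to verify the lfs mount */
static int lfs_smoke_test(void)
{
    char buf[16] = {0};
    int fd = open("/sys/test.txt", O_CREAT | O_RDWR);
    if (fd < 0)
    {
        rt_kprintf("open /sys/test.txt failed\n");
        return -1;
    }
    write(fd, "hello lfs", 9);
    lseek(fd, 0, SEEK_SET);
    read(fd, buf, sizeof(buf) - 1);
    close(fd);
    rt_kprintf("read back: %s\n", buf);
    return 0;
}
MSH_CMD_EXPORT(lfs_smoke_test, write and read back a file on /sys);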
diff --git a/applications/RUN_LED.c b/applications/RUN_LED.c
index ad62067..f302d3c 100644
--- a/applications/RUN_LED.c
+++ b/applications/RUN_LED.c
@@ -4,7 +4,7 @@
#include"drv_common.h"
#include "RUN_LED.h"
-#define LED_PIN GET_PIN(B,0)
+#define LED_PIN GET_PIN(B,1)
/* entry function of thread 1 */
void RUN_LED(void *parameter)
diff --git a/applications/SDIO_elmfatfs.c b/applications/SDIO_elmfatfs.c
index e81012d..5053977 100644
--- a/applications/SDIO_elmfatfs.c
+++ b/applications/SDIO_elmfatfs.c
@@ -24,6 +24,7 @@ void sd_mount(void *parameter)
{
if (rt_device_find("sd") != RT_NULL)
{
+ LOG_I("Create file path /sddisk");
mkdir("/sddisk", 0777);
if (dfs_mount("sd", "/sddisk", "elm", 0, NULL) == RT_EOK)
{
@@ -54,7 +55,6 @@ void sd_mount(void *parameter)
int stm32_sdcard_mount(void)
{
rt_thread_t tid;
- rt_thread_mdelay(1000);
// delete the old semaphore (if it exists)
if (mount_sem != RT_NULL)
{
@@ -72,7 +72,7 @@ int stm32_sdcard_mount(void)
tid = rt_thread_create("sd_mount",
sd_mount,
RT_NULL,
- 1024*6, // avoid stack overflow
+ 1024*2, // avoid stack overflow
RT_THREAD_PRIORITY_MAX - 2,
20);
if (tid != RT_NULL)
diff --git a/applications/TMPFS_root.c b/applications/TMPFS_root.c
index 5a1d6ee..8a21a7e 100644
--- a/applications/TMPFS_root.c
+++ b/applications/TMPFS_root.c
@@ -28,11 +28,11 @@ int init_tmpfs_root(void)
if (mkdir("/run", 0777) == 0) {
LOG_I("Created /run directory");
} else {
- LOG_E("mkdir failed, errno=%d", errno);
+ LOG_E("mkdir failed, errno = %d", errno);
}
return 0;
}
/* run during the device init phase (so the directory exists before app init) */
-INIT_APP_EXPORT(init_tmpfs_root);
+INIT_DEVICE_EXPORT(init_tmpfs_root);
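
The change above moves init_tmpfs_root() from the application stage to the device stage, presumably so /run exists before application-stage code such as qspi_fal() runs. For reference, RT-Thread's auto-init macros execute in this order:

/* RT-Thread automatic initialization order (earliest first) */
INIT_BOARD_EXPORT(fn);      /* board level, before the scheduler starts */
INIT_PREV_EXPORT(fn);       /* pure software init, no device dependencies */
INIT_DEVICE_EXPORT(fn);     /* device drivers: spi_flash_init, init_tmpfs_root */
INIT_COMPONENT_EXPORT(fn);  /* components, e.g. file system layers */
INIT_ENV_EXPORT(fn);        /* environment, e.g. mount tables */
INIT_APP_EXPORT(fn);        /* applications: qspi_fal, stm32_sdcard_mount */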
diff --git a/applications/fal_cfg.h b/applications/fal_cfg.h
new file mode 100644
index 0000000..608361b
--- /dev/null
+++ b/applications/fal_cfg.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2006-2021, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date Author Notes
+ * 2025-11-22 Administrator the first version
+ */
+#ifndef _FAL_CFG_H_
+#define _FAL_CFG_H_
+
+#include <rtconfig.h>
+#include <board.h>
+
+extern struct fal_flash_dev nor_flash0;
+
+/* flash device table */
+#define FAL_FLASH_DEV_TABLE \
+{ \
+ &nor_flash0, \
+}
+
+/* partition table: adjust the layout to your needs */
+#define FAL_PART_TABLE \
+{ \
+ {FAL_PART_MAGIC_WORD, "sysdata","W25Q128", 0, 16 * 1024 * 1024, 0 }, \
+}
+
+#endif /* _FAL_CFG_H_ */
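
fal_cfg.h maps the whole 16 MB chip to a single "sysdata" partition. If separate regions are needed later, the table extends as below; the partition names and sizes here are hypothetical, and offsets plus lengths must not overlap or exceed the 16 MB capacity:

/* hypothetical multi-partition layout for the 16 MB W25Q128 */
#define FAL_PART_TABLE                                                                 \
{                                                                                      \
    {FAL_PART_MAGIC_WORD, "cfg",     "W25Q128", 0,               1 * 1024 * 1024, 0},  \
    {FAL_PART_MAGIC_WORD, "sysdata", "W25Q128", 1 * 1024 * 1024, 15 * 1024 * 1024, 0}, \
}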
diff --git a/cubemx/.mxproject b/cubemx/.mxproject
index ba05569..59ea3f5 100644
--- a/cubemx/.mxproject
+++ b/cubemx/.mxproject
@@ -1,8 +1,8 @@
[PreviousLibFiles]
-LibFiles=Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_cortex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_cortex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_fmc.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_nor.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_sram.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_nand.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_sdram.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_rcc.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_rcc_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_bus.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_rcc.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_crs.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_system.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_utils.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_flash.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_flash_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_gpio.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_gpio_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_gpio.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_hsem.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_hsem.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_dma.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_dma_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_dma.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_dmamux.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_mdma.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_pwr.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_pwr_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_pwr.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_def.h;Drivers\STM32H7xx_HAL_Driver\Inc\Legacy\stm32_hal_legacy.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_i2c.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_i2c_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_exti.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_exti.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_sdmmc.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_sd.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_delayblock.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_sd_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_mmc.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_mmc_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_tim.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_tim_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_uart.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_usart.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_lpuart.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_uart_ex.h;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_cortex.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_ll_fmc.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_nor.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_sram.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_nand.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_sdram.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_rcc.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_rcc_ex.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_flash.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_flash_ex.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_gpio.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_hsem.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_dma.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_dma_ex.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_mdma.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_pwr.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_pwr_ex.c;Drivers\STM32H7x
x_HAL_Driver\Src\stm32h7xx_hal.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_i2c.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_i2c_ex.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_exti.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_ll_sdmmc.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_ll_delayblock.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_sd.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_sd_ex.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_mmc.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_mmc_ex.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_tim.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_tim_ex.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_uart.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_uart_ex.c;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_cortex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_cortex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_fmc.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_nor.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_sram.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_nand.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_sdram.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_rcc.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_rcc_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_bus.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_rcc.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_crs.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_system.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_utils.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_flash.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_flash_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_gpio.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_gpio_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_gpio.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_hsem.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_hsem.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_dma.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_dma_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_dma.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_dmamux.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_mdma.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_pwr.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_pwr_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_pwr.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_def.h;Drivers\STM32H7xx_HAL_Driver\Inc\Legacy\stm32_hal_legacy.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_i2c.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_i2c_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_exti.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_exti.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_sdmmc.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_sd.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_delayblock.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_sd_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_mmc.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_mmc_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_tim.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_tim_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_uart.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_usart.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_lpuart.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_uart_ex.h;Drivers\CMSIS\Device\ST\STM32H7xx\Include\stm32h743xx.h;Drivers\CMSIS\Device\ST\STM32H7xx\Include\stm32h7xx.h;Drivers\CMSIS\Device\ST\STM32H7xx\Include\system_stm32h7xx.h;Drivers\CMSIS\Device\ST\STM32H7xx\In
clude\system_stm32h7xx.h;Drivers\CMSIS\Device\ST\STM32H7xx\Source\Templates\system_stm32h7xx.c;Drivers\CMSIS\Include\cmsis_armcc.h;Drivers\CMSIS\Include\cmsis_armclang.h;Drivers\CMSIS\Include\cmsis_armclang_ltm.h;Drivers\CMSIS\Include\cmsis_compiler.h;Drivers\CMSIS\Include\cmsis_gcc.h;Drivers\CMSIS\Include\cmsis_iccarm.h;Drivers\CMSIS\Include\cmsis_version.h;Drivers\CMSIS\Include\core_armv81mml.h;Drivers\CMSIS\Include\core_armv8mbl.h;Drivers\CMSIS\Include\core_armv8mml.h;Drivers\CMSIS\Include\core_cm0.h;Drivers\CMSIS\Include\core_cm0plus.h;Drivers\CMSIS\Include\core_cm1.h;Drivers\CMSIS\Include\core_cm23.h;Drivers\CMSIS\Include\core_cm3.h;Drivers\CMSIS\Include\core_cm33.h;Drivers\CMSIS\Include\core_cm35p.h;Drivers\CMSIS\Include\core_cm4.h;Drivers\CMSIS\Include\core_cm7.h;Drivers\CMSIS\Include\core_sc000.h;Drivers\CMSIS\Include\core_sc300.h;Drivers\CMSIS\Include\mpu_armv7.h;Drivers\CMSIS\Include\mpu_armv8.h;Drivers\CMSIS\Include\tz_context.h;
+LibFiles=Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_cortex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_cortex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_fmc.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_nor.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_sram.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_nand.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_sdram.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_rcc.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_rcc_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_bus.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_rcc.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_crs.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_system.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_utils.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_flash.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_flash_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_gpio.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_gpio_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_gpio.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_hsem.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_hsem.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_dma.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_dma_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_dma.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_dmamux.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_mdma.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_pwr.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_pwr_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_pwr.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_def.h;Drivers\STM32H7xx_HAL_Driver\Inc\Legacy\stm32_hal_legacy.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_i2c.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_i2c_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_exti.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_exti.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_qspi.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_delayblock.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_sdmmc.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_sd.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_sd_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_mmc.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_mmc_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_tim.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_tim_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_uart.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_usart.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_lpuart.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_uart_ex.h;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_cortex.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_ll_fmc.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_nor.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_sram.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_nand.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_sdram.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_rcc.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_rcc_ex.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_flash.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_flash_ex.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_gpio.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_hsem.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_dma.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_dma_ex.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_mdma.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_pwr.c;Drivers\STM32H7xx_
HAL_Driver\Src\stm32h7xx_hal_pwr_ex.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_i2c.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_i2c_ex.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_exti.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_qspi.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_ll_delayblock.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_ll_sdmmc.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_sd.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_sd_ex.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_mmc.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_mmc_ex.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_tim.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_tim_ex.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_uart.c;Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_uart_ex.c;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_cortex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_cortex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_fmc.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_nor.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_sram.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_nand.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_sdram.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_rcc.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_rcc_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_bus.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_rcc.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_crs.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_system.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_utils.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_flash.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_flash_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_gpio.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_gpio_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_gpio.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_hsem.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_hsem.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_dma.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_dma_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_dma.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_dmamux.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_mdma.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_pwr.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_pwr_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_pwr.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_def.h;Drivers\STM32H7xx_HAL_Driver\Inc\Legacy\stm32_hal_legacy.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_i2c.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_i2c_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_exti.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_exti.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_qspi.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_delayblock.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_sdmmc.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_sd.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_sd_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_mmc.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_mmc_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_tim.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_tim_ex.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_uart.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_usart.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_ll_lpuart.h;Drivers\STM32H7xx_HAL_Driver\Inc\stm32h7xx_hal_uart_ex.h;Drivers\CMSIS\Device\ST\STM32H7xx\Include\stm
32h743xx.h;Drivers\CMSIS\Device\ST\STM32H7xx\Include\stm32h7xx.h;Drivers\CMSIS\Device\ST\STM32H7xx\Include\system_stm32h7xx.h;Drivers\CMSIS\Device\ST\STM32H7xx\Include\system_stm32h7xx.h;Drivers\CMSIS\Device\ST\STM32H7xx\Source\Templates\system_stm32h7xx.c;Drivers\CMSIS\Include\cmsis_armcc.h;Drivers\CMSIS\Include\cmsis_armclang.h;Drivers\CMSIS\Include\cmsis_armclang_ltm.h;Drivers\CMSIS\Include\cmsis_compiler.h;Drivers\CMSIS\Include\cmsis_gcc.h;Drivers\CMSIS\Include\cmsis_iccarm.h;Drivers\CMSIS\Include\cmsis_version.h;Drivers\CMSIS\Include\core_armv81mml.h;Drivers\CMSIS\Include\core_armv8mbl.h;Drivers\CMSIS\Include\core_armv8mml.h;Drivers\CMSIS\Include\core_cm0.h;Drivers\CMSIS\Include\core_cm0plus.h;Drivers\CMSIS\Include\core_cm1.h;Drivers\CMSIS\Include\core_cm23.h;Drivers\CMSIS\Include\core_cm3.h;Drivers\CMSIS\Include\core_cm33.h;Drivers\CMSIS\Include\core_cm35p.h;Drivers\CMSIS\Include\core_cm4.h;Drivers\CMSIS\Include\core_cm7.h;Drivers\CMSIS\Include\core_sc000.h;Drivers\CMSIS\Include\core_sc300.h;Drivers\CMSIS\Include\mpu_armv7.h;Drivers\CMSIS\Include\mpu_armv8.h;Drivers\CMSIS\Include\tz_context.h;
[PreviousUsedIarFiles]
-SourceFiles=..\Src\main.c;..\Src\stm32h7xx_it.c;..\Src\stm32h7xx_hal_msp.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_cortex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_ll_fmc.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_nor.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_sram.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_nand.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_sdram.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_rcc.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_rcc_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_flash.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_flash_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_gpio.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_hsem.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_dma.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_dma_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_mdma.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_pwr.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_pwr_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_i2c.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_i2c_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_exti.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_ll_sdmmc.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_ll_delayblock.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_sd.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_sd_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_mmc.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_mmc_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_tim.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_tim_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_uart.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_uart_ex.c;..\Drivers\CMSIS\Device\ST\STM32H7xx\Source\Templates\system_stm32h7xx.c;..\\Src\system_stm32h7xx.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_cortex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_ll_fmc.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_nor.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_sram.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_nand.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_sdram.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_rcc.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_rcc_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_flash.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_flash_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_gpio.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_hsem.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_dma.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_dma_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_mdma.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_pwr.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_pwr_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_i2c.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_i2c_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_exti.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_ll_sdmmc.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_ll_delayblock.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_sd.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_sd_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_mmc.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_mmc_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_t
im.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_tim_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_uart.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_uart_ex.c;..\Drivers\CMSIS\Device\ST\STM32H7xx\Source\Templates\system_stm32h7xx.c;..\\Src\system_stm32h7xx.c;;;
+SourceFiles=..\Src\main.c;..\Src\stm32h7xx_it.c;..\Src\stm32h7xx_hal_msp.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_cortex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_ll_fmc.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_nor.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_sram.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_nand.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_sdram.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_rcc.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_rcc_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_flash.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_flash_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_gpio.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_hsem.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_dma.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_dma_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_mdma.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_pwr.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_pwr_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_i2c.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_i2c_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_exti.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_qspi.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_ll_delayblock.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_ll_sdmmc.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_sd.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_sd_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_mmc.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_mmc_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_tim.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_tim_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_uart.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_uart_ex.c;..\Drivers\CMSIS\Device\ST\STM32H7xx\Source\Templates\system_stm32h7xx.c;..\\Src\system_stm32h7xx.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_cortex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_ll_fmc.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_nor.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_sram.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_nand.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_sdram.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_rcc.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_rcc_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_flash.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_flash_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_gpio.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_hsem.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_dma.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_dma_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_mdma.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_pwr.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_pwr_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_i2c.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_i2c_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_exti.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_qspi.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_ll_delayblock.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_ll_sdmmc.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_sd.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_sd_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_mm
c.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_mmc_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_tim.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_tim_ex.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_uart.c;..\Drivers\STM32H7xx_HAL_Driver\Src\stm32h7xx_hal_uart_ex.c;..\Drivers\CMSIS\Device\ST\STM32H7xx\Source\Templates\system_stm32h7xx.c;..\\Src\system_stm32h7xx.c;;;
HeaderPath=..\Drivers\STM32H7xx_HAL_Driver\Inc;..\Drivers\STM32H7xx_HAL_Driver\Inc\Legacy;..\Drivers\CMSIS\Device\ST\STM32H7xx\Include;..\Drivers\CMSIS\Include;..\Inc;
CDefines=USE_PWR_LDO_SUPPLY;USE_HAL_DRIVER;STM32H743xx;USE_HAL_DRIVER;USE_HAL_DRIVER;
diff --git a/cubemx/Drivers/STM32H7xx_HAL_Driver/Inc/stm32h7xx_hal_qspi.h b/cubemx/Drivers/STM32H7xx_HAL_Driver/Inc/stm32h7xx_hal_qspi.h
new file mode 100644
index 0000000..321f05f
--- /dev/null
+++ b/cubemx/Drivers/STM32H7xx_HAL_Driver/Inc/stm32h7xx_hal_qspi.h
@@ -0,0 +1,747 @@
+/**
+ ******************************************************************************
+ * @file stm32h7xx_hal_qspi.h
+ * @author MCD Application Team
+ * @brief Header file of QSPI HAL module.
+ ******************************************************************************
+ * @attention
+ *
+ * Copyright (c) 2017 STMicroelectronics.
+ * All rights reserved.
+ *
+ * This software is licensed under terms that can be found in the LICENSE file
+ * in the root directory of this software component.
+ * If no LICENSE file comes with this software, it is provided AS-IS.
+ *
+ ******************************************************************************
+ */
+
+/* Define to prevent recursive inclusion -------------------------------------*/
+#ifndef STM32H7xx_HAL_QSPI_H
+#define STM32H7xx_HAL_QSPI_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/* Includes ------------------------------------------------------------------*/
+#include "stm32h7xx_hal_def.h"
+#if defined (DLYB_QUADSPI)
+#include "stm32h7xx_ll_delayblock.h"
+#endif /* DLYB_QUADSPI */
+
+#if defined(QUADSPI)
+
+/** @addtogroup STM32H7xx_HAL_Driver
+ * @{
+ */
+
+/** @addtogroup QSPI
+ * @{
+ */
+
+/* Exported types ------------------------------------------------------------*/
+/** @defgroup QSPI_Exported_Types QSPI Exported Types
+ * @{
+ */
+
+/**
+ * @brief QSPI Init structure definition
+ */
+typedef struct
+{
+ uint32_t ClockPrescaler; /* Specifies the prescaler factor for generating clock based on the AHB clock.
+ This parameter can be a number between 0 and 255 */
+ uint32_t FifoThreshold; /* Specifies the threshold number of bytes in the FIFO (used only in indirect mode)
+ This parameter can be a value between 1 and 32 */
+ uint32_t SampleShifting; /* Specifies the Sample Shift. The data is sampled 1/2 clock cycle delay later to
+ take in account external signal delays. (It should be QSPI_SAMPLE_SHIFTING_NONE in DDR mode)
+ This parameter can be a value of @ref QSPI_SampleShifting */
+ uint32_t FlashSize; /* Specifies the Flash Size. FlashSize+1 is effectively the number of address bits
+ required to address the flash memory. The flash capacity can be up to 4GB
+ (addressed using 32 bits) in indirect mode, but the addressable space in
+ memory-mapped mode is limited to 256MB
+ This parameter can be a number between 0 and 31 */
+ uint32_t ChipSelectHighTime; /* Specifies the Chip Select High Time. ChipSelectHighTime+1 defines the minimum number
+ of clock cycles which the chip select must remain high between commands.
+ This parameter can be a value of @ref QSPI_ChipSelectHighTime */
+ uint32_t ClockMode; /* Specifies the Clock Mode. It indicates the level that clock takes between commands.
+ This parameter can be a value of @ref QSPI_ClockMode */
+ uint32_t FlashID; /* Specifies the Flash which will be used,
+ This parameter can be a value of @ref QSPI_Flash_Select */
+ uint32_t DualFlash; /* Specifies the Dual Flash Mode State
+ This parameter can be a value of @ref QSPI_DualFlash_Mode */
+}QSPI_InitTypeDef;
+
+/**
+ * @brief HAL QSPI State structures definition
+ */
+typedef enum
+{
+ HAL_QSPI_STATE_RESET = 0x00U, /*!< Peripheral not initialized */
+ HAL_QSPI_STATE_READY = 0x01U, /*!< Peripheral initialized and ready for use */
+ HAL_QSPI_STATE_BUSY = 0x02U, /*!< Peripheral in indirect mode and busy */
+ HAL_QSPI_STATE_BUSY_INDIRECT_TX = 0x12U, /*!< Peripheral in indirect mode with transmission ongoing */
+ HAL_QSPI_STATE_BUSY_INDIRECT_RX = 0x22U, /*!< Peripheral in indirect mode with reception ongoing */
+ HAL_QSPI_STATE_BUSY_AUTO_POLLING = 0x42U, /*!< Peripheral in auto polling mode ongoing */
+ HAL_QSPI_STATE_BUSY_MEM_MAPPED = 0x82U, /*!< Peripheral in memory mapped mode ongoing */
+ HAL_QSPI_STATE_ABORT = 0x08U, /*!< Peripheral with abort request ongoing */
+ HAL_QSPI_STATE_ERROR = 0x04U /*!< Peripheral in error */
+}HAL_QSPI_StateTypeDef;
+
+/**
+ * @brief QSPI Handle Structure definition
+ */
+#if (USE_HAL_QSPI_REGISTER_CALLBACKS == 1)
+typedef struct __QSPI_HandleTypeDef
+#else
+typedef struct
+#endif
+{
+ QUADSPI_TypeDef *Instance; /* QSPI registers base address */
+ QSPI_InitTypeDef Init; /* QSPI communication parameters */
+ uint8_t *pTxBuffPtr; /* Pointer to QSPI Tx transfer Buffer */
+ __IO uint32_t TxXferSize; /* QSPI Tx Transfer size */
+ __IO uint32_t TxXferCount; /* QSPI Tx Transfer Counter */
+ uint8_t *pRxBuffPtr; /* Pointer to QSPI Rx transfer Buffer */
+ __IO uint32_t RxXferSize; /* QSPI Rx Transfer size */
+ __IO uint32_t RxXferCount; /* QSPI Rx Transfer Counter */
+ MDMA_HandleTypeDef *hmdma; /* QSPI Rx/Tx MDMA Handle parameters */
+ __IO HAL_LockTypeDef Lock; /* Locking object */
+ __IO HAL_QSPI_StateTypeDef State; /* QSPI communication state */
+ __IO uint32_t ErrorCode; /* QSPI Error code */
+ uint32_t Timeout; /* Timeout for the QSPI memory access */
+#if (USE_HAL_QSPI_REGISTER_CALLBACKS == 1)
+ void (* ErrorCallback) (struct __QSPI_HandleTypeDef *hqspi);
+ void (* AbortCpltCallback) (struct __QSPI_HandleTypeDef *hqspi);
+ void (* FifoThresholdCallback)(struct __QSPI_HandleTypeDef *hqspi);
+ void (* CmdCpltCallback) (struct __QSPI_HandleTypeDef *hqspi);
+ void (* RxCpltCallback) (struct __QSPI_HandleTypeDef *hqspi);
+ void (* TxCpltCallback) (struct __QSPI_HandleTypeDef *hqspi);
+ void (* StatusMatchCallback) (struct __QSPI_HandleTypeDef *hqspi);
+ void (* TimeOutCallback) (struct __QSPI_HandleTypeDef *hqspi);
+
+ void (* MspInitCallback) (struct __QSPI_HandleTypeDef *hqspi);
+ void (* MspDeInitCallback) (struct __QSPI_HandleTypeDef *hqspi);
+#endif
+}QSPI_HandleTypeDef;
+
+/**
+ * @brief QSPI Command structure definition
+ */
+typedef struct
+{
+ uint32_t Instruction; /* Specifies the Instruction to be sent
+ This parameter can be a value (8-bit) between 0x00 and 0xFF */
+ uint32_t Address; /* Specifies the Address to be sent (Size from 1 to 4 bytes according AddressSize)
+ This parameter can be a value (32-bits) between 0x0 and 0xFFFFFFFF */
+ uint32_t AlternateBytes; /* Specifies the Alternate Bytes to be sent (Size from 1 to 4 bytes according AlternateBytesSize)
+ This parameter can be a value (32-bits) between 0x0 and 0xFFFFFFFF */
+ uint32_t AddressSize; /* Specifies the Address Size
+ This parameter can be a value of @ref QSPI_AddressSize */
+ uint32_t AlternateBytesSize; /* Specifies the Alternate Bytes Size
+ This parameter can be a value of @ref QSPI_AlternateBytesSize */
+ uint32_t DummyCycles; /* Specifies the Number of Dummy Cycles.
+ This parameter can be a number between 0 and 31 */
+ uint32_t InstructionMode; /* Specifies the Instruction Mode
+ This parameter can be a value of @ref QSPI_InstructionMode */
+ uint32_t AddressMode; /* Specifies the Address Mode
+ This parameter can be a value of @ref QSPI_AddressMode */
+ uint32_t AlternateByteMode; /* Specifies the Alternate Bytes Mode
+ This parameter can be a value of @ref QSPI_AlternateBytesMode */
+ uint32_t DataMode; /* Specifies the Data Mode (used for dummy cycles and data phases)
+ This parameter can be a value of @ref QSPI_DataMode */
+ uint32_t NbData; /* Specifies the number of data to transfer. (This is the number of bytes)
+ This parameter can be any value between 0 and 0xFFFFFFFF (0 means undefined length
+ until end of memory)*/
+ uint32_t DdrMode; /* Specifies the double data rate mode for address, alternate byte and data phase
+ This parameter can be a value of @ref QSPI_DdrMode */
+ uint32_t DdrHoldHalfCycle; /* Specifies if the DDR hold is enabled. When enabled it delays the data
+ output by one half of system clock in DDR mode.
+ This parameter can be a value of @ref QSPI_DdrHoldHalfCycle */
+ uint32_t SIOOMode; /* Specifies the send instruction only once mode
+ This parameter can be a value of @ref QSPI_SIOOMode */
+}QSPI_CommandTypeDef;
+
+/**
+ * @brief QSPI Auto Polling mode configuration structure definition
+ */
+typedef struct
+{
+ uint32_t Match; /* Specifies the value to be compared with the masked status register to get a match.
+ This parameter can be any value between 0 and 0xFFFFFFFF */
+ uint32_t Mask; /* Specifies the mask to be applied to the status bytes received.
+ This parameter can be any value between 0 and 0xFFFFFFFF */
+ uint32_t Interval; /* Specifies the number of clock cycles between two read during automatic polling phases.
+ This parameter can be any value between 0 and 0xFFFF */
+ uint32_t StatusBytesSize; /* Specifies the size of the status bytes received.
+ This parameter can be any value between 1 and 4 */
+ uint32_t MatchMode; /* Specifies the method used for determining a match.
+ This parameter can be a value of @ref QSPI_MatchMode */
+ uint32_t AutomaticStop; /* Specifies if automatic polling is stopped after a match.
+ This parameter can be a value of @ref QSPI_AutomaticStop */
+}QSPI_AutoPollingTypeDef;
+
+/**
+ * @brief QSPI Memory Mapped mode configuration structure definition
+ */
+typedef struct
+{
+ uint32_t TimeOutPeriod; /* Specifies the number of clock to wait when the FIFO is full before to release the chip select.
+ This parameter can be any value between 0 and 0xFFFF */
+ uint32_t TimeOutActivation; /* Specifies if the timeout counter is enabled to release the chip select.
+ This parameter can be a value of @ref QSPI_TimeOutActivation */
+}QSPI_MemoryMappedTypeDef;
+
+#if (USE_HAL_QSPI_REGISTER_CALLBACKS == 1)
+/**
+ * @brief HAL QSPI Callback ID enumeration definition
+ */
+typedef enum
+{
+ HAL_QSPI_ERROR_CB_ID = 0x00U, /*!< QSPI Error Callback ID */
+ HAL_QSPI_ABORT_CB_ID = 0x01U, /*!< QSPI Abort Callback ID */
+ HAL_QSPI_FIFO_THRESHOLD_CB_ID = 0x02U, /*!< QSPI FIFO Threshold Callback ID */
+ HAL_QSPI_CMD_CPLT_CB_ID = 0x03U, /*!< QSPI Command Complete Callback ID */
+ HAL_QSPI_RX_CPLT_CB_ID = 0x04U, /*!< QSPI Rx Complete Callback ID */
+ HAL_QSPI_TX_CPLT_CB_ID = 0x05U, /*!< QSPI Tx Complete Callback ID */
+ HAL_QSPI_STATUS_MATCH_CB_ID = 0x08U, /*!< QSPI Status Match Callback ID */
+ HAL_QSPI_TIMEOUT_CB_ID = 0x09U, /*!< QSPI Timeout Callback ID */
+
+ HAL_QSPI_MSP_INIT_CB_ID = 0x0AU, /*!< QSPI MspInit Callback ID */
+ HAL_QSPI_MSP_DEINIT_CB_ID = 0x0BU /*!< QSPI MspDeInit Callback ID */
+}HAL_QSPI_CallbackIDTypeDef;
+
+/**
+ * @brief HAL QSPI Callback pointer definition
+ */
+typedef void (*pQSPI_CallbackTypeDef)(QSPI_HandleTypeDef *hqspi);
+#endif
+/**
+ * @}
+ */
+
+/* Exported constants --------------------------------------------------------*/
+/** @defgroup QSPI_Exported_Constants QSPI Exported Constants
+ * @{
+ */
+
+/** @defgroup QSPI_ErrorCode QSPI Error Code
+ * @{
+ */
+#define HAL_QSPI_ERROR_NONE 0x00000000U /*!< No error */
+#define HAL_QSPI_ERROR_TIMEOUT 0x00000001U /*!< Timeout error */
+#define HAL_QSPI_ERROR_TRANSFER 0x00000002U /*!< Transfer error */
+#define HAL_QSPI_ERROR_DMA 0x00000004U /*!< DMA transfer error */
+#define HAL_QSPI_ERROR_INVALID_PARAM 0x00000008U /*!< Invalid parameters error */
+#if (USE_HAL_QSPI_REGISTER_CALLBACKS == 1)
+#define HAL_QSPI_ERROR_INVALID_CALLBACK 0x00000010U /*!< Invalid callback error */
+#endif
+/**
+ * @}
+ */
+
+/** @defgroup QSPI_SampleShifting QSPI Sample Shifting
+ * @{
+ */
+#define QSPI_SAMPLE_SHIFTING_NONE      0x00000000U                   /*!< No clock cycle shift to sample data */
+#define QSPI_SAMPLE_SHIFTING_HALFCYCLE ((uint32_t)QUADSPI_CR_SSHIFT) /*!< 1/2 clock cycle shift to sample data */
+
+[... the remaining QSPI exported constants and the start of the exported macros section were lost in extraction; the handle-state reset macro resumes below ...]
+
+#if (USE_HAL_QSPI_REGISTER_CALLBACKS == 1)
+#define __HAL_QSPI_RESET_HANDLE_STATE(__HANDLE__)  do {                                              \
+                                                        (__HANDLE__)->State = HAL_QSPI_STATE_RESET; \
+                                                        (__HANDLE__)->MspInitCallback = NULL;       \
+                                                        (__HANDLE__)->MspDeInitCallback = NULL;     \
+                                                      } while(0)
+#else
+#define __HAL_QSPI_RESET_HANDLE_STATE(__HANDLE__) ((__HANDLE__)->State = HAL_QSPI_STATE_RESET)
+#endif
+
+/** @brief Enable the QSPI peripheral.
+ * @param __HANDLE__ specifies the QSPI Handle.
+ * @retval None
+ */
+#define __HAL_QSPI_ENABLE(__HANDLE__) SET_BIT((__HANDLE__)->Instance->CR, QUADSPI_CR_EN)
+
+/** @brief Disable the QSPI peripheral.
+ * @param __HANDLE__ specifies the QSPI Handle.
+ * @retval None
+ */
+#define __HAL_QSPI_DISABLE(__HANDLE__) CLEAR_BIT((__HANDLE__)->Instance->CR, QUADSPI_CR_EN)
+
+/** @brief Enable the specified QSPI interrupt.
+ * @param __HANDLE__ specifies the QSPI Handle.
+ * @param __INTERRUPT__ specifies the QSPI interrupt source to enable.
+ * This parameter can be one of the following values:
+ * @arg QSPI_IT_TO: QSPI Timeout interrupt
+ * @arg QSPI_IT_SM: QSPI Status match interrupt
+ * @arg QSPI_IT_FT: QSPI FIFO threshold interrupt
+ * @arg QSPI_IT_TC: QSPI Transfer complete interrupt
+ * @arg QSPI_IT_TE: QSPI Transfer error interrupt
+ * @retval None
+ */
+#define __HAL_QSPI_ENABLE_IT(__HANDLE__, __INTERRUPT__) SET_BIT((__HANDLE__)->Instance->CR, (__INTERRUPT__))
+
+
+/** @brief Disable the specified QSPI interrupt.
+ * @param __HANDLE__ specifies the QSPI Handle.
+ * @param __INTERRUPT__ specifies the QSPI interrupt source to disable.
+ * This parameter can be one of the following values:
+ * @arg QSPI_IT_TO: QSPI Timeout interrupt
+ * @arg QSPI_IT_SM: QSPI Status match interrupt
+ * @arg QSPI_IT_FT: QSPI FIFO threshold interrupt
+ * @arg QSPI_IT_TC: QSPI Transfer complete interrupt
+ * @arg QSPI_IT_TE: QSPI Transfer error interrupt
+ * @retval None
+ */
+#define __HAL_QSPI_DISABLE_IT(__HANDLE__, __INTERRUPT__) CLEAR_BIT((__HANDLE__)->Instance->CR, (__INTERRUPT__))
+
+/** @brief Check whether the specified QSPI interrupt source is enabled or not.
+ * @param __HANDLE__ specifies the QSPI Handle.
+ * @param __INTERRUPT__ specifies the QSPI interrupt source to check.
+ * This parameter can be one of the following values:
+ * @arg QSPI_IT_TO: QSPI Timeout interrupt
+ * @arg QSPI_IT_SM: QSPI Status match interrupt
+ * @arg QSPI_IT_FT: QSPI FIFO threshold interrupt
+ * @arg QSPI_IT_TC: QSPI Transfer complete interrupt
+ * @arg QSPI_IT_TE: QSPI Transfer error interrupt
+ * @retval The new state of __INTERRUPT__ (TRUE or FALSE).
+ */
+#define __HAL_QSPI_GET_IT_SOURCE(__HANDLE__, __INTERRUPT__) (READ_BIT((__HANDLE__)->Instance->CR, (__INTERRUPT__)) == (__INTERRUPT__))
+
+/**
+ * @brief Check whether the selected QSPI flag is set or not.
+ * @param __HANDLE__ specifies the QSPI Handle.
+ * @param __FLAG__ specifies the QSPI flag to check.
+ * This parameter can be one of the following values:
+ * @arg QSPI_FLAG_BUSY: QSPI Busy flag
+ * @arg QSPI_FLAG_TO: QSPI Timeout flag
+ * @arg QSPI_FLAG_SM: QSPI Status match flag
+ * @arg QSPI_FLAG_FT: QSPI FIFO threshold flag
+ * @arg QSPI_FLAG_TC: QSPI Transfer complete flag
+ * @arg QSPI_FLAG_TE: QSPI Transfer error flag
+ * @retval None
+ */
+#define __HAL_QSPI_GET_FLAG(__HANDLE__, __FLAG__) ((READ_BIT((__HANDLE__)->Instance->SR, (__FLAG__)) != 0U) ? SET : RESET)
+
+/** @brief Clears the specified QSPI's flag status.
+ * @param __HANDLE__ specifies the QSPI Handle.
+ * @param __FLAG__ specifies the QSPI clear register flag that needs to be set
+ * This parameter can be one of the following values:
+ * @arg QSPI_FLAG_TO: QSPI Timeout flag
+ * @arg QSPI_FLAG_SM: QSPI Status match flag
+ * @arg QSPI_FLAG_TC: QSPI Transfer complete flag
+ * @arg QSPI_FLAG_TE: QSPI Transfer error flag
+ * @retval None
+ */
+#define __HAL_QSPI_CLEAR_FLAG(__HANDLE__, __FLAG__) WRITE_REG((__HANDLE__)->Instance->FCR, (__FLAG__))
+/**
+ * @}
+ */
+
+/* Exported functions --------------------------------------------------------*/
+/** @addtogroup QSPI_Exported_Functions
+ * @{
+ */
+
+/** @addtogroup QSPI_Exported_Functions_Group1
+ * @{
+ */
+/* Initialization/de-initialization functions ********************************/
+HAL_StatusTypeDef HAL_QSPI_Init (QSPI_HandleTypeDef *hqspi);
+HAL_StatusTypeDef HAL_QSPI_DeInit (QSPI_HandleTypeDef *hqspi);
+void HAL_QSPI_MspInit (QSPI_HandleTypeDef *hqspi);
+void HAL_QSPI_MspDeInit(QSPI_HandleTypeDef *hqspi);
+/**
+ * @}
+ */
+
+/** @addtogroup QSPI_Exported_Functions_Group2
+ * @{
+ */
+/* IO operation functions *****************************************************/
+/* QSPI IRQ handler method */
+void HAL_QSPI_IRQHandler(QSPI_HandleTypeDef *hqspi);
+
+/* QSPI indirect mode */
+HAL_StatusTypeDef HAL_QSPI_Command (QSPI_HandleTypeDef *hqspi, QSPI_CommandTypeDef *cmd, uint32_t Timeout);
+HAL_StatusTypeDef HAL_QSPI_Transmit (QSPI_HandleTypeDef *hqspi, uint8_t *pData, uint32_t Timeout);
+HAL_StatusTypeDef HAL_QSPI_Receive (QSPI_HandleTypeDef *hqspi, uint8_t *pData, uint32_t Timeout);
+HAL_StatusTypeDef HAL_QSPI_Command_IT (QSPI_HandleTypeDef *hqspi, QSPI_CommandTypeDef *cmd);
+HAL_StatusTypeDef HAL_QSPI_Transmit_IT (QSPI_HandleTypeDef *hqspi, uint8_t *pData);
+HAL_StatusTypeDef HAL_QSPI_Receive_IT (QSPI_HandleTypeDef *hqspi, uint8_t *pData);
+HAL_StatusTypeDef HAL_QSPI_Transmit_DMA (QSPI_HandleTypeDef *hqspi, uint8_t *pData);
+HAL_StatusTypeDef HAL_QSPI_Receive_DMA (QSPI_HandleTypeDef *hqspi, uint8_t *pData);
+
+/* QSPI status flag polling mode */
+HAL_StatusTypeDef HAL_QSPI_AutoPolling (QSPI_HandleTypeDef *hqspi, QSPI_CommandTypeDef *cmd, QSPI_AutoPollingTypeDef *cfg, uint32_t Timeout);
+HAL_StatusTypeDef HAL_QSPI_AutoPolling_IT(QSPI_HandleTypeDef *hqspi, QSPI_CommandTypeDef *cmd, QSPI_AutoPollingTypeDef *cfg);
+
+/* QSPI memory-mapped mode */
+HAL_StatusTypeDef HAL_QSPI_MemoryMapped(QSPI_HandleTypeDef *hqspi, QSPI_CommandTypeDef *cmd, QSPI_MemoryMappedTypeDef *cfg);
+
+/* Callback functions in non-blocking modes ***********************************/
+void HAL_QSPI_ErrorCallback (QSPI_HandleTypeDef *hqspi);
+void HAL_QSPI_AbortCpltCallback (QSPI_HandleTypeDef *hqspi);
+void HAL_QSPI_FifoThresholdCallback(QSPI_HandleTypeDef *hqspi);
+
+/* QSPI indirect mode */
+void HAL_QSPI_CmdCpltCallback (QSPI_HandleTypeDef *hqspi);
+void HAL_QSPI_RxCpltCallback (QSPI_HandleTypeDef *hqspi);
+void HAL_QSPI_TxCpltCallback (QSPI_HandleTypeDef *hqspi);
+
+/* QSPI status flag polling mode */
+void HAL_QSPI_StatusMatchCallback (QSPI_HandleTypeDef *hqspi);
+
+/* QSPI memory-mapped mode */
+void HAL_QSPI_TimeOutCallback (QSPI_HandleTypeDef *hqspi);
+
+#if (USE_HAL_QSPI_REGISTER_CALLBACKS == 1)
+/* QSPI callback registering/unregistering */
+HAL_StatusTypeDef HAL_QSPI_RegisterCallback (QSPI_HandleTypeDef *hqspi, HAL_QSPI_CallbackIDTypeDef CallbackId, pQSPI_CallbackTypeDef pCallback);
+HAL_StatusTypeDef HAL_QSPI_UnRegisterCallback (QSPI_HandleTypeDef *hqspi, HAL_QSPI_CallbackIDTypeDef CallbackId);
+#endif
+/**
+ * @}
+ */
+
+/** @addtogroup QSPI_Exported_Functions_Group3
+ * @{
+ */
+/* Peripheral Control and State functions ************************************/
+HAL_QSPI_StateTypeDef HAL_QSPI_GetState (const QSPI_HandleTypeDef *hqspi);
+uint32_t HAL_QSPI_GetError (const QSPI_HandleTypeDef *hqspi);
+HAL_StatusTypeDef HAL_QSPI_Abort (QSPI_HandleTypeDef *hqspi);
+HAL_StatusTypeDef HAL_QSPI_Abort_IT (QSPI_HandleTypeDef *hqspi);
+void HAL_QSPI_SetTimeout (QSPI_HandleTypeDef *hqspi, uint32_t Timeout);
+HAL_StatusTypeDef HAL_QSPI_SetFifoThreshold(QSPI_HandleTypeDef *hqspi, uint32_t Threshold);
+uint32_t HAL_QSPI_GetFifoThreshold(const QSPI_HandleTypeDef *hqspi);
+HAL_StatusTypeDef HAL_QSPI_SetFlashID (QSPI_HandleTypeDef *hqspi, uint32_t FlashID);
+/**
+ * @}
+ */
+
+/**
+ * @}
+ */
+/* End of exported functions -------------------------------------------------*/
+
+/* Private macros ------------------------------------------------------------*/
+/** @defgroup QSPI_Private_Macros QSPI Private Macros
+ * @{
+ */
+#define IS_QSPI_CLOCK_PRESCALER(PRESCALER) ((PRESCALER) <= 0xFFU)
+
+#define IS_QSPI_FIFO_THRESHOLD(THR) (((THR) > 0U) && ((THR) <= 32U))
+
+#define IS_QSPI_SSHIFT(SSHIFT) (((SSHIFT) == QSPI_SAMPLE_SHIFTING_NONE) || \
+ ((SSHIFT) == QSPI_SAMPLE_SHIFTING_HALFCYCLE))
+
+#define IS_QSPI_FLASH_SIZE(FSIZE) (((FSIZE) <= 31U))
+
+#define IS_QSPI_CS_HIGH_TIME(CSHTIME) (((CSHTIME) == QSPI_CS_HIGH_TIME_1_CYCLE) || \
+ ((CSHTIME) == QSPI_CS_HIGH_TIME_2_CYCLE) || \
+ ((CSHTIME) == QSPI_CS_HIGH_TIME_3_CYCLE) || \
+ ((CSHTIME) == QSPI_CS_HIGH_TIME_4_CYCLE) || \
+ ((CSHTIME) == QSPI_CS_HIGH_TIME_5_CYCLE) || \
+ ((CSHTIME) == QSPI_CS_HIGH_TIME_6_CYCLE) || \
+ ((CSHTIME) == QSPI_CS_HIGH_TIME_7_CYCLE) || \
+ ((CSHTIME) == QSPI_CS_HIGH_TIME_8_CYCLE))
+
+#define IS_QSPI_CLOCK_MODE(CLKMODE) (((CLKMODE) == QSPI_CLOCK_MODE_0) || \
+ ((CLKMODE) == QSPI_CLOCK_MODE_3))
+
+#define IS_QSPI_FLASH_ID(FLASH_ID) (((FLASH_ID) == QSPI_FLASH_ID_1) || \
+ ((FLASH_ID) == QSPI_FLASH_ID_2))
+
+#define IS_QSPI_DUAL_FLASH_MODE(MODE) (((MODE) == QSPI_DUALFLASH_ENABLE) || \
+ ((MODE) == QSPI_DUALFLASH_DISABLE))
+
+#define IS_QSPI_INSTRUCTION(INSTRUCTION) ((INSTRUCTION) <= 0xFFU)
+
+#define IS_QSPI_ADDRESS_SIZE(ADDR_SIZE) (((ADDR_SIZE) == QSPI_ADDRESS_8_BITS) || \
+ ((ADDR_SIZE) == QSPI_ADDRESS_16_BITS) || \
+ ((ADDR_SIZE) == QSPI_ADDRESS_24_BITS) || \
+ ((ADDR_SIZE) == QSPI_ADDRESS_32_BITS))
+
+#define IS_QSPI_ALTERNATE_BYTES_SIZE(SIZE) (((SIZE) == QSPI_ALTERNATE_BYTES_8_BITS) || \
+ ((SIZE) == QSPI_ALTERNATE_BYTES_16_BITS) || \
+ ((SIZE) == QSPI_ALTERNATE_BYTES_24_BITS) || \
+ ((SIZE) == QSPI_ALTERNATE_BYTES_32_BITS))
+
+#define IS_QSPI_DUMMY_CYCLES(DCY) ((DCY) <= 31U)
+
+#define IS_QSPI_INSTRUCTION_MODE(MODE) (((MODE) == QSPI_INSTRUCTION_NONE) || \
+ ((MODE) == QSPI_INSTRUCTION_1_LINE) || \
+ ((MODE) == QSPI_INSTRUCTION_2_LINES) || \
+ ((MODE) == QSPI_INSTRUCTION_4_LINES))
+
+#define IS_QSPI_ADDRESS_MODE(MODE) (((MODE) == QSPI_ADDRESS_NONE) || \
+ ((MODE) == QSPI_ADDRESS_1_LINE) || \
+ ((MODE) == QSPI_ADDRESS_2_LINES) || \
+ ((MODE) == QSPI_ADDRESS_4_LINES))
+
+#define IS_QSPI_ALTERNATE_BYTES_MODE(MODE) (((MODE) == QSPI_ALTERNATE_BYTES_NONE) || \
+ ((MODE) == QSPI_ALTERNATE_BYTES_1_LINE) || \
+ ((MODE) == QSPI_ALTERNATE_BYTES_2_LINES) || \
+ ((MODE) == QSPI_ALTERNATE_BYTES_4_LINES))
+
+#define IS_QSPI_DATA_MODE(MODE) (((MODE) == QSPI_DATA_NONE) || \
+ ((MODE) == QSPI_DATA_1_LINE) || \
+ ((MODE) == QSPI_DATA_2_LINES) || \
+ ((MODE) == QSPI_DATA_4_LINES))
+
+#define IS_QSPI_DDR_MODE(DDR_MODE) (((DDR_MODE) == QSPI_DDR_MODE_DISABLE) || \
+ ((DDR_MODE) == QSPI_DDR_MODE_ENABLE))
+
+#define IS_QSPI_DDR_HHC(DDR_HHC) (((DDR_HHC) == QSPI_DDR_HHC_ANALOG_DELAY) || \
+ ((DDR_HHC) == QSPI_DDR_HHC_HALF_CLK_DELAY))
+
+#define IS_QSPI_SIOO_MODE(SIOO_MODE) (((SIOO_MODE) == QSPI_SIOO_INST_EVERY_CMD) || \
+ ((SIOO_MODE) == QSPI_SIOO_INST_ONLY_FIRST_CMD))
+
+#define IS_QSPI_INTERVAL(INTERVAL) ((INTERVAL) <= QUADSPI_PIR_INTERVAL)
+
+#define IS_QSPI_STATUS_BYTES_SIZE(SIZE) (((SIZE) >= 1U) && ((SIZE) <= 4U))
+
+#define IS_QSPI_MATCH_MODE(MODE) (((MODE) == QSPI_MATCH_MODE_AND) || \
+ ((MODE) == QSPI_MATCH_MODE_OR))
+
+#define IS_QSPI_AUTOMATIC_STOP(APMS) (((APMS) == QSPI_AUTOMATIC_STOP_DISABLE) || \
+ ((APMS) == QSPI_AUTOMATIC_STOP_ENABLE))
+
+#define IS_QSPI_TIMEOUT_ACTIVATION(TCEN) (((TCEN) == QSPI_TIMEOUT_COUNTER_DISABLE) || \
+ ((TCEN) == QSPI_TIMEOUT_COUNTER_ENABLE))
+
+#define IS_QSPI_TIMEOUT_PERIOD(PERIOD) ((PERIOD) <= 0xFFFFU)
+/**
+* @}
+*/
+/* End of private macros -----------------------------------------------------*/
+
+/**
+ * @}
+ */
+
+/**
+ * @}
+ */
+
+#endif /* defined(QUADSPI) */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* STM32H7xx_HAL_QSPI_H */
diff --git a/cubemx/Drivers/STM32H7xx_HAL_Driver/Src/stm32h7xx_hal_qspi.c b/cubemx/Drivers/STM32H7xx_HAL_Driver/Src/stm32h7xx_hal_qspi.c
new file mode 100644
index 0000000..9f177da
--- /dev/null
+++ b/cubemx/Drivers/STM32H7xx_HAL_Driver/Src/stm32h7xx_hal_qspi.c
@@ -0,0 +1,2683 @@
+/**
+ ******************************************************************************
+ * @file stm32h7xx_hal_qspi.c
+ * @author MCD Application Team
+ * @brief QSPI HAL module driver.
+ * This file provides firmware functions to manage the following
+ * functionalities of the QuadSPI interface (QSPI).
+ * + Initialization and de-initialization functions
+ * + Indirect functional mode management
+ * + Memory-mapped functional mode management
+ * + Auto-polling functional mode management
+ * + Interrupts and flags management
+ * + MDMA channel configuration for indirect functional mode
+ * + Errors management and abort functionality
+ *
+ *
+ ******************************************************************************
+ * @attention
+ *
+ * Copyright (c) 2017 STMicroelectronics.
+ * All rights reserved.
+ *
+ * This software is licensed under terms that can be found in the LICENSE file
+ * in the root directory of this software component.
+ * If no LICENSE file comes with this software, it is provided AS-IS.
+ *
+ ******************************************************************************
+ @verbatim
+ ===============================================================================
+ ##### How to use this driver #####
+ ===============================================================================
+ [..]
+ *** Initialization ***
+ ======================
+ [..]
+ (#) As prerequisite, fill in the HAL_QSPI_MspInit() :
+ (++) Enable QuadSPI clock interface with __HAL_RCC_QSPI_CLK_ENABLE().
+ (++) Reset QuadSPI Peripheral with __HAL_RCC_QSPI_FORCE_RESET() and __HAL_RCC_QSPI_RELEASE_RESET().
+ (++) Enable the clocks for the QuadSPI GPIOS with __HAL_RCC_GPIOx_CLK_ENABLE().
+ (++) Configure these QuadSPI pins in alternate mode using HAL_GPIO_Init().
+ (++) If interrupt mode is used, enable and configure QuadSPI global
+ interrupt with HAL_NVIC_SetPriority() and HAL_NVIC_EnableIRQ().
+ (++) If DMA mode is used, enable the clocks for the QuadSPI MDMA
+ with __HAL_RCC_MDMA_CLK_ENABLE(), configure MDMA with HAL_MDMA_Init(),
+ link it with QuadSPI handle using __HAL_LINKDMA(), enable and configure
+ MDMA global interrupt with HAL_NVIC_SetPriority() and HAL_NVIC_EnableIRQ().
+ (#) Configure the flash size, the clock prescaler, the fifo threshold, the
+ clock mode, the sample shifting and the CS high time using the HAL_QSPI_Init() function.
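+
+ [..]
+ A minimal configuration sketch (the values are illustrative assumptions, not
+ requirements of this driver; FlashSize encodes a memory of 2^(FlashSize+1) bytes,
+ so 23 selects a 16-Mbyte device) :
+ QSPI_HandleTypeDef hqspi;
+ hqspi.Instance = QUADSPI;
+ hqspi.Init.ClockPrescaler = 1; /* QSPI kernel clock divided by 2 */
+ hqspi.Init.FifoThreshold = 4;
+ hqspi.Init.SampleShifting = QSPI_SAMPLE_SHIFTING_HALFCYCLE;
+ hqspi.Init.FlashSize = 23; /* 2^(23+1) = 16 Mbytes */
+ hqspi.Init.ChipSelectHighTime = QSPI_CS_HIGH_TIME_1_CYCLE;
+ hqspi.Init.ClockMode = QSPI_CLOCK_MODE_0;
+ hqspi.Init.FlashID = QSPI_FLASH_ID_1;
+ hqspi.Init.DualFlash = QSPI_DUALFLASH_DISABLE;
+ if (HAL_QSPI_Init(&hqspi) != HAL_OK)
+ {
+ /* initialization error */
+ }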
+
+ *** Indirect functional mode ***
+ ================================
+ [..]
+ (#) Configure the command sequence using the HAL_QSPI_Command() or HAL_QSPI_Command_IT()
+ functions :
+ (++) Instruction phase : the mode used and if present the instruction opcode.
+ (++) Address phase : the mode used and if present the size and the address value.
+ (++) Alternate-bytes phase : the mode used and if present the size and the alternate
+ bytes values.
+ (++) Dummy-cycles phase : the number of dummy cycles (mode used is same as data phase).
+ (++) Data phase : the mode used and if present the number of bytes.
+ (++) Double Data Rate (DDR) mode : the activation (or not) of this mode and the delay
+ if activated.
+ (++) Sending Instruction Only Once (SIOO) mode : the activation (or not) of this mode.
+ (#) If no data is required for the command, it is sent directly to the memory :
+ (++) In polling mode, the output of the function is done when the transfer is complete.
+ (++) In interrupt mode, HAL_QSPI_CmdCpltCallback() will be called when the transfer is complete.
+ (#) For the indirect write mode, use HAL_QSPI_Transmit(), HAL_QSPI_Transmit_DMA() or
+ HAL_QSPI_Transmit_IT() after the command configuration :
+ (++) In polling mode, the output of the function is done when the transfer is complete.
+ (++) In interrupt mode, HAL_QSPI_FifoThresholdCallback() will be called when the fifo threshold
+ is reached and HAL_QSPI_TxCpltCallback() will be called when the transfer is complete.
+ (++) In DMA mode, HAL_QSPI_TxCpltCallback() will be called when the transfer is complete.
+ (#) For the indirect read mode, use HAL_QSPI_Receive(), HAL_QSPI_Receive_DMA() or
+ HAL_QSPI_Receive_IT() after the command configuration :
+ (++) In polling mode, the output of the function is done when the transfer is complete.
+ (++) In interrupt mode, HAL_QSPI_FifoThresholdCallback() will be called when the fifo threshold
+ is reached and HAL_QSPI_RxCpltCallback() will be called when the transfer is complete.
+ (++) In DMA mode, HAL_QSPI_RxCpltCallback() will be called when the transfer is complete.
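+
+ [..]
+ A polling-mode sketch of a command followed by an indirect read (the 0xEBU opcode,
+ dummy-cycle count and line widths below are flash-specific assumptions) :
+ QSPI_CommandTypeDef cmd = {0};
+ uint8_t buf[256];
+ cmd.InstructionMode = QSPI_INSTRUCTION_1_LINE;
+ cmd.Instruction = 0xEBU; /* e.g. quad I/O fast read */
+ cmd.AddressMode = QSPI_ADDRESS_4_LINES;
+ cmd.AddressSize = QSPI_ADDRESS_24_BITS;
+ cmd.Address = 0x000000U;
+ cmd.AlternateByteMode = QSPI_ALTERNATE_BYTES_NONE;
+ cmd.DummyCycles = 6;
+ cmd.DataMode = QSPI_DATA_4_LINES;
+ cmd.NbData = sizeof(buf);
+ cmd.DdrMode = QSPI_DDR_MODE_DISABLE;
+ cmd.DdrHoldHalfCycle = QSPI_DDR_HHC_ANALOG_DELAY;
+ cmd.SIOOMode = QSPI_SIOO_INST_EVERY_CMD;
+ if (HAL_QSPI_Command(&hqspi, &cmd, HAL_QSPI_TIMEOUT_DEFAULT_VALUE) == HAL_OK)
+ {
+ (void)HAL_QSPI_Receive(&hqspi, buf, HAL_QSPI_TIMEOUT_DEFAULT_VALUE);
+ }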
+
+ *** Auto-polling functional mode ***
+ ====================================
+ [..]
+ (#) Configure the command sequence and the auto-polling functional mode using the
+ HAL_QSPI_AutoPolling() or HAL_QSPI_AutoPolling_IT() functions :
+ (++) Instruction phase : the mode used and if present the instruction opcode.
+ (++) Address phase : the mode used and if present the size and the address value.
+ (++) Alternate-bytes phase : the mode used and if present the size and the alternate
+ bytes values.
+ (++) Dummy-cycles phase : the number of dummy cycles (mode used is same as data phase).
+ (++) Data phase : the mode used.
+ (++) Double Data Rate (DDR) mode : the activation (or not) of this mode and the delay
+ if activated.
+ (++) Sending Instruction Only Once (SIOO) mode : the activation (or not) of this mode.
+ (++) The size of the status bytes, the match value, the mask used, the match mode (OR/AND),
+ the polling interval and the automatic stop activation.
+ (#) After the configuration :
+ (++) In polling mode, the output of the function is done when the status match is reached. The
+ automatic stop is activated to avoid an infinite loop.
+ (++) In interrupt mode, HAL_QSPI_StatusMatchCallback() will be called each time the status match is reached.
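+
+ [..]
+ A blocking sketch that waits for the memory Write-In-Progress bit to clear (the
+ 0x05U Read Status opcode and the bit masks are memory-specific assumptions) :
+ QSPI_CommandTypeDef cmd = {0};
+ QSPI_AutoPollingTypeDef cfg = {0};
+ cmd.InstructionMode = QSPI_INSTRUCTION_1_LINE;
+ cmd.Instruction = 0x05U; /* e.g. Read Status Register */
+ cmd.AddressMode = QSPI_ADDRESS_NONE;
+ cmd.DataMode = QSPI_DATA_1_LINE;
+ cmd.DdrMode = QSPI_DDR_MODE_DISABLE;
+ cmd.DdrHoldHalfCycle = QSPI_DDR_HHC_ANALOG_DELAY;
+ cmd.SIOOMode = QSPI_SIOO_INST_EVERY_CMD;
+ cfg.Match = 0x00U; /* wait until WIP (bit 0) == 0 */
+ cfg.Mask = 0x01U;
+ cfg.MatchMode = QSPI_MATCH_MODE_AND;
+ cfg.StatusBytesSize = 1;
+ cfg.Interval = 0x10U;
+ cfg.AutomaticStop = QSPI_AUTOMATIC_STOP_ENABLE;
+ (void)HAL_QSPI_AutoPolling(&hqspi, &cmd, &cfg, HAL_QSPI_TIMEOUT_DEFAULT_VALUE);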
+
+ *** MDMA functional mode ***
+ ====================================
+ [..]
+ (#) Configure the SourceInc and DestinationInc of MDMA parameters in the HAL_QSPI_MspInit() function :
+ (++) MDMA settings for write operation :
+ (+) The DestinationInc should be MDMA_DEST_INC_DISABLE.
+ (+) The SourceInc must be a value of MDMA_Source_increment_mode (except MDMA_SRC_INC_DOUBLEWORD).
+ (+) The SourceDataSize must be a value of MDMA Source data size (except MDMA_SRC_DATASIZE_DOUBLEWORD),
+ aligned with MDMA_Source_increment_mode.
+ (+) The DestDataSize must be a value of MDMA Destination data size (except MDMA_DEST_DATASIZE_DOUBLEWORD).
+ (++) MDMA settings for read operation :
+ (+) The SourceInc should be MDMA_SRC_INC_DISABLE.
+ (+) The DestinationInc must be a value of MDMA_Destination_increment_mode (except MDMA_DEST_INC_DOUBLEWORD).
+ (+) The SourceDataSize must be a value of MDMA Source data size (except MDMA_SRC_DATASIZE_DOUBLEWORD).
+ (+) The DestDataSize must be a value of MDMA Destination data size (except MDMA_DEST_DATASIZE_DOUBLEWORD),
+ aligned with MDMA_Destination_increment_mode.
+ (++) The buffer transfer length (BufferTransferLength) must equal the number of bytes in the
+ QuadSPI FIFO threshold (FifoThreshold). A transmit-direction sketch follows this list.
+ (#) In case of a wrong MDMA setting :
+ (++) For write operation :
+ (+) If the DestinationInc is different from MDMA_DEST_INC_DISABLE, it will be disabled by HAL_QSPI_Transmit_DMA().
+ (++) For read operation :
+ (+) If the SourceInc is not set to MDMA_SRC_INC_DISABLE, it will be disabled by HAL_QSPI_Receive_DMA().
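+
+ [..]
+ A sketch of the MDMA setup for the transmit direction inside HAL_QSPI_MspInit()
+ (the channel, priority and 4-byte threshold are illustrative assumptions; the
+ remaining Init fields keep their zero-value defaults) :
+ static MDMA_HandleTypeDef hmdma_qspi;
+ __HAL_RCC_MDMA_CLK_ENABLE();
+ hmdma_qspi.Instance = MDMA_Channel1;
+ hmdma_qspi.Init.Request = MDMA_REQUEST_QUADSPI_FIFO_TH;
+ hmdma_qspi.Init.TransferTriggerMode = MDMA_BUFFER_TRANSFER;
+ hmdma_qspi.Init.Priority = MDMA_PRIORITY_HIGH;
+ hmdma_qspi.Init.SourceInc = MDMA_SRC_INC_BYTE; /* walks through pData */
+ hmdma_qspi.Init.DestinationInc = MDMA_DEST_INC_DISABLE; /* QSPI DR is a fixed address */
+ hmdma_qspi.Init.SourceDataSize = MDMA_SRC_DATASIZE_BYTE;
+ hmdma_qspi.Init.DestDataSize = MDMA_DEST_DATASIZE_BYTE;
+ hmdma_qspi.Init.BufferTransferLength = 4; /* = FifoThreshold in bytes */
+ (void)HAL_MDMA_Init(&hmdma_qspi);
+ __HAL_LINKDMA(hqspi, hmdma, hmdma_qspi);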
+
+ *** Memory-mapped functional mode ***
+ =====================================
+ [..]
+ (#) Configure the command sequence and the memory-mapped functional mode using the
+ HAL_QSPI_MemoryMapped() functions :
+ (++) Instruction phase : the mode used and if present the instruction opcode.
+ (++) Address phase : the mode used and the size.
+ (++) Alternate-bytes phase : the mode used and if present the size and the alternate
+ bytes values.
+ (++) Dummy-cycles phase : the number of dummy cycles (mode used is same as data phase).
+ (++) Data phase : the mode used.
+ (++) Double Data Rate (DDR) mode : the activation (or not) of this mode and the delay
+ if activated.
+ (++) Sending Instruction Only Once (SIOO) mode : the activation (or not) of this mode.
+ (++) The timeout activation and the timeout period.
+ (#) After the configuration, the QuadSPI will be used as soon as an access on the AHB is done on
+ the address range. HAL_QSPI_TimeOutCallback() will be called when the timeout expires.
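+
+ [..]
+ A sketch enabling memory-mapped reads and then dereferencing the AHB address
+ range directly (0x90000000 is the QUADSPI bank base address on STM32H7; 'cmd'
+ is assumed configured like the read command of the indirect-mode sketch) :
+ QSPI_MemoryMappedTypeDef mm_cfg;
+ mm_cfg.TimeOutActivation = QSPI_TIMEOUT_COUNTER_DISABLE;
+ mm_cfg.TimeOutPeriod = 0;
+ if (HAL_QSPI_MemoryMapped(&hqspi, &cmd, &mm_cfg) == HAL_OK)
+ {
+ uint8_t first_byte = *(__IO uint8_t *)0x90000000U; /* plain load from flash */
+ }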
+
+ *** Errors management and abort functionality ***
+ =================================================
+ [..]
+ (#) HAL_QSPI_GetError() function gives the error raised during the last operation.
+ (#) HAL_QSPI_Abort() and HAL_QSPI_Abort_IT() functions abort any ongoing operation and
+ flush the FIFO :
+ (++) In polling mode, the output of the function is done when the transfer
+ complete bit is set and the busy bit cleared.
+ (++) In interrupt mode, HAL_QSPI_AbortCpltCallback() will be called when
+ the transfer complete bit is set.
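+
+ [..]
+ A short sketch combining both (error codes such as HAL_QSPI_ERROR_TIMEOUT are
+ defined by this driver) :
+ if (HAL_QSPI_Abort(&hqspi) != HAL_OK)
+ {
+ uint32_t err = HAL_QSPI_GetError(&hqspi); /* inspect the failure cause */
+ }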
+
+ *** Control functions ***
+ =========================
+ [..]
+ (#) HAL_QSPI_GetState() function gives the current state of the HAL QuadSPI driver.
+ (#) HAL_QSPI_SetTimeout() function configures the timeout value used in the driver.
+ (#) HAL_QSPI_SetFifoThreshold() function configures the threshold on the Fifo of the QSPI IP.
+ (#) HAL_QSPI_GetFifoThreshold() function gives the current FIFO threshold.
+ (#) HAL_QSPI_SetFlashID() function configures the index of the flash memory to be accessed.
+
+ *** Callback registration ***
+ =============================================
+ [..]
+ When the compilation define USE_HAL_QSPI_REGISTER_CALLBACKS is set to 1,
+ the user can dynamically configure the driver callbacks.
+
+ Use function HAL_QSPI_RegisterCallback() to register a user callback;
+ it allows registering the following callbacks:
+ (+) ErrorCallback : callback when error occurs.
+ (+) AbortCpltCallback : callback when abort is completed.
+ (+) FifoThresholdCallback : callback when the fifo threshold is reached.
+ (+) CmdCpltCallback : callback when a command without data is completed.
+ (+) RxCpltCallback : callback when a reception transfer is completed.
+ (+) TxCpltCallback : callback when a transmission transfer is completed.
+ (+) StatusMatchCallback : callback when a status match occurs.
+ (+) TimeOutCallback : callback when the timeout period expires.
+ (+) MspInitCallback : QSPI MspInit.
+ (+) MspDeInitCallback : QSPI MspDeInit.
+ This function takes as parameters the HAL peripheral handle, the Callback ID
+ and a pointer to the user callback function.
+
+ Use function HAL_QSPI_UnRegisterCallback() to reset a callback to the default
+ weak (overridden) function. It allows resetting the following callbacks:
+ (+) ErrorCallback : callback when error occurs.
+ (+) AbortCpltCallback : callback when abort is completed.
+ (+) FifoThresholdCallback : callback when the fifo threshold is reached.
+ (+) CmdCpltCallback : callback when a command without data is completed.
+ (+) RxCpltCallback : callback when a reception transfer is completed.
+ (+) TxCpltCallback : callback when a transmission transfer is completed.
+ (+) StatusMatchCallback : callback when a status match occurs.
+ (+) TimeOutCallback : callback when the timeout period expires.
+ (+) MspInitCallback : QSPI MspInit.
+ (+) MspDeInitCallback : QSPI MspDeInit.
+ This function takes as parameters the HAL peripheral handle and the Callback ID.
+
+ By default, after the HAL_QSPI_Init and if the state is HAL_QSPI_STATE_RESET
+ all callbacks are reset to the corresponding legacy weak (overridden) functions.
+ Exception done for MspInit and MspDeInit callbacks that are respectively
+ reset to the legacy weak (overridden) functions in the HAL_QSPI_Init
+ and HAL_QSPI_DeInit only when these callbacks are null (not registered beforehand).
+ If MspInit or MspDeInit are not null, HAL_QSPI_Init and HAL_QSPI_DeInit
+ keep and use the user MspInit/MspDeInit callbacks (registered beforehand).
+
+ Callbacks can be registered/unregistered in READY state only.
+ Exception done for MspInit/MspDeInit callbacks that can be registered/unregistered
+ in READY or RESET state, thus registered (user) MspInit/DeInit callbacks can be used
+ during the Init/DeInit.
+ In that case first register the MspInit/MspDeInit user callbacks
+ using HAL_QSPI_RegisterCallback before calling HAL_QSPI_DeInit
+ or HAL_QSPI_Init function.
+
+ When the compilation define USE_HAL_QSPI_REGISTER_CALLBACKS is set to 0 or
+ not defined, the callback registering feature is not available
+ and weak (overridden) callbacks are used.
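+
+ [..]
+ A sketch registering a user Tx-complete callback (the callback name is an
+ illustrative assumption; requires USE_HAL_QSPI_REGISTER_CALLBACKS == 1) :
+ void MyQspiTxCplt(QSPI_HandleTypeDef *h)
+ {
+ /* e.g. release a semaphore that the application is pending on */
+ }
+ (void)HAL_QSPI_RegisterCallback(&hqspi, HAL_QSPI_TX_CPLT_CB_ID, MyQspiTxCplt);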
+
+ *** Workarounds linked to Silicon Limitation ***
+ ====================================================
+ [..]
+ (#) Workarounds Implemented inside HAL Driver
+ (++) Extra data written in the FIFO at the end of a read transfer
+
+ @endverbatim
+ ******************************************************************************
+ */
+
+/* Includes ------------------------------------------------------------------*/
+#include "stm32h7xx_hal.h"
+
+#if defined(QUADSPI)
+
+/** @addtogroup STM32H7xx_HAL_Driver
+ * @{
+ */
+
+/** @defgroup QSPI QSPI
+ * @brief QSPI HAL module driver
+ * @{
+ */
+#ifdef HAL_QSPI_MODULE_ENABLED
+
+/* Private typedef -----------------------------------------------------------*/
+
+/* Private define ------------------------------------------------------------*/
+/** @defgroup QSPI_Private_Constants QSPI Private Constants
+ * @{
+ */
+#define QSPI_FUNCTIONAL_MODE_INDIRECT_WRITE 0x00000000U                     /*!< Indirect write mode */
+#define QSPI_FUNCTIONAL_MODE_INDIRECT_READ  ((uint32_t)QUADSPI_CCR_FMODE_0) /*!< Indirect read mode */
+#define QSPI_FUNCTIONAL_MODE_AUTO_POLLING   ((uint32_t)QUADSPI_CCR_FMODE_1) /*!< Automatic polling mode */
+#define QSPI_FUNCTIONAL_MODE_MEMORY_MAPPED  ((uint32_t)QUADSPI_CCR_FMODE)   /*!< Memory-mapped mode */
+/**
+ * @}
+ */
+
+/* Private macro -------------------------------------------------------------*/
+#define IS_QSPI_FUNCTIONAL_MODE(MODE) (((MODE) == QSPI_FUNCTIONAL_MODE_INDIRECT_WRITE) || \
+ ((MODE) == QSPI_FUNCTIONAL_MODE_INDIRECT_READ) || \
+ ((MODE) == QSPI_FUNCTIONAL_MODE_AUTO_POLLING) || \
+ ((MODE) == QSPI_FUNCTIONAL_MODE_MEMORY_MAPPED))
+
+/* Private function prototypes -----------------------------------------------*/
+static void QSPI_DMATxCplt(MDMA_HandleTypeDef *hmdma);
+static void QSPI_DMARxCplt(MDMA_HandleTypeDef *hmdma);
+static void QSPI_DMAError(MDMA_HandleTypeDef *hmdma);
+static void QSPI_DMAAbortCplt(MDMA_HandleTypeDef *hmdma);
+static HAL_StatusTypeDef QSPI_WaitFlagStateUntilTimeout(QSPI_HandleTypeDef *hqspi, uint32_t Flag, FlagStatus State, uint32_t Tickstart, uint32_t Timeout);
+static void QSPI_Config(QSPI_HandleTypeDef *hqspi, QSPI_CommandTypeDef *cmd, uint32_t FunctionalMode);
+
+/* Exported functions --------------------------------------------------------*/
+
+/** @defgroup QSPI_Exported_Functions_Group1 Initialization/de-initialization functions
+ * @brief Initialization and Configuration functions
+ * @{
+ */
+
+/**
+ * @brief Initialize the QSPI mode according to the specified parameters
+ *        in the QSPI_InitTypeDef and initialize the associated handle.
+ * @param hqspi QSPI handle
+ * @retval HAL status
+ */
+HAL_StatusTypeDef HAL_QSPI_Init(QSPI_HandleTypeDef *hqspi)
+{
+ HAL_StatusTypeDef status;
+ uint32_t tickstart = HAL_GetTick();
+
+ /* Check the QSPI handle allocation */
+ if(hqspi == NULL)
+ {
+ return HAL_ERROR;
+ }
+
+ /* Check the parameters */
+ assert_param(IS_QSPI_ALL_INSTANCE(hqspi->Instance));
+ assert_param(IS_QSPI_CLOCK_PRESCALER(hqspi->Init.ClockPrescaler));
+ assert_param(IS_QSPI_FIFO_THRESHOLD(hqspi->Init.FifoThreshold));
+ assert_param(IS_QSPI_SSHIFT(hqspi->Init.SampleShifting));
+ assert_param(IS_QSPI_FLASH_SIZE(hqspi->Init.FlashSize));
+ assert_param(IS_QSPI_CS_HIGH_TIME(hqspi->Init.ChipSelectHighTime));
+ assert_param(IS_QSPI_CLOCK_MODE(hqspi->Init.ClockMode));
+ assert_param(IS_QSPI_DUAL_FLASH_MODE(hqspi->Init.DualFlash));
+
+ if (hqspi->Init.DualFlash != QSPI_DUALFLASH_ENABLE)
+ {
+ assert_param(IS_QSPI_FLASH_ID(hqspi->Init.FlashID));
+ }
+
+ if(hqspi->State == HAL_QSPI_STATE_RESET)
+ {
+
+#if (USE_HAL_QSPI_REGISTER_CALLBACKS == 1)
+ /* Reset Callback pointers in HAL_QSPI_STATE_RESET only */
+ hqspi->ErrorCallback = HAL_QSPI_ErrorCallback;
+ hqspi->AbortCpltCallback = HAL_QSPI_AbortCpltCallback;
+ hqspi->FifoThresholdCallback = HAL_QSPI_FifoThresholdCallback;
+ hqspi->CmdCpltCallback = HAL_QSPI_CmdCpltCallback;
+ hqspi->RxCpltCallback = HAL_QSPI_RxCpltCallback;
+ hqspi->TxCpltCallback = HAL_QSPI_TxCpltCallback;
+ hqspi->StatusMatchCallback = HAL_QSPI_StatusMatchCallback;
+ hqspi->TimeOutCallback = HAL_QSPI_TimeOutCallback;
+
+ if(hqspi->MspInitCallback == NULL)
+ {
+ hqspi->MspInitCallback = HAL_QSPI_MspInit;
+ }
+
+ /* Init the low level hardware */
+ hqspi->MspInitCallback(hqspi);
+#else
+ /* Init the low level hardware : GPIO, CLOCK */
+ HAL_QSPI_MspInit(hqspi);
+#endif
+
+ /* Configure the default timeout for the QSPI memory access */
+ HAL_QSPI_SetTimeout(hqspi, HAL_QSPI_TIMEOUT_DEFAULT_VALUE);
+ }
+
+ /* Configure QSPI FIFO Threshold */
+ MODIFY_REG(hqspi->Instance->CR, QUADSPI_CR_FTHRES,
+ ((hqspi->Init.FifoThreshold - 1U) << QUADSPI_CR_FTHRES_Pos));
+
+ /* Wait till BUSY flag reset */
+ status = QSPI_WaitFlagStateUntilTimeout(hqspi, QSPI_FLAG_BUSY, RESET, tickstart, hqspi->Timeout);
+
+ if(status == HAL_OK)
+ {
+ /* Configure QSPI Clock Prescaler and Sample Shift */
+ MODIFY_REG(hqspi->Instance->CR, (QUADSPI_CR_PRESCALER | QUADSPI_CR_SSHIFT | QUADSPI_CR_FSEL | QUADSPI_CR_DFM),
+ ((hqspi->Init.ClockPrescaler << QUADSPI_CR_PRESCALER_Pos) |
+ hqspi->Init.SampleShifting | hqspi->Init.FlashID | hqspi->Init.DualFlash));
+
+ /* Configure QSPI Flash Size, CS High Time and Clock Mode */
+ MODIFY_REG(hqspi->Instance->DCR, (QUADSPI_DCR_FSIZE | QUADSPI_DCR_CSHT | QUADSPI_DCR_CKMODE),
+ ((hqspi->Init.FlashSize << QUADSPI_DCR_FSIZE_Pos) |
+ hqspi->Init.ChipSelectHighTime | hqspi->Init.ClockMode));
+
+ /* Enable the QSPI peripheral */
+ __HAL_QSPI_ENABLE(hqspi);
+
+ /* Set QSPI error code to none */
+ hqspi->ErrorCode = HAL_QSPI_ERROR_NONE;
+
+ /* Initialize the QSPI state */
+ hqspi->State = HAL_QSPI_STATE_READY;
+ }
+
+ /* Return function status */
+ return status;
+}
+
+/**
+ * @brief De-Initialize the QSPI peripheral.
+ * @param hqspi QSPI handle
+ * @retval HAL status
+ */
+HAL_StatusTypeDef HAL_QSPI_DeInit(QSPI_HandleTypeDef *hqspi)
+{
+ /* Check the QSPI handle allocation */
+ if(hqspi == NULL)
+ {
+ return HAL_ERROR;
+ }
+
+ /* Disable the QSPI Peripheral Clock */
+ __HAL_QSPI_DISABLE(hqspi);
+
+#if (USE_HAL_QSPI_REGISTER_CALLBACKS == 1)
+ if(hqspi->MspDeInitCallback == NULL)
+ {
+ hqspi->MspDeInitCallback = HAL_QSPI_MspDeInit;
+ }
+
+ /* DeInit the low level hardware */
+ hqspi->MspDeInitCallback(hqspi);
+#else
+ /* DeInit the low level hardware: GPIO, CLOCK, NVIC... */
+ HAL_QSPI_MspDeInit(hqspi);
+#endif
+
+ /* Set QSPI error code to none */
+ hqspi->ErrorCode = HAL_QSPI_ERROR_NONE;
+
+ /* Initialize the QSPI state */
+ hqspi->State = HAL_QSPI_STATE_RESET;
+
+ return HAL_OK;
+}
+
+/**
+ * @brief Initialize the QSPI MSP.
+ * @param hqspi QSPI handle
+ * @retval None
+ */
+__weak void HAL_QSPI_MspInit(QSPI_HandleTypeDef *hqspi)
+{
+ /* Prevent unused argument(s) compilation warning */
+ UNUSED(hqspi);
+
+ /* NOTE : This function should not be modified, when the callback is needed,
+ the HAL_QSPI_MspInit can be implemented in the user file
+ */
+}
+
+/**
+ * @brief DeInitialize the QSPI MSP.
+ * @param hqspi QSPI handle
+ * @retval None
+ */
+__weak void HAL_QSPI_MspDeInit(QSPI_HandleTypeDef *hqspi)
+{
+ /* Prevent unused argument(s) compilation warning */
+ UNUSED(hqspi);
+
+ /* NOTE : This function should not be modified, when the callback is needed,
+ the HAL_QSPI_MspDeInit can be implemented in the user file
+ */
+}
+
+/**
+ * @}
+ */
+
+/** @defgroup QSPI_Exported_Functions_Group2 Input and Output operation functions
+ * @brief QSPI Transmit/Receive functions
+ *
+@verbatim
+ ===============================================================================
+ ##### IO operation functions #####
+ ===============================================================================
+ [..]
+ This subsection provides a set of functions allowing to :
+ (+) Handle the interrupts.
+ (+) Handle the command sequence.
+ (+) Transmit data in blocking, interrupt or DMA mode.
+ (+) Receive data in blocking, interrupt or DMA mode.
+ (+) Manage the auto-polling functional mode.
+ (+) Manage the memory-mapped functional mode.
+
+@endverbatim
+ * @{
+ */
+
+/**
+ * @brief Handle QSPI interrupt request.
+ * @param hqspi QSPI handle
+ * @retval None
+ */
+void HAL_QSPI_IRQHandler(QSPI_HandleTypeDef *hqspi)
+{
+ __IO uint32_t *data_reg;
+ uint32_t flag = READ_REG(hqspi->Instance->SR);
+ uint32_t itsource = READ_REG(hqspi->Instance->CR);
+
+ /* QSPI Fifo Threshold interrupt occurred ----------------------------------*/
+ if(((flag & QSPI_FLAG_FT) != 0U) && ((itsource & QSPI_IT_FT) != 0U))
+ {
+ data_reg = &hqspi->Instance->DR;
+
+ if(hqspi->State == HAL_QSPI_STATE_BUSY_INDIRECT_TX)
+ {
+ /* Transmission process */
+ while(__HAL_QSPI_GET_FLAG(hqspi, QSPI_FLAG_FT) != RESET)
+ {
+ if (hqspi->TxXferCount > 0U)
+ {
+ /* Fill the FIFO until the threshold is reached */
+ *((__IO uint8_t *)data_reg) = *hqspi->pTxBuffPtr;
+ hqspi->pTxBuffPtr++;
+ hqspi->TxXferCount--;
+ }
+ else
+ {
+ /* No more data available for the transfer */
+ /* Disable the QSPI FIFO Threshold Interrupt */
+ __HAL_QSPI_DISABLE_IT(hqspi, QSPI_IT_FT);
+ break;
+ }
+ }
+ }
+ else if(hqspi->State == HAL_QSPI_STATE_BUSY_INDIRECT_RX)
+ {
+ /* Receiving Process */
+ while(__HAL_QSPI_GET_FLAG(hqspi, QSPI_FLAG_FT) != RESET)
+ {
+ if (hqspi->RxXferCount > 0U)
+ {
+ /* Read the FIFO until the threshold is reached */
+ *hqspi->pRxBuffPtr = *((__IO uint8_t *)data_reg);
+ hqspi->pRxBuffPtr++;
+ hqspi->RxXferCount--;
+ }
+ else
+ {
+ /* All data have been received for the transfer */
+ /* Disable the QSPI FIFO Threshold Interrupt */
+ __HAL_QSPI_DISABLE_IT(hqspi, QSPI_IT_FT);
+ break;
+ }
+ }
+ }
+ else
+ {
+ /* Nothing to do */
+ }
+
+ /* FIFO Threshold callback */
+#if (USE_HAL_QSPI_REGISTER_CALLBACKS == 1)
+ hqspi->FifoThresholdCallback(hqspi);
+#else
+ HAL_QSPI_FifoThresholdCallback(hqspi);
+#endif
+ }
+
+ /* QSPI Transfer Complete interrupt occurred -------------------------------*/
+ else if(((flag & QSPI_FLAG_TC) != 0U) && ((itsource & QSPI_IT_TC) != 0U))
+ {
+ /* Clear interrupt */
+ WRITE_REG(hqspi->Instance->FCR, QSPI_FLAG_TC);
+
+ /* Disable the QSPI FIFO Threshold, Transfer Error and Transfer complete Interrupts */
+ __HAL_QSPI_DISABLE_IT(hqspi, QSPI_IT_TC | QSPI_IT_TE | QSPI_IT_FT);
+
+ /* Transfer complete callback */
+ if(hqspi->State == HAL_QSPI_STATE_BUSY_INDIRECT_TX)
+ {
+ if ((hqspi->Instance->CR & QUADSPI_CR_DMAEN) != 0U)
+ {
+ /* Disable using MDMA by clearing DMAEN; the DMAEN bit is "reserved",
+ but it has no impact on H7 hardware and clearing it minimizes the code footprint */
+ CLEAR_BIT(hqspi->Instance->CR, QUADSPI_CR_DMAEN);
+
+ /* Disable the MDMA channel */
+ __HAL_MDMA_DISABLE(hqspi->hmdma);
+ }
+
+
+ /* Change state of QSPI */
+ hqspi->State = HAL_QSPI_STATE_READY;
+
+ /* TX Complete callback */
+#if (USE_HAL_QSPI_REGISTER_CALLBACKS == 1)
+ hqspi->TxCpltCallback(hqspi);
+#else
+ HAL_QSPI_TxCpltCallback(hqspi);
+#endif
+ }
+ else if(hqspi->State == HAL_QSPI_STATE_BUSY_INDIRECT_RX)
+ {
+ if ((hqspi->Instance->CR & QUADSPI_CR_DMAEN) != 0U)
+ {
+ /* Disable using MDMA by clearing DMAEN; the DMAEN bit is "reserved",
+ but it has no impact on H7 hardware and clearing it minimizes the code footprint */
+ CLEAR_BIT(hqspi->Instance->CR, QUADSPI_CR_DMAEN);
+
+ /* Disable the MDMA channel */
+ __HAL_MDMA_DISABLE(hqspi->hmdma);
+ }
+ else
+ {
+ data_reg = &hqspi->Instance->DR;
+ while(READ_BIT(hqspi->Instance->SR, QUADSPI_SR_FLEVEL) != 0U)
+ {
+ if (hqspi->RxXferCount > 0U)
+ {
+ /* Read the last data received in the FIFO until it is empty */
+ *hqspi->pRxBuffPtr = *((__IO uint8_t *)data_reg);
+ hqspi->pRxBuffPtr++;
+ hqspi->RxXferCount--;
+ }
+ else
+ {
+ /* All data have been received for the transfer */
+ break;
+ }
+ }
+ }
+
+
+ /* Change state of QSPI */
+ hqspi->State = HAL_QSPI_STATE_READY;
+
+ /* RX Complete callback */
+#if (USE_HAL_QSPI_REGISTER_CALLBACKS == 1)
+ hqspi->RxCpltCallback(hqspi);
+#else
+ HAL_QSPI_RxCpltCallback(hqspi);
+#endif
+ }
+ else if(hqspi->State == HAL_QSPI_STATE_BUSY)
+ {
+ /* Change state of QSPI */
+ hqspi->State = HAL_QSPI_STATE_READY;
+
+ /* Command Complete callback */
+#if (USE_HAL_QSPI_REGISTER_CALLBACKS == 1)
+ hqspi->CmdCpltCallback(hqspi);
+#else
+ HAL_QSPI_CmdCpltCallback(hqspi);
+#endif
+ }
+ else if(hqspi->State == HAL_QSPI_STATE_ABORT)
+ {
+ /* Reset functional mode configuration to indirect write mode by default */
+ CLEAR_BIT(hqspi->Instance->CCR, QUADSPI_CCR_FMODE);
+
+ /* Change state of QSPI */
+ hqspi->State = HAL_QSPI_STATE_READY;
+
+ if (hqspi->ErrorCode == HAL_QSPI_ERROR_NONE)
+ {
+ /* Abort called by the user */
+
+ /* Abort Complete callback */
+#if (USE_HAL_QSPI_REGISTER_CALLBACKS == 1)
+ hqspi->AbortCpltCallback(hqspi);
+#else
+ HAL_QSPI_AbortCpltCallback(hqspi);
+#endif
+ }
+ else
+ {
+ /* Abort due to an error (eg : MDMA error) */
+
+ /* Error callback */
+#if (USE_HAL_QSPI_REGISTER_CALLBACKS == 1)
+ hqspi->ErrorCallback(hqspi);
+#else
+ HAL_QSPI_ErrorCallback(hqspi);
+#endif
+ }
+ }
+ else
+ {
+ /* Nothing to do */
+ }
+ }
+
+ /* QSPI Status Match interrupt occurred ------------------------------------*/
+ else if(((flag & QSPI_FLAG_SM) != 0U) && ((itsource & QSPI_IT_SM) != 0U))
+ {
+ /* Clear interrupt */
+ WRITE_REG(hqspi->Instance->FCR, QSPI_FLAG_SM);
+
+ /* Check if the automatic poll mode stop is activated */
+ if(READ_BIT(hqspi->Instance->CR, QUADSPI_CR_APMS) != 0U)
+ {
+ /* Disable the QSPI Transfer Error and Status Match Interrupts */
+ __HAL_QSPI_DISABLE_IT(hqspi, (QSPI_IT_SM | QSPI_IT_TE));
+
+ /* Change state of QSPI */
+ hqspi->State = HAL_QSPI_STATE_READY;
+ }
+
+ /* Status match callback */
+#if (USE_HAL_QSPI_REGISTER_CALLBACKS == 1)
+ hqspi->StatusMatchCallback(hqspi);
+#else
+ HAL_QSPI_StatusMatchCallback(hqspi);
+#endif
+ }
+
+ /* QSPI Transfer Error interrupt occurred ----------------------------------*/
+ else if(((flag & QSPI_FLAG_TE) != 0U) && ((itsource & QSPI_IT_TE) != 0U))
+ {
+ /* Clear interrupt */
+ WRITE_REG(hqspi->Instance->FCR, QSPI_FLAG_TE);
+
+ /* Disable all the QSPI Interrupts */
+ __HAL_QSPI_DISABLE_IT(hqspi, QSPI_IT_SM | QSPI_IT_TC | QSPI_IT_TE | QSPI_IT_FT);
+
+ /* Set error code */
+ hqspi->ErrorCode |= HAL_QSPI_ERROR_TRANSFER;
+
+ if ((hqspi->Instance->CR & QUADSPI_CR_DMAEN) != 0U)
+ {
+ /* Disable using MDMA by clearing DMAEN; the DMAEN bit is "reserved",
+ but it has no impact on H7 hardware and clearing it minimizes the code footprint */
+ CLEAR_BIT(hqspi->Instance->CR, QUADSPI_CR_DMAEN);
+
+ /* Disable the MDMA channel */
+ hqspi->hmdma->XferAbortCallback = QSPI_DMAAbortCplt;
+ if (HAL_MDMA_Abort_IT(hqspi->hmdma) != HAL_OK)
+ {
+ /* Set error code to DMA */
+ hqspi->ErrorCode |= HAL_QSPI_ERROR_DMA;
+
+ /* Change state of QSPI */
+ hqspi->State = HAL_QSPI_STATE_READY;
+
+ /* Error callback */
+#if (USE_HAL_QSPI_REGISTER_CALLBACKS == 1)
+ hqspi->ErrorCallback(hqspi);
+#else
+ HAL_QSPI_ErrorCallback(hqspi);
+#endif
+ }
+ }
+ else
+ {
+ /* Change state of QSPI */
+ hqspi->State = HAL_QSPI_STATE_READY;
+
+ /* Error callback */
+#if (USE_HAL_QSPI_REGISTER_CALLBACKS == 1)
+ hqspi->ErrorCallback(hqspi);
+#else
+ HAL_QSPI_ErrorCallback(hqspi);
+#endif
+ }
+ }
+
+ /* QSPI Timeout interrupt occurred -----------------------------------------*/
+ else if(((flag & QSPI_FLAG_TO) != 0U) && ((itsource & QSPI_IT_TO) != 0U))
+ {
+ /* Clear interrupt */
+ WRITE_REG(hqspi->Instance->FCR, QSPI_FLAG_TO);
+
+ /* Timeout callback */
+#if (USE_HAL_QSPI_REGISTER_CALLBACKS == 1)
+ hqspi->TimeOutCallback(hqspi);
+#else
+ HAL_QSPI_TimeOutCallback(hqspi);
+#endif
+ }
+
+ else
+ {
+ /* Nothing to do */
+ }
+}
+
+/**
+ * @brief Set the command configuration.
+ * @param hqspi QSPI handle
+ * @param cmd : structure that contains the command configuration information
+ * @param Timeout Timeout duration
+ * @note This function is used only in Indirect Read or Write Modes
+ * @retval HAL status
+ */
+HAL_StatusTypeDef HAL_QSPI_Command(QSPI_HandleTypeDef *hqspi, QSPI_CommandTypeDef *cmd, uint32_t Timeout)
+{
+ HAL_StatusTypeDef status;
+ uint32_t tickstart = HAL_GetTick();
+
+ /* Check the parameters */
+ assert_param(IS_QSPI_INSTRUCTION_MODE(cmd->InstructionMode));
+ if (cmd->InstructionMode != QSPI_INSTRUCTION_NONE)
+ {
+ assert_param(IS_QSPI_INSTRUCTION(cmd->Instruction));
+ }
+
+ assert_param(IS_QSPI_ADDRESS_MODE(cmd->AddressMode));
+ if (cmd->AddressMode != QSPI_ADDRESS_NONE)
+ {
+ assert_param(IS_QSPI_ADDRESS_SIZE(cmd->AddressSize));
+ }
+
+ assert_param(IS_QSPI_ALTERNATE_BYTES_MODE(cmd->AlternateByteMode));
+ if (cmd->AlternateByteMode != QSPI_ALTERNATE_BYTES_NONE)
+ {
+ assert_param(IS_QSPI_ALTERNATE_BYTES_SIZE(cmd->AlternateBytesSize));
+ }
+
+ assert_param(IS_QSPI_DUMMY_CYCLES(cmd->DummyCycles));
+ assert_param(IS_QSPI_DATA_MODE(cmd->DataMode));
+
+ assert_param(IS_QSPI_DDR_MODE(cmd->DdrMode));
+ assert_param(IS_QSPI_DDR_HHC(cmd->DdrHoldHalfCycle));
+ assert_param(IS_QSPI_SIOO_MODE(cmd->SIOOMode));
+
+ /* Process locked */
+ __HAL_LOCK(hqspi);
+
+ if(hqspi->State == HAL_QSPI_STATE_READY)
+ {
+ hqspi->ErrorCode = HAL_QSPI_ERROR_NONE;
+
+ /* Update QSPI state */
+ hqspi->State = HAL_QSPI_STATE_BUSY;
+
+ /* Wait till BUSY flag reset */
+ status = QSPI_WaitFlagStateUntilTimeout(hqspi, QSPI_FLAG_BUSY, RESET, tickstart, Timeout);
+
+ if (status == HAL_OK)
+ {
+ /* Call the configuration function */
+ QSPI_Config(hqspi, cmd, QSPI_FUNCTIONAL_MODE_INDIRECT_WRITE);
+
+ if (cmd->DataMode == QSPI_DATA_NONE)
+ {
+ /* When there is no data phase, the transfer starts as soon as the configuration is done,
+ so wait until the TC flag is set to go back to the idle state */
+ status = QSPI_WaitFlagStateUntilTimeout(hqspi, QSPI_FLAG_TC, SET, tickstart, Timeout);
+
+ if (status == HAL_OK)
+ {
+ __HAL_QSPI_CLEAR_FLAG(hqspi, QSPI_FLAG_TC);
+
+ /* Update QSPI state */
+ hqspi->State = HAL_QSPI_STATE_READY;
+ }
+ }
+ else
+ {
+ /* Update QSPI state */
+ hqspi->State = HAL_QSPI_STATE_READY;
+ }
+ }
+ }
+ else
+ {
+ status = HAL_BUSY;
+ }
+
+ /* Process unlocked */
+ __HAL_UNLOCK(hqspi);
+
+ /* Return function status */
+ return status;
+}
+
+/**
+ * @brief Set the command configuration in interrupt mode.
+ * @param hqspi QSPI handle
+ * @param cmd structure that contains the command configuration information
+ * @note This function is used only in Indirect Read or Write Modes
+ * @retval HAL status
+ */
+HAL_StatusTypeDef HAL_QSPI_Command_IT(QSPI_HandleTypeDef *hqspi, QSPI_CommandTypeDef *cmd)
+{
+ HAL_StatusTypeDef status;
+ uint32_t tickstart = HAL_GetTick();
+
+ /* Check the parameters */
+ assert_param(IS_QSPI_INSTRUCTION_MODE(cmd->InstructionMode));
+ if (cmd->InstructionMode != QSPI_INSTRUCTION_NONE)
+ {
+ assert_param(IS_QSPI_INSTRUCTION(cmd->Instruction));
+ }
+
+ assert_param(IS_QSPI_ADDRESS_MODE(cmd->AddressMode));
+ if (cmd->AddressMode != QSPI_ADDRESS_NONE)
+ {
+ assert_param(IS_QSPI_ADDRESS_SIZE(cmd->AddressSize));
+ }
+
+ assert_param(IS_QSPI_ALTERNATE_BYTES_MODE(cmd->AlternateByteMode));
+ if (cmd->AlternateByteMode != QSPI_ALTERNATE_BYTES_NONE)
+ {
+ assert_param(IS_QSPI_ALTERNATE_BYTES_SIZE(cmd->AlternateBytesSize));
+ }
+
+ assert_param(IS_QSPI_DUMMY_CYCLES(cmd->DummyCycles));
+ assert_param(IS_QSPI_DATA_MODE(cmd->DataMode));
+
+ assert_param(IS_QSPI_DDR_MODE(cmd->DdrMode));
+ assert_param(IS_QSPI_DDR_HHC(cmd->DdrHoldHalfCycle));
+ assert_param(IS_QSPI_SIOO_MODE(cmd->SIOOMode));
+
+ /* Process locked */
+ __HAL_LOCK(hqspi);
+
+ if(hqspi->State == HAL_QSPI_STATE_READY)
+ {
+ hqspi->ErrorCode = HAL_QSPI_ERROR_NONE;
+
+ /* Update QSPI state */
+ hqspi->State = HAL_QSPI_STATE_BUSY;
+
+ /* Wait till BUSY flag reset */
+ status = QSPI_WaitFlagStateUntilTimeout(hqspi, QSPI_FLAG_BUSY, RESET, tickstart, hqspi->Timeout);
+
+ if (status == HAL_OK)
+ {
+ if (cmd->DataMode == QSPI_DATA_NONE)
+ {
+ /* Clear interrupt */
+ __HAL_QSPI_CLEAR_FLAG(hqspi, QSPI_FLAG_TE | QSPI_FLAG_TC);
+ }
+
+ /* Call the configuration function */
+ QSPI_Config(hqspi, cmd, QSPI_FUNCTIONAL_MODE_INDIRECT_WRITE);
+
+ if (cmd->DataMode == QSPI_DATA_NONE)
+ {
+ /* When there is no data phase, the transfer starts as soon as the configuration is done,
+ so activate the TC and TE interrupts */
+ /* Process unlocked */
+ __HAL_UNLOCK(hqspi);
+
+ /* Enable the QSPI Transfer Error Interrupt */
+ __HAL_QSPI_ENABLE_IT(hqspi, QSPI_IT_TE | QSPI_IT_TC);
+ }
+ else
+ {
+ /* Update QSPI state */
+ hqspi->State = HAL_QSPI_STATE_READY;
+
+ /* Process unlocked */
+ __HAL_UNLOCK(hqspi);
+ }
+ }
+ else
+ {
+ /* Process unlocked */
+ __HAL_UNLOCK(hqspi);
+ }
+ }
+ else
+ {
+ status = HAL_BUSY;
+
+ /* Process unlocked */
+ __HAL_UNLOCK(hqspi);
+ }
+
+ /* Return function status */
+ return status;
+}
+
+/**
+ * @brief Transmit an amount of data in blocking mode.
+ * @param hqspi QSPI handle
+ * @param pData pointer to data buffer
+ * @param Timeout Timeout duration
+ * @note This function is used only in Indirect Write Mode
+ * @retval HAL status
+ */
+HAL_StatusTypeDef HAL_QSPI_Transmit(QSPI_HandleTypeDef *hqspi, uint8_t *pData, uint32_t Timeout)
+{
+ HAL_StatusTypeDef status = HAL_OK;
+ uint32_t tickstart = HAL_GetTick();
+ __IO uint32_t *data_reg = &hqspi->Instance->DR;
+
+ /* Process locked */
+ __HAL_LOCK(hqspi);
+
+ if(hqspi->State == HAL_QSPI_STATE_READY)
+ {
+ hqspi->ErrorCode = HAL_QSPI_ERROR_NONE;
+
+ if(pData != NULL )
+ {
+ /* Update state */
+ hqspi->State = HAL_QSPI_STATE_BUSY_INDIRECT_TX;
+
+ /* Configure counters and size of the handle */
+ hqspi->TxXferCount = READ_REG(hqspi->Instance->DLR) + 1U;
+ hqspi->TxXferSize = READ_REG(hqspi->Instance->DLR) + 1U;
+ hqspi->pTxBuffPtr = pData;
+
+ /* Configure QSPI: CCR register with functional mode as indirect write */
+ MODIFY_REG(hqspi->Instance->CCR, QUADSPI_CCR_FMODE, QSPI_FUNCTIONAL_MODE_INDIRECT_WRITE);
+
+ while(hqspi->TxXferCount > 0U)
+ {
+ /* Wait until FT flag is set to send data */
+ status = QSPI_WaitFlagStateUntilTimeout(hqspi, QSPI_FLAG_FT, SET, tickstart, Timeout);
+
+ if (status != HAL_OK)
+ {
+ break;
+ }
+
+ *((__IO uint8_t *)data_reg) = *hqspi->pTxBuffPtr;
+ hqspi->pTxBuffPtr++;
+ hqspi->TxXferCount--;
+ }
+
+ if (status == HAL_OK)
+ {
+ /* Wait until TC flag is set to go back in idle state */
+ status = QSPI_WaitFlagStateUntilTimeout(hqspi, QSPI_FLAG_TC, SET, tickstart, Timeout);
+
+ if (status == HAL_OK)
+ {
+ /* Clear Transfer Complete bit */
+ __HAL_QSPI_CLEAR_FLAG(hqspi, QSPI_FLAG_TC);
+
+ }
+ }
+
+ /* Update QSPI state */
+ hqspi->State = HAL_QSPI_STATE_READY;
+ }
+ else
+ {
+ hqspi->ErrorCode |= HAL_QSPI_ERROR_INVALID_PARAM;
+ status = HAL_ERROR;
+ }
+ }
+ else
+ {
+ status = HAL_BUSY;
+ }
+
+ /* Process unlocked */
+ __HAL_UNLOCK(hqspi);
+
+ return status;
+}
+
+
+/**
+ * @brief Receive an amount of data in blocking mode.
+ * @param hqspi QSPI handle
+ * @param pData pointer to data buffer
+ * @param Timeout Timeout duration
+ * @note This function is used only in Indirect Read Mode
+ * @retval HAL status
+ */
+HAL_StatusTypeDef HAL_QSPI_Receive(QSPI_HandleTypeDef *hqspi, uint8_t *pData, uint32_t Timeout)
+{
+ HAL_StatusTypeDef status = HAL_OK;
+ uint32_t tickstart = HAL_GetTick();
+ uint32_t addr_reg = READ_REG(hqspi->Instance->AR);
+ __IO uint32_t *data_reg = &hqspi->Instance->DR;
+
+ /* Process locked */
+ __HAL_LOCK(hqspi);
+
+ if(hqspi->State == HAL_QSPI_STATE_READY)
+ {
+ hqspi->ErrorCode = HAL_QSPI_ERROR_NONE;
+
+ if(pData != NULL )
+ {
+ /* Update state */
+ hqspi->State = HAL_QSPI_STATE_BUSY_INDIRECT_RX;
+
+ /* Configure counters and size of the handle */
+ hqspi->RxXferCount = READ_REG(hqspi->Instance->DLR) + 1U;
+ hqspi->RxXferSize = READ_REG(hqspi->Instance->DLR) + 1U;
+ hqspi->pRxBuffPtr = pData;
+
+ /* Configure QSPI: CCR register with functional mode as indirect read */
+ MODIFY_REG(hqspi->Instance->CCR, QUADSPI_CCR_FMODE, QSPI_FUNCTIONAL_MODE_INDIRECT_READ);
+
+ /* Start the transfer by re-writing the address in AR register */
+ WRITE_REG(hqspi->Instance->AR, addr_reg);
+
+ while(hqspi->RxXferCount > 0U)
+ {
+ /* Wait until FT or TC flag is set to read received data */
+ status = QSPI_WaitFlagStateUntilTimeout(hqspi, (QSPI_FLAG_FT | QSPI_FLAG_TC), SET, tickstart, Timeout);
+
+ if (status != HAL_OK)
+ {
+ break;
+ }
+
+ *hqspi->pRxBuffPtr = *((__IO uint8_t *)data_reg);
+ hqspi->pRxBuffPtr++;
+ hqspi->RxXferCount--;
+ }
+
+ if (status == HAL_OK)
+ {
+ /* Wait until TC flag is set to go back in idle state */
+ status = QSPI_WaitFlagStateUntilTimeout(hqspi, QSPI_FLAG_TC, SET, tickstart, Timeout);
+
+ if (status == HAL_OK)
+ {
+ /* Clear Transfer Complete bit */
+ __HAL_QSPI_CLEAR_FLAG(hqspi, QSPI_FLAG_TC);
+
+ }
+ }
+
+ /* Update QSPI state */
+ hqspi->State = HAL_QSPI_STATE_READY;
+ }
+ else
+ {
+ hqspi->ErrorCode |= HAL_QSPI_ERROR_INVALID_PARAM;
+ status = HAL_ERROR;
+ }
+ }
+ else
+ {
+ status = HAL_BUSY;
+ }
+
+ /* Process unlocked */
+ __HAL_UNLOCK(hqspi);
+
+ return status;
+}
+
+/**
+ * @brief Send an amount of data in non-blocking mode with interrupt.
+ * @param hqspi QSPI handle
+ * @param pData pointer to data buffer
+ * @note This function is used only in Indirect Write Mode
+ * @retval HAL status
+ */
+HAL_StatusTypeDef HAL_QSPI_Transmit_IT(QSPI_HandleTypeDef *hqspi, uint8_t *pData)
+{
+ HAL_StatusTypeDef status = HAL_OK;
+
+ /* Process locked */
+ __HAL_LOCK(hqspi);
+
+ if(hqspi->State == HAL_QSPI_STATE_READY)
+ {
+ hqspi->ErrorCode = HAL_QSPI_ERROR_NONE;
+
+ if(pData != NULL )
+ {
+ /* Update state */
+ hqspi->State = HAL_QSPI_STATE_BUSY_INDIRECT_TX;
+
+ /* Configure counters and size of the handle */
+ hqspi->TxXferCount = READ_REG(hqspi->Instance->DLR) + 1U;
+ hqspi->TxXferSize = READ_REG(hqspi->Instance->DLR) + 1U;
+ hqspi->pTxBuffPtr = pData;
+
+ /* Clear interrupt */
+ __HAL_QSPI_CLEAR_FLAG(hqspi, QSPI_FLAG_TE | QSPI_FLAG_TC);
+
+ /* Configure QSPI: CCR register with functional mode as indirect write */
+ MODIFY_REG(hqspi->Instance->CCR, QUADSPI_CCR_FMODE, QSPI_FUNCTIONAL_MODE_INDIRECT_WRITE);
+
+ /* Process unlocked */
+ __HAL_UNLOCK(hqspi);
+
+ /* Enable the QSPI transfer error, FIFO threshold and transfer complete Interrupts */
+ __HAL_QSPI_ENABLE_IT(hqspi, QSPI_IT_TE | QSPI_IT_FT | QSPI_IT_TC);
+ }
+ else
+ {
+ hqspi->ErrorCode |= HAL_QSPI_ERROR_INVALID_PARAM;
+ status = HAL_ERROR;
+
+ /* Process unlocked */
+ __HAL_UNLOCK(hqspi);
+ }
+ }
+ else
+ {
+ status = HAL_BUSY;
+
+ /* Process unlocked */
+ __HAL_UNLOCK(hqspi);
+ }
+
+ return status;
+}
+
+/**
+ * @brief Receive an amount of data in non-blocking mode with interrupt.
+ * @param hqspi QSPI handle
+ * @param pData pointer to data buffer
+ * @note This function is used only in Indirect Read Mode
+ * @retval HAL status
+ */
+HAL_StatusTypeDef HAL_QSPI_Receive_IT(QSPI_HandleTypeDef *hqspi, uint8_t *pData)
+{
+ HAL_StatusTypeDef status = HAL_OK;
+ uint32_t addr_reg = READ_REG(hqspi->Instance->AR);
+
+ /* Process locked */
+ __HAL_LOCK(hqspi);
+
+ if(hqspi->State == HAL_QSPI_STATE_READY)
+ {
+ hqspi->ErrorCode = HAL_QSPI_ERROR_NONE;
+
+ if(pData != NULL )
+ {
+ /* Update state */
+ hqspi->State = HAL_QSPI_STATE_BUSY_INDIRECT_RX;
+
+ /* Configure counters and size of the handle */
+ hqspi->RxXferCount = READ_REG(hqspi->Instance->DLR) + 1U;
+ hqspi->RxXferSize = READ_REG(hqspi->Instance->DLR) + 1U;
+ hqspi->pRxBuffPtr = pData;
+
+ /* Clear interrupt */
+ __HAL_QSPI_CLEAR_FLAG(hqspi, QSPI_FLAG_TE | QSPI_FLAG_TC);
+
+ /* Configure QSPI: CCR register with functional mode as indirect read */
+ MODIFY_REG(hqspi->Instance->CCR, QUADSPI_CCR_FMODE, QSPI_FUNCTIONAL_MODE_INDIRECT_READ);
+
+ /* Start the transfer by re-writing the address in AR register */
+ WRITE_REG(hqspi->Instance->AR, addr_reg);
+
+ /* Process unlocked */
+ __HAL_UNLOCK(hqspi);
+
+ /* Enable the QSPI transfer error, FIFO threshold and transfer complete Interrupts */
+ __HAL_QSPI_ENABLE_IT(hqspi, QSPI_IT_TE | QSPI_IT_FT | QSPI_IT_TC);
+ }
+ else
+ {
+ hqspi->ErrorCode |= HAL_QSPI_ERROR_INVALID_PARAM;
+ status = HAL_ERROR;
+
+ /* Process unlocked */
+ __HAL_UNLOCK(hqspi);
+ }
+ }
+ else
+ {
+ status = HAL_BUSY;
+
+ /* Process unlocked */
+ __HAL_UNLOCK(hqspi);
+ }
+
+ return status;
+}
+
+/**
+ * @brief Send an amount of data in non-blocking mode with DMA.
+ * @param hqspi QSPI handle
+ * @param pData pointer to data buffer
+ * @note This function is used only in Indirect Write Mode
+ * @retval HAL status
+ */
+HAL_StatusTypeDef HAL_QSPI_Transmit_DMA(QSPI_HandleTypeDef *hqspi, uint8_t *pData)
+{
+ HAL_StatusTypeDef status = HAL_OK;
+ uint32_t data_size = (READ_REG(hqspi->Instance->DLR) + 1U);
+
+ /* Process locked */
+ __HAL_LOCK(hqspi);
+
+ if(hqspi->State == HAL_QSPI_STATE_READY)
+ {
+ /* Clear the error code */
+ hqspi->ErrorCode = HAL_QSPI_ERROR_NONE;
+
+ if(pData != NULL )
+ {
+ /* Configure counters of the handle */
+ hqspi->TxXferCount = data_size;
+
+ /* Update state */
+ hqspi->State = HAL_QSPI_STATE_BUSY_INDIRECT_TX;
+
+ /* Clear interrupt */
+ __HAL_QSPI_CLEAR_FLAG(hqspi, (QSPI_FLAG_TE | QSPI_FLAG_TC));
+
+ /* Configure size and pointer of the handle */
+ hqspi->TxXferSize = hqspi->TxXferCount;
+ hqspi->pTxBuffPtr = pData;
+
+ /* Configure QSPI: CCR register with functional mode as indirect write */
+ MODIFY_REG(hqspi->Instance->CCR, QUADSPI_CCR_FMODE, QSPI_FUNCTIONAL_MODE_INDIRECT_WRITE);
+
+ /* Set the QSPI MDMA transfer complete callback */
+ hqspi->hmdma->XferCpltCallback = QSPI_DMATxCplt;
+
+ /* Set the MDMA error callback */
+ hqspi->hmdma->XferErrorCallback = QSPI_DMAError;
+
+ /* Clear the MDMA abort callback */
+ hqspi->hmdma->XferAbortCallback = NULL;
+
+ /* In transmit mode, the MDMA destination is the QSPI DR register: force the MDMA destination increment to disable */
+ MODIFY_REG(hqspi->hmdma->Instance->CTCR, (MDMA_CTCR_DINC | MDMA_CTCR_DINCOS) ,MDMA_DEST_INC_DISABLE);
+
+ /* Update MDMA configuration with the correct SourceInc field for Write operation */
+ if (hqspi->hmdma->Init.SourceDataSize == MDMA_SRC_DATASIZE_BYTE)
+ {
+ MODIFY_REG(hqspi->hmdma->Instance->CTCR, (MDMA_CTCR_SINC | MDMA_CTCR_SINCOS) , MDMA_SRC_INC_BYTE);
+ }
+ else if (hqspi->hmdma->Init.SourceDataSize == MDMA_SRC_DATASIZE_HALFWORD)
+ {
+ MODIFY_REG(hqspi->hmdma->Instance->CTCR, (MDMA_CTCR_SINC | MDMA_CTCR_SINCOS) , MDMA_SRC_INC_HALFWORD);
+ }
+ else if (hqspi->hmdma->Init.SourceDataSize == MDMA_SRC_DATASIZE_WORD)
+ {
+ MODIFY_REG(hqspi->hmdma->Instance->CTCR, (MDMA_CTCR_SINC | MDMA_CTCR_SINCOS) , MDMA_SRC_INC_WORD);
+ }
+ else
+ {
+ /* in case of incorrect source data size */
+ hqspi->ErrorCode |= HAL_QSPI_ERROR_DMA;
+ status = HAL_ERROR;
+ }
+
+ /* Enable the QSPI transfer error Interrupt */
+ __HAL_QSPI_ENABLE_IT(hqspi, QSPI_IT_TE);
+
+ /* Use DMAEN bit with no impact on H7 HW to record MDMA transfer request */
+ SET_BIT(hqspi->Instance->CR, QUADSPI_CR_DMAEN);
+
+
+ /* Enable the QSPI transmit MDMA */
+ if (HAL_MDMA_Start_IT(hqspi->hmdma, (uint32_t)pData, (uint32_t)&hqspi->Instance->DR, hqspi->TxXferSize, 1) == HAL_OK)
+ {
+ /* Process unlocked */
+ __HAL_UNLOCK(hqspi);
+ }
+ else
+ {
+ /* Clear DMAEN bit with no impact on H7 HW to cancel MDMA transfer request */
+ CLEAR_BIT(hqspi->Instance->CR, QUADSPI_CR_DMAEN);
+
+ status = HAL_ERROR;
+ hqspi->ErrorCode |= HAL_QSPI_ERROR_DMA;
+ hqspi->State = HAL_QSPI_STATE_READY;
+
+ /* Process unlocked */
+ __HAL_UNLOCK(hqspi);
+ }
+ }
+ else
+ {
+ hqspi->ErrorCode |= HAL_QSPI_ERROR_INVALID_PARAM;
+ status = HAL_ERROR;
+
+ /* Process unlocked */
+ __HAL_UNLOCK(hqspi);
+ }
+ }
+ else
+ {
+ status = HAL_BUSY;
+
+ /* Process unlocked */
+ __HAL_UNLOCK(hqspi);
+ }
+
+ return status;
+}
+
+/**
+ * @brief Receive an amount of data in non-blocking mode with DMA.
+ * @param hqspi QSPI handle
+ * @param pData pointer to data buffer.
+ * @note This function is used only in Indirect Read Mode
+ * @retval HAL status
+ */
+HAL_StatusTypeDef HAL_QSPI_Receive_DMA(QSPI_HandleTypeDef *hqspi, uint8_t *pData)
+{
+ HAL_StatusTypeDef status = HAL_OK;
+ uint32_t addr_reg = READ_REG(hqspi->Instance->AR);
+ uint32_t data_size = (READ_REG(hqspi->Instance->DLR) + 1U);
+
+ /* Process locked */
+ __HAL_LOCK(hqspi);
+
+ if(hqspi->State == HAL_QSPI_STATE_READY)
+ {
+ /* Clear the error code */
+ hqspi->ErrorCode = HAL_QSPI_ERROR_NONE;
+
+ if(pData != NULL )
+ {
+ /* Configure counters of the handle */
+ hqspi->RxXferCount = data_size;
+ /* Update state */
+ hqspi->State = HAL_QSPI_STATE_BUSY_INDIRECT_RX;
+
+ /* Clear interrupt */
+ __HAL_QSPI_CLEAR_FLAG(hqspi, (QSPI_FLAG_TE | QSPI_FLAG_TC));
+
+ /* Configure size and pointer of the handle */
+ hqspi->RxXferSize = hqspi->RxXferCount;
+ hqspi->pRxBuffPtr = pData;
+
+ /* Set the QSPI MDMA transfer complete callback */
+ hqspi->hmdma->XferCpltCallback = QSPI_DMARxCplt;
+
+ /* Set the MDMA error callback */
+ hqspi->hmdma->XferErrorCallback = QSPI_DMAError;
+
+ /* Clear the MDMA abort callback */
+ hqspi->hmdma->XferAbortCallback = NULL;
+
+ /* In receive mode, the MDMA source is the QSPI DR register: force the MDMA source increment to disable */
+ MODIFY_REG(hqspi->hmdma->Instance->CTCR, (MDMA_CTCR_SINC | MDMA_CTCR_SINCOS) , MDMA_SRC_INC_DISABLE);
+
+ /* Update MDMA configuration with the correct DestinationInc field for read operation */
+ if (hqspi->hmdma->Init.DestDataSize == MDMA_DEST_DATASIZE_BYTE)
+ {
+ MODIFY_REG(hqspi->hmdma->Instance->CTCR, (MDMA_CTCR_DINC | MDMA_CTCR_DINCOS) , MDMA_DEST_INC_BYTE);
+ }
+ else if (hqspi->hmdma->Init.DestDataSize == MDMA_DEST_DATASIZE_HALFWORD)
+ {
+ MODIFY_REG(hqspi->hmdma->Instance->CTCR, (MDMA_CTCR_DINC | MDMA_CTCR_DINCOS) , MDMA_DEST_INC_HALFWORD);
+ }
+ else if (hqspi->hmdma->Init.DestDataSize == MDMA_DEST_DATASIZE_WORD)
+ {
+ MODIFY_REG(hqspi->hmdma->Instance->CTCR, (MDMA_CTCR_DINC | MDMA_CTCR_DINCOS) , MDMA_DEST_INC_WORD);
+ }
+ else
+ {
+ /* in case of incorrect destination data size */
+ hqspi->ErrorCode |= HAL_QSPI_ERROR_DMA;
+ status = HAL_ERROR;
+ }
+ /* Configure QSPI: CCR register with functional mode as indirect read */
+ MODIFY_REG(hqspi->Instance->CCR, QUADSPI_CCR_FMODE, QSPI_FUNCTIONAL_MODE_INDIRECT_READ);
+
+ /* Start the transfer by re-writing the address in AR register */
+ WRITE_REG(hqspi->Instance->AR, addr_reg);
+
+ /* Enable the QSPI transfer error Interrupt */
+ __HAL_QSPI_ENABLE_IT(hqspi, QSPI_IT_TE);
+
+ /* Use DMAEN bit with no impact on H7 HW to record MDMA transfer request */
+ SET_BIT(hqspi->Instance->CR, QUADSPI_CR_DMAEN);
+
+ /* Enable the MDMA */
+ if (HAL_MDMA_Start_IT(hqspi->hmdma, (uint32_t)&hqspi->Instance->DR, (uint32_t)pData, hqspi->RxXferSize, 1) == HAL_OK)
+ {
+ /* Process unlocked */
+ __HAL_UNLOCK(hqspi);
+ }
+ else
+ {
+ /* Clear DMAEN bit with no impact on H7 HW to cancel MDMA transfer request */
+ CLEAR_BIT(hqspi->Instance->CR, QUADSPI_CR_DMAEN);
+
+ status = HAL_ERROR;
+ hqspi->ErrorCode |= HAL_QSPI_ERROR_DMA;
+ hqspi->State = HAL_QSPI_STATE_READY;
+
+ /* Process unlocked */
+ __HAL_UNLOCK(hqspi);
+ }
+ }
+ else
+ {
+ hqspi->ErrorCode |= HAL_QSPI_ERROR_INVALID_PARAM;
+ status = HAL_ERROR;
+
+ /* Process unlocked */
+ __HAL_UNLOCK(hqspi);
+ }
+ }
+ else
+ {
+ status = HAL_BUSY;
+
+ /* Process unlocked */
+ __HAL_UNLOCK(hqspi);
+ }
+
+ return status;
+}
+
+/**
+ * @brief Configure the QSPI Automatic Polling Mode in blocking mode.
+ * @param hqspi QSPI handle
+ * @param cmd structure that contains the command configuration information.
+ * @param cfg structure that contains the polling configuration information.
+ * @param Timeout Timeout duration
+ * @note This function is used only in Automatic Polling Mode
+ * @retval HAL status
+ */
+HAL_StatusTypeDef HAL_QSPI_AutoPolling(QSPI_HandleTypeDef *hqspi, QSPI_CommandTypeDef *cmd, QSPI_AutoPollingTypeDef *cfg, uint32_t Timeout)
+{
+ HAL_StatusTypeDef status;
+ uint32_t tickstart = HAL_GetTick();
+
+ /* Check the parameters */
+ assert_param(IS_QSPI_INSTRUCTION_MODE(cmd->InstructionMode));
+ if (cmd->InstructionMode != QSPI_INSTRUCTION_NONE)
+ {
+ assert_param(IS_QSPI_INSTRUCTION(cmd->Instruction));
+ }
+
+ assert_param(IS_QSPI_ADDRESS_MODE(cmd->AddressMode));
+ if (cmd->AddressMode != QSPI_ADDRESS_NONE)
+ {
+ assert_param(IS_QSPI_ADDRESS_SIZE(cmd->AddressSize));
+ }
+
+ assert_param(IS_QSPI_ALTERNATE_BYTES_MODE(cmd->AlternateByteMode));
+ if (cmd->AlternateByteMode != QSPI_ALTERNATE_BYTES_NONE)
+ {
+ assert_param(IS_QSPI_ALTERNATE_BYTES_SIZE(cmd->AlternateBytesSize));
+ }
+
+ assert_param(IS_QSPI_DUMMY_CYCLES(cmd->DummyCycles));
+ assert_param(IS_QSPI_DATA_MODE(cmd->DataMode));
+
+ assert_param(IS_QSPI_DDR_MODE(cmd->DdrMode));
+ assert_param(IS_QSPI_DDR_HHC(cmd->DdrHoldHalfCycle));
+ assert_param(IS_QSPI_SIOO_MODE(cmd->SIOOMode));
+
+ assert_param(IS_QSPI_INTERVAL(cfg->Interval));
+ assert_param(IS_QSPI_STATUS_BYTES_SIZE(cfg->StatusBytesSize));
+ assert_param(IS_QSPI_MATCH_MODE(cfg->MatchMode));
+
+ /* Process locked */
+ __HAL_LOCK(hqspi);
+
+ if(hqspi->State == HAL_QSPI_STATE_READY)
+ {
+ hqspi->ErrorCode = HAL_QSPI_ERROR_NONE;
+
+ /* Update state */
+ hqspi->State = HAL_QSPI_STATE_BUSY_AUTO_POLLING;
+
+ /* Wait till BUSY flag reset */
+ status = QSPI_WaitFlagStateUntilTimeout(hqspi, QSPI_FLAG_BUSY, RESET, tickstart, Timeout);
+
+ if (status == HAL_OK)
+ {
+ /* Configure QSPI: PSMAR register with the status match value */
+ WRITE_REG(hqspi->Instance->PSMAR, cfg->Match);
+
+ /* Configure QSPI: PSMKR register with the status mask value */
+ WRITE_REG(hqspi->Instance->PSMKR, cfg->Mask);
+
+ /* Configure QSPI: PIR register with the interval value */
+ WRITE_REG(hqspi->Instance->PIR, cfg->Interval);
+
+ /* Configure QSPI: CR register with Match mode and Automatic stop enabled
+ (otherwise there will be an infinite loop in blocking mode) */
+ MODIFY_REG(hqspi->Instance->CR, (QUADSPI_CR_PMM | QUADSPI_CR_APMS),
+ (cfg->MatchMode | QSPI_AUTOMATIC_STOP_ENABLE));
+
+ /* Call the configuration function */
+ cmd->NbData = cfg->StatusBytesSize;
+ QSPI_Config(hqspi, cmd, QSPI_FUNCTIONAL_MODE_AUTO_POLLING);
+
+ /* Wait until SM flag is set to go back in idle state */
+ status = QSPI_WaitFlagStateUntilTimeout(hqspi, QSPI_FLAG_SM, SET, tickstart, Timeout);
+
+ if (status == HAL_OK)
+ {
+ __HAL_QSPI_CLEAR_FLAG(hqspi, QSPI_FLAG_SM);
+
+ /* Update state */
+ hqspi->State = HAL_QSPI_STATE_READY;
+ }
+ }
+ }
+ else
+ {
+ status = HAL_BUSY;
+ }
+
+ /* Process unlocked */
+ __HAL_UNLOCK(hqspi);
+
+ /* Return function status */
+ return status;
+}
+
+/**
+ * @brief Configure the QSPI Automatic Polling Mode in non-blocking mode.
+ * @param hqspi QSPI handle
+ * @param cmd structure that contains the command configuration information.
+ * @param cfg structure that contains the polling configuration information.
+ * @note This function is used only in Automatic Polling Mode
+ * @retval HAL status
+ */
+HAL_StatusTypeDef HAL_QSPI_AutoPolling_IT(QSPI_HandleTypeDef *hqspi, QSPI_CommandTypeDef *cmd, QSPI_AutoPollingTypeDef *cfg)
+{
+ HAL_StatusTypeDef status;
+ uint32_t tickstart = HAL_GetTick();
+
+ /* Check the parameters */
+ assert_param(IS_QSPI_INSTRUCTION_MODE(cmd->InstructionMode));
+ if (cmd->InstructionMode != QSPI_INSTRUCTION_NONE)
+ {
+ assert_param(IS_QSPI_INSTRUCTION(cmd->Instruction));
+ }
+
+ assert_param(IS_QSPI_ADDRESS_MODE(cmd->AddressMode));
+ if (cmd->AddressMode != QSPI_ADDRESS_NONE)
+ {
+ assert_param(IS_QSPI_ADDRESS_SIZE(cmd->AddressSize));
+ }
+
+ assert_param(IS_QSPI_ALTERNATE_BYTES_MODE(cmd->AlternateByteMode));
+ if (cmd->AlternateByteMode != QSPI_ALTERNATE_BYTES_NONE)
+ {
+ assert_param(IS_QSPI_ALTERNATE_BYTES_SIZE(cmd->AlternateBytesSize));
+ }
+
+ assert_param(IS_QSPI_DUMMY_CYCLES(cmd->DummyCycles));
+ assert_param(IS_QSPI_DATA_MODE(cmd->DataMode));
+
+ assert_param(IS_QSPI_DDR_MODE(cmd->DdrMode));
+ assert_param(IS_QSPI_DDR_HHC(cmd->DdrHoldHalfCycle));
+ assert_param(IS_QSPI_SIOO_MODE(cmd->SIOOMode));
+
+ assert_param(IS_QSPI_INTERVAL(cfg->Interval));
+ assert_param(IS_QSPI_STATUS_BYTES_SIZE(cfg->StatusBytesSize));
+ assert_param(IS_QSPI_MATCH_MODE(cfg->MatchMode));
+ assert_param(IS_QSPI_AUTOMATIC_STOP(cfg->AutomaticStop));
+
+ /* Process locked */
+ __HAL_LOCK(hqspi);
+
+ if(hqspi->State == HAL_QSPI_STATE_READY)
+ {
+ hqspi->ErrorCode = HAL_QSPI_ERROR_NONE;
+
+ /* Update state */
+ hqspi->State = HAL_QSPI_STATE_BUSY_AUTO_POLLING;
+
+ /* Wait till BUSY flag reset */
+ status = QSPI_WaitFlagStateUntilTimeout(hqspi, QSPI_FLAG_BUSY, RESET, tickstart, hqspi->Timeout);
+
+ if (status == HAL_OK)
+ {
+ /* Configure QSPI: PSMAR register with the status match value */
+ WRITE_REG(hqspi->Instance->PSMAR, cfg->Match);
+
+ /* Configure QSPI: PSMKR register with the status mask value */
+ WRITE_REG(hqspi->Instance->PSMKR, cfg->Mask);
+
+ /* Configure QSPI: PIR register with the interval value */
+ WRITE_REG(hqspi->Instance->PIR, cfg->Interval);
+
+ /* Configure QSPI: CR register with Match mode and Automatic stop mode */
+ MODIFY_REG(hqspi->Instance->CR, (QUADSPI_CR_PMM | QUADSPI_CR_APMS),
+ (cfg->MatchMode | cfg->AutomaticStop));
+
+ /* Clear interrupt */
+ __HAL_QSPI_CLEAR_FLAG(hqspi, QSPI_FLAG_TE | QSPI_FLAG_SM);
+
+ /* Call the configuration function */
+ cmd->NbData = cfg->StatusBytesSize;
+ QSPI_Config(hqspi, cmd, QSPI_FUNCTIONAL_MODE_AUTO_POLLING);
+
+ /* Process unlocked */
+ __HAL_UNLOCK(hqspi);
+
+ /* Enable the QSPI Transfer Error and status match Interrupt */
+ __HAL_QSPI_ENABLE_IT(hqspi, (QSPI_IT_SM | QSPI_IT_TE));
+
+ }
+ else
+ {
+ /* Process unlocked */
+ __HAL_UNLOCK(hqspi);
+ }
+ }
+ else
+ {
+ status = HAL_BUSY;
+
+ /* Process unlocked */
+ __HAL_UNLOCK(hqspi);
+ }
+
+ /* Return function status */
+ return status;
+}
+
+/**
+ * @brief Configure the Memory Mapped mode.
+ * @param hqspi QSPI handle
+ * @param cmd structure that contains the command configuration information.
+ * @param cfg structure that contains the memory mapped configuration information.
+ * @note This function is used only in Memory mapped Mode
+ * @retval HAL status
+ */
+HAL_StatusTypeDef HAL_QSPI_MemoryMapped(QSPI_HandleTypeDef *hqspi, QSPI_CommandTypeDef *cmd, QSPI_MemoryMappedTypeDef *cfg)
+{
+ HAL_StatusTypeDef status;
+ uint32_t tickstart = HAL_GetTick();
+
+ /* Check the parameters */
+ assert_param(IS_QSPI_INSTRUCTION_MODE(cmd->InstructionMode));
+ if (cmd->InstructionMode != QSPI_INSTRUCTION_NONE)
+ {
+ assert_param(IS_QSPI_INSTRUCTION(cmd->Instruction));
+ }
+
+ assert_param(IS_QSPI_ADDRESS_MODE(cmd->AddressMode));
+ if (cmd->AddressMode != QSPI_ADDRESS_NONE)
+ {
+ assert_param(IS_QSPI_ADDRESS_SIZE(cmd->AddressSize));
+ }
+
+ assert_param(IS_QSPI_ALTERNATE_BYTES_MODE(cmd->AlternateByteMode));
+ if (cmd->AlternateByteMode != QSPI_ALTERNATE_BYTES_NONE)
+ {
+ assert_param(IS_QSPI_ALTERNATE_BYTES_SIZE(cmd->AlternateBytesSize));
+ }
+
+ assert_param(IS_QSPI_DUMMY_CYCLES(cmd->DummyCycles));
+ assert_param(IS_QSPI_DATA_MODE(cmd->DataMode));
+
+ assert_param(IS_QSPI_DDR_MODE(cmd->DdrMode));
+ assert_param(IS_QSPI_DDR_HHC(cmd->DdrHoldHalfCycle));
+ assert_param(IS_QSPI_SIOO_MODE(cmd->SIOOMode));
+
+ assert_param(IS_QSPI_TIMEOUT_ACTIVATION(cfg->TimeOutActivation));
+
+ /* Process locked */
+ __HAL_LOCK(hqspi);
+
+ if(hqspi->State == HAL_QSPI_STATE_READY)
+ {
+ hqspi->ErrorCode = HAL_QSPI_ERROR_NONE;
+
+ /* Update state */
+ hqspi->State = HAL_QSPI_STATE_BUSY_MEM_MAPPED;
+
+ /* Wait till BUSY flag reset */
+ status = QSPI_WaitFlagStateUntilTimeout(hqspi, QSPI_FLAG_BUSY, RESET, tickstart, hqspi->Timeout);
+
+ if (status == HAL_OK)
+ {
+ /* Configure QSPI: CR register with timeout counter enable */
+ MODIFY_REG(hqspi->Instance->CR, QUADSPI_CR_TCEN, cfg->TimeOutActivation);
+
+ if (cfg->TimeOutActivation == QSPI_TIMEOUT_COUNTER_ENABLE)
+ {
+ assert_param(IS_QSPI_TIMEOUT_PERIOD(cfg->TimeOutPeriod));
+
+ /* Configure QSPI: LPTR register with the low-power timeout value */
+ WRITE_REG(hqspi->Instance->LPTR, cfg->TimeOutPeriod);
+
+ /* Clear interrupt */
+ __HAL_QSPI_CLEAR_FLAG(hqspi, QSPI_FLAG_TO);
+
+ /* Enable the QSPI TimeOut Interrupt */
+ __HAL_QSPI_ENABLE_IT(hqspi, QSPI_IT_TO);
+ }
+
+ /* Call the configuration function */
+ QSPI_Config(hqspi, cmd, QSPI_FUNCTIONAL_MODE_MEMORY_MAPPED);
+ }
+ }
+ else
+ {
+ status = HAL_BUSY;
+ }
+
+ /* Process unlocked */
+ __HAL_UNLOCK(hqspi);
+
+ /* Return function status */
+ return status;
+}
+
+/**
+ * @brief Transfer Error callback.
+ * @param hqspi QSPI handle
+ * @retval None
+ */
+__weak void HAL_QSPI_ErrorCallback(QSPI_HandleTypeDef *hqspi)
+{
+ /* Prevent unused argument(s) compilation warning */
+ UNUSED(hqspi);
+
+ /* NOTE : This function should not be modified, when the callback is needed,
+ the HAL_QSPI_ErrorCallback could be implemented in the user file
+ */
+}
+
+/**
+ * @brief Abort completed callback.
+ * @param hqspi QSPI handle
+ * @retval None
+ */
+__weak void HAL_QSPI_AbortCpltCallback(QSPI_HandleTypeDef *hqspi)
+{
+ /* Prevent unused argument(s) compilation warning */
+ UNUSED(hqspi);
+
+ /* NOTE: This function should not be modified, when the callback is needed,
+ the HAL_QSPI_AbortCpltCallback could be implemented in the user file
+ */
+}
+
+/**
+ * @brief Command completed callback.
+ * @param hqspi QSPI handle
+ * @retval None
+ */
+__weak void HAL_QSPI_CmdCpltCallback(QSPI_HandleTypeDef *hqspi)
+{
+ /* Prevent unused argument(s) compilation warning */
+ UNUSED(hqspi);
+
+ /* NOTE: This function should not be modified, when the callback is needed,
+ the HAL_QSPI_CmdCpltCallback could be implemented in the user file
+ */
+}
+
+/**
+ * @brief Rx Transfer completed callback.
+ * @param hqspi QSPI handle
+ * @retval None
+ */
+__weak void HAL_QSPI_RxCpltCallback(QSPI_HandleTypeDef *hqspi)
+{
+ /* Prevent unused argument(s) compilation warning */
+ UNUSED(hqspi);
+
+ /* NOTE: This function should not be modified, when the callback is needed,
+ the HAL_QSPI_RxCpltCallback could be implemented in the user file
+ */
+}
+
+/**
+ * @brief Tx Transfer completed callback.
+ * @param hqspi QSPI handle
+ * @retval None
+ */
+__weak void HAL_QSPI_TxCpltCallback(QSPI_HandleTypeDef *hqspi)
+{
+ /* Prevent unused argument(s) compilation warning */
+ UNUSED(hqspi);
+
+ /* NOTE: This function should not be modified, when the callback is needed,
+ the HAL_QSPI_TxCpltCallback could be implemented in the user file
+ */
+}
+
+
+/**
+ * @brief FIFO Threshold callback.
+ * @param hqspi QSPI handle
+ * @retval None
+ */
+__weak void HAL_QSPI_FifoThresholdCallback(QSPI_HandleTypeDef *hqspi)
+{
+ /* Prevent unused argument(s) compilation warning */
+ UNUSED(hqspi);
+
+ /* NOTE : This function should not be modified, when the callback is needed,
+ the HAL_QSPI_FifoThresholdCallback could be implemented in the user file
+ */
+}
+
+/**
+ * @brief Status Match callback.
+ * @param hqspi QSPI handle
+ * @retval None
+ */
+__weak void HAL_QSPI_StatusMatchCallback(QSPI_HandleTypeDef *hqspi)
+{
+ /* Prevent unused argument(s) compilation warning */
+ UNUSED(hqspi);
+
+  /* NOTE: This function should not be modified. When the callback is needed,
+           the HAL_QSPI_StatusMatchCallback can be implemented in the user file.
+   */
+}
+
+/**
+ * @brief Timeout callback.
+ * @param hqspi QSPI handle
+ * @retval None
+ */
+__weak void HAL_QSPI_TimeOutCallback(QSPI_HandleTypeDef *hqspi)
+{
+ /* Prevent unused argument(s) compilation warning */
+ UNUSED(hqspi);
+
+  /* NOTE: This function should not be modified. When the callback is needed,
+           the HAL_QSPI_TimeOutCallback can be implemented in the user file.
+   */
+}
+#if (USE_HAL_QSPI_REGISTER_CALLBACKS == 1)
+/**
+ * @brief Register a User QSPI Callback
+ * To be used to override the weak predefined callback
+ * @param hqspi QSPI handle
+ * @param CallbackId ID of the callback to be registered
+ * This parameter can be one of the following values:
+ * @arg @ref HAL_QSPI_ERROR_CB_ID QSPI Error Callback ID
+ * @arg @ref HAL_QSPI_ABORT_CB_ID QSPI Abort Callback ID
+ * @arg @ref HAL_QSPI_FIFO_THRESHOLD_CB_ID QSPI FIFO Threshold Callback ID
+ * @arg @ref HAL_QSPI_CMD_CPLT_CB_ID QSPI Command Complete Callback ID
+ * @arg @ref HAL_QSPI_RX_CPLT_CB_ID QSPI Rx Complete Callback ID
+ * @arg @ref HAL_QSPI_TX_CPLT_CB_ID QSPI Tx Complete Callback ID
+ * @arg @ref HAL_QSPI_STATUS_MATCH_CB_ID QSPI Status Match Callback ID
+ * @arg @ref HAL_QSPI_TIMEOUT_CB_ID QSPI Timeout Callback ID
+ * @arg @ref HAL_QSPI_MSP_INIT_CB_ID QSPI MspInit callback ID
+ * @arg @ref HAL_QSPI_MSP_DEINIT_CB_ID QSPI MspDeInit callback ID
+ * @param pCallback pointer to the Callback function
+ * @retval status
+ */
+HAL_StatusTypeDef HAL_QSPI_RegisterCallback (QSPI_HandleTypeDef *hqspi, HAL_QSPI_CallbackIDTypeDef CallbackId, pQSPI_CallbackTypeDef pCallback)
+{
+ HAL_StatusTypeDef status = HAL_OK;
+
+ if(pCallback == NULL)
+ {
+ /* Update the error code */
+ hqspi->ErrorCode |= HAL_QSPI_ERROR_INVALID_CALLBACK;
+ return HAL_ERROR;
+ }
+
+ /* Process locked */
+ __HAL_LOCK(hqspi);
+
+ if(hqspi->State == HAL_QSPI_STATE_READY)
+ {
+ switch (CallbackId)
+ {
+ case HAL_QSPI_ERROR_CB_ID :
+ hqspi->ErrorCallback = pCallback;
+ break;
+ case HAL_QSPI_ABORT_CB_ID :
+ hqspi->AbortCpltCallback = pCallback;
+ break;
+ case HAL_QSPI_FIFO_THRESHOLD_CB_ID :
+ hqspi->FifoThresholdCallback = pCallback;
+ break;
+ case HAL_QSPI_CMD_CPLT_CB_ID :
+ hqspi->CmdCpltCallback = pCallback;
+ break;
+ case HAL_QSPI_RX_CPLT_CB_ID :
+ hqspi->RxCpltCallback = pCallback;
+ break;
+ case HAL_QSPI_TX_CPLT_CB_ID :
+ hqspi->TxCpltCallback = pCallback;
+ break;
+ case HAL_QSPI_STATUS_MATCH_CB_ID :
+ hqspi->StatusMatchCallback = pCallback;
+ break;
+ case HAL_QSPI_TIMEOUT_CB_ID :
+ hqspi->TimeOutCallback = pCallback;
+ break;
+ case HAL_QSPI_MSP_INIT_CB_ID :
+ hqspi->MspInitCallback = pCallback;
+ break;
+ case HAL_QSPI_MSP_DEINIT_CB_ID :
+ hqspi->MspDeInitCallback = pCallback;
+ break;
+ default :
+ /* Update the error code */
+ hqspi->ErrorCode |= HAL_QSPI_ERROR_INVALID_CALLBACK;
+ /* update return status */
+ status = HAL_ERROR;
+ break;
+ }
+ }
+ else if (hqspi->State == HAL_QSPI_STATE_RESET)
+ {
+ switch (CallbackId)
+ {
+ case HAL_QSPI_MSP_INIT_CB_ID :
+ hqspi->MspInitCallback = pCallback;
+ break;
+ case HAL_QSPI_MSP_DEINIT_CB_ID :
+ hqspi->MspDeInitCallback = pCallback;
+ break;
+ default :
+ /* Update the error code */
+ hqspi->ErrorCode |= HAL_QSPI_ERROR_INVALID_CALLBACK;
+ /* update return status */
+ status = HAL_ERROR;
+ break;
+ }
+ }
+ else
+ {
+ /* Update the error code */
+ hqspi->ErrorCode |= HAL_QSPI_ERROR_INVALID_CALLBACK;
+ /* update return status */
+ status = HAL_ERROR;
+ }
+
+ /* Release Lock */
+ __HAL_UNLOCK(hqspi);
+ return status;
+}
+
+/**
+ * @brief Unregister a User QSPI Callback
+ * QSPI Callback is redirected to the weak predefined callback
+ * @param hqspi QSPI handle
+ * @param CallbackId ID of the callback to be unregistered
+ * This parameter can be one of the following values:
+ * @arg @ref HAL_QSPI_ERROR_CB_ID QSPI Error Callback ID
+ * @arg @ref HAL_QSPI_ABORT_CB_ID QSPI Abort Callback ID
+ * @arg @ref HAL_QSPI_FIFO_THRESHOLD_CB_ID QSPI FIFO Threshold Callback ID
+ * @arg @ref HAL_QSPI_CMD_CPLT_CB_ID QSPI Command Complete Callback ID
+ * @arg @ref HAL_QSPI_RX_CPLT_CB_ID QSPI Rx Complete Callback ID
+ * @arg @ref HAL_QSPI_TX_CPLT_CB_ID QSPI Tx Complete Callback ID
+ * @arg @ref HAL_QSPI_STATUS_MATCH_CB_ID QSPI Status Match Callback ID
+ * @arg @ref HAL_QSPI_TIMEOUT_CB_ID QSPI Timeout Callback ID
+ * @arg @ref HAL_QSPI_MSP_INIT_CB_ID QSPI MspInit callback ID
+ * @arg @ref HAL_QSPI_MSP_DEINIT_CB_ID QSPI MspDeInit callback ID
+ * @retval status
+ */
+HAL_StatusTypeDef HAL_QSPI_UnRegisterCallback (QSPI_HandleTypeDef *hqspi, HAL_QSPI_CallbackIDTypeDef CallbackId)
+{
+ HAL_StatusTypeDef status = HAL_OK;
+
+ /* Process locked */
+ __HAL_LOCK(hqspi);
+
+ if(hqspi->State == HAL_QSPI_STATE_READY)
+ {
+ switch (CallbackId)
+ {
+ case HAL_QSPI_ERROR_CB_ID :
+ hqspi->ErrorCallback = HAL_QSPI_ErrorCallback;
+ break;
+ case HAL_QSPI_ABORT_CB_ID :
+ hqspi->AbortCpltCallback = HAL_QSPI_AbortCpltCallback;
+ break;
+ case HAL_QSPI_FIFO_THRESHOLD_CB_ID :
+ hqspi->FifoThresholdCallback = HAL_QSPI_FifoThresholdCallback;
+ break;
+ case HAL_QSPI_CMD_CPLT_CB_ID :
+ hqspi->CmdCpltCallback = HAL_QSPI_CmdCpltCallback;
+ break;
+ case HAL_QSPI_RX_CPLT_CB_ID :
+ hqspi->RxCpltCallback = HAL_QSPI_RxCpltCallback;
+ break;
+ case HAL_QSPI_TX_CPLT_CB_ID :
+ hqspi->TxCpltCallback = HAL_QSPI_TxCpltCallback;
+ break;
+ case HAL_QSPI_STATUS_MATCH_CB_ID :
+ hqspi->StatusMatchCallback = HAL_QSPI_StatusMatchCallback;
+ break;
+ case HAL_QSPI_TIMEOUT_CB_ID :
+ hqspi->TimeOutCallback = HAL_QSPI_TimeOutCallback;
+ break;
+ case HAL_QSPI_MSP_INIT_CB_ID :
+ hqspi->MspInitCallback = HAL_QSPI_MspInit;
+ break;
+ case HAL_QSPI_MSP_DEINIT_CB_ID :
+ hqspi->MspDeInitCallback = HAL_QSPI_MspDeInit;
+ break;
+ default :
+ /* Update the error code */
+ hqspi->ErrorCode |= HAL_QSPI_ERROR_INVALID_CALLBACK;
+ /* update return status */
+ status = HAL_ERROR;
+ break;
+ }
+ }
+ else if (hqspi->State == HAL_QSPI_STATE_RESET)
+ {
+ switch (CallbackId)
+ {
+ case HAL_QSPI_MSP_INIT_CB_ID :
+ hqspi->MspInitCallback = HAL_QSPI_MspInit;
+ break;
+ case HAL_QSPI_MSP_DEINIT_CB_ID :
+ hqspi->MspDeInitCallback = HAL_QSPI_MspDeInit;
+ break;
+ default :
+ /* Update the error code */
+ hqspi->ErrorCode |= HAL_QSPI_ERROR_INVALID_CALLBACK;
+ /* update return status */
+ status = HAL_ERROR;
+ break;
+ }
+ }
+ else
+ {
+ /* Update the error code */
+ hqspi->ErrorCode |= HAL_QSPI_ERROR_INVALID_CALLBACK;
+ /* update return status */
+ status = HAL_ERROR;
+ }
+
+ /* Release Lock */
+ __HAL_UNLOCK(hqspi);
+ return status;
+}
+#endif /* USE_HAL_QSPI_REGISTER_CALLBACKS == 1 */
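+
+/* Illustrative usage (a hedged sketch, not part of the driver): with
+   USE_HAL_QSPI_REGISTER_CALLBACKS enabled, an application can redirect a
+   callback before starting transfers, for example:
+
+     extern void my_qspi_error_handler(QSPI_HandleTypeDef *hqspi); // hypothetical user function
+
+     if (HAL_QSPI_RegisterCallback(&hqspi, HAL_QSPI_ERROR_CB_ID,
+                                   my_qspi_error_handler) != HAL_OK)
+     {
+       // handle registration failure
+     }
+
+   The handle must be in READY state (or RESET for the Msp callbacks). */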
+
+/**
+ * @}
+ */
+
+/** @defgroup QSPI_Exported_Functions_Group3 Peripheral Control and State functions
+ * @brief QSPI control and State functions
+ *
+@verbatim
+ ===============================================================================
+ ##### Peripheral Control and State functions #####
+ ===============================================================================
+ [..]
+ This subsection provides a set of functions allowing to :
+ (+) Check in run-time the state of the driver.
+ (+) Check the error code set during last operation.
+ (+) Abort any operation.
+
+
+@endverbatim
+ * @{
+ */
+
+/**
+ * @brief Return the QSPI handle state.
+ * @param hqspi QSPI handle
+ * @retval HAL state
+ */
+HAL_QSPI_StateTypeDef HAL_QSPI_GetState(const QSPI_HandleTypeDef *hqspi)
+{
+ /* Return QSPI handle state */
+ return hqspi->State;
+}
+
+/**
+* @brief Return the QSPI error code.
+* @param hqspi QSPI handle
+* @retval QSPI Error Code
+*/
+uint32_t HAL_QSPI_GetError(const QSPI_HandleTypeDef *hqspi)
+{
+ return hqspi->ErrorCode;
+}
+
+/**
+* @brief Abort the current transmission.
+* @param hqspi QSPI handle
+* @retval HAL status
+*/
+HAL_StatusTypeDef HAL_QSPI_Abort(QSPI_HandleTypeDef *hqspi)
+{
+ HAL_StatusTypeDef status = HAL_OK;
+ uint32_t tickstart = HAL_GetTick();
+
+ /* Check if the state is in one of the busy states */
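+  /* (every busy state in HAL_QSPI_StateTypeDef sets bit 1, hence the 0x2 mask) */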
+ if (((uint32_t)hqspi->State & 0x2U) != 0U)
+ {
+ /* Process unlocked */
+ __HAL_UNLOCK(hqspi);
+
+ if ((hqspi->Instance->CR & QUADSPI_CR_DMAEN) != 0U)
+ {
+      /* Disable MDMA use by clearing DMAEN. Note that the DMAEN bit is "reserved"
+         on H7 hardware, but clearing it is harmless and minimizes the code footprint */
+ CLEAR_BIT(hqspi->Instance->CR, QUADSPI_CR_DMAEN);
+
+ /* Abort MDMA */
+ status = HAL_MDMA_Abort(hqspi->hmdma);
+ if(status != HAL_OK)
+ {
+ hqspi->ErrorCode |= HAL_QSPI_ERROR_DMA;
+ }
+ }
+
+ if (__HAL_QSPI_GET_FLAG(hqspi, QSPI_FLAG_BUSY) != RESET)
+ {
+ /* Configure QSPI: CR register with Abort request */
+ SET_BIT(hqspi->Instance->CR, QUADSPI_CR_ABORT);
+
+ /* Wait until TC flag is set to go back in idle state */
+ status = QSPI_WaitFlagStateUntilTimeout(hqspi, QSPI_FLAG_TC, SET, tickstart, hqspi->Timeout);
+
+ if (status == HAL_OK)
+ {
+ __HAL_QSPI_CLEAR_FLAG(hqspi, QSPI_FLAG_TC);
+
+ /* Wait until BUSY flag is reset */
+ status = QSPI_WaitFlagStateUntilTimeout(hqspi, QSPI_FLAG_BUSY, RESET, tickstart, hqspi->Timeout);
+ }
+
+ if (status == HAL_OK)
+ {
+ /* Reset functional mode configuration to indirect write mode by default */
+ CLEAR_BIT(hqspi->Instance->CCR, QUADSPI_CCR_FMODE);
+
+ /* Update state */
+ hqspi->State = HAL_QSPI_STATE_READY;
+ }
+ }
+ else
+ {
+ /* Update state */
+ hqspi->State = HAL_QSPI_STATE_READY;
+ }
+ }
+
+ return status;
+}
+
+/**
+* @brief Abort the current transmission (non-blocking function)
+* @param hqspi QSPI handle
+* @retval HAL status
+*/
+HAL_StatusTypeDef HAL_QSPI_Abort_IT(QSPI_HandleTypeDef *hqspi)
+{
+ HAL_StatusTypeDef status = HAL_OK;
+
+ /* Check if the state is in one of the busy states */
+ if (((uint32_t)hqspi->State & 0x2U) != 0U)
+ {
+ /* Process unlocked */
+ __HAL_UNLOCK(hqspi);
+
+ /* Update QSPI state */
+ hqspi->State = HAL_QSPI_STATE_ABORT;
+
+ /* Disable all interrupts */
+ __HAL_QSPI_DISABLE_IT(hqspi, (QSPI_IT_TO | QSPI_IT_SM | QSPI_IT_FT | QSPI_IT_TC | QSPI_IT_TE));
+
+ if ((hqspi->Instance->CR & QUADSPI_CR_DMAEN) != 0U)
+ {
+      /* Disable MDMA use by clearing DMAEN. Note that the DMAEN bit is "reserved"
+         on H7 hardware, but clearing it is harmless and minimizes the code footprint */
+ CLEAR_BIT(hqspi->Instance->CR, QUADSPI_CR_DMAEN);
+
+ /* Abort MDMA channel */
+ hqspi->hmdma->XferAbortCallback = QSPI_DMAAbortCplt;
+ if (HAL_MDMA_Abort_IT(hqspi->hmdma) != HAL_OK)
+ {
+ /* Change state of QSPI */
+ hqspi->State = HAL_QSPI_STATE_READY;
+
+ /* Abort Complete callback */
+#if (USE_HAL_QSPI_REGISTER_CALLBACKS == 1)
+ hqspi->AbortCpltCallback(hqspi);
+#else
+ HAL_QSPI_AbortCpltCallback(hqspi);
+#endif
+ }
+ }
+ else
+ {
+ if (__HAL_QSPI_GET_FLAG(hqspi, QSPI_FLAG_BUSY) != RESET)
+ {
+ /* Clear interrupt */
+ __HAL_QSPI_CLEAR_FLAG(hqspi, QSPI_FLAG_TC);
+
+ /* Enable the QSPI Transfer Complete Interrupt */
+ __HAL_QSPI_ENABLE_IT(hqspi, QSPI_IT_TC);
+
+ /* Configure QSPI: CR register with Abort request */
+ SET_BIT(hqspi->Instance->CR, QUADSPI_CR_ABORT);
+ }
+ else
+ {
+ /* Change state of QSPI */
+ hqspi->State = HAL_QSPI_STATE_READY;
+ }
+ }
+ }
+ return status;
+}
+
+/** @brief Set QSPI timeout.
+ * @param hqspi QSPI handle.
+ * @param Timeout Timeout for the QSPI memory access.
+ * @retval None
+ */
+void HAL_QSPI_SetTimeout(QSPI_HandleTypeDef *hqspi, uint32_t Timeout)
+{
+ hqspi->Timeout = Timeout;
+}
+
+/** @brief Set QSPI Fifo threshold.
+ * @param hqspi QSPI handle.
+ * @param Threshold Threshold of the Fifo (value between 1 and 16).
+ * @retval HAL status
+ */
+HAL_StatusTypeDef HAL_QSPI_SetFifoThreshold(QSPI_HandleTypeDef *hqspi, uint32_t Threshold)
+{
+ HAL_StatusTypeDef status = HAL_OK;
+
+ /* Process locked */
+ __HAL_LOCK(hqspi);
+
+ if(hqspi->State == HAL_QSPI_STATE_READY)
+ {
+ /* Synchronize init structure with new FIFO threshold value */
+ hqspi->Init.FifoThreshold = Threshold;
+
+ /* Configure QSPI FIFO Threshold */
+ MODIFY_REG(hqspi->Instance->CR, QUADSPI_CR_FTHRES,
+ ((hqspi->Init.FifoThreshold - 1U) << QUADSPI_CR_FTHRES_Pos));
+ }
+ else
+ {
+ status = HAL_BUSY;
+ }
+
+ /* Process unlocked */
+ __HAL_UNLOCK(hqspi);
+
+ /* Return function status */
+ return status;
+}
+
+/** @brief Get QSPI Fifo threshold.
+ * @param hqspi QSPI handle.
+ * @retval Fifo threshold (value between 1 and 16)
+ */
+uint32_t HAL_QSPI_GetFifoThreshold(const QSPI_HandleTypeDef *hqspi)
+{
+ return ((READ_BIT(hqspi->Instance->CR, QUADSPI_CR_FTHRES) >> QUADSPI_CR_FTHRES_Pos) + 1U);
+}
+
+/** @brief Set FlashID.
+ * @param hqspi QSPI handle.
+ * @param FlashID Index of the flash memory to be accessed.
+ * This parameter can be a value of @ref QSPI_Flash_Select.
+ * @note The FlashID is ignored when dual flash mode is enabled.
+ * @retval HAL status
+ */
+HAL_StatusTypeDef HAL_QSPI_SetFlashID(QSPI_HandleTypeDef *hqspi, uint32_t FlashID)
+{
+ HAL_StatusTypeDef status = HAL_OK;
+
+ /* Check the parameter */
+ assert_param(IS_QSPI_FLASH_ID(FlashID));
+
+ /* Process locked */
+ __HAL_LOCK(hqspi);
+
+ if(hqspi->State == HAL_QSPI_STATE_READY)
+ {
+ /* Synchronize init structure with new FlashID value */
+ hqspi->Init.FlashID = FlashID;
+
+ /* Configure QSPI FlashID */
+ MODIFY_REG(hqspi->Instance->CR, QUADSPI_CR_FSEL, FlashID);
+ }
+ else
+ {
+ status = HAL_BUSY;
+ }
+
+ /* Process unlocked */
+ __HAL_UNLOCK(hqspi);
+
+ /* Return function status */
+ return status;
+}
+
+/**
+ * @}
+ */
+
+/**
+ * @}
+ */
+
+/** @defgroup QSPI_Private_Functions QSPI Private Functions
+ * @{
+ */
+
+/**
+ * @brief DMA QSPI receive process complete callback.
+ * @param hmdma MDMA handle
+ * @retval None
+ */
+static void QSPI_DMARxCplt(MDMA_HandleTypeDef *hmdma)
+{
+ QSPI_HandleTypeDef* hqspi = (QSPI_HandleTypeDef*)(hmdma->Parent);
+ hqspi->RxXferCount = 0U;
+
+ /* Enable the QSPI transfer complete Interrupt */
+ __HAL_QSPI_ENABLE_IT(hqspi, QSPI_IT_TC);
+}
+
+/**
+ * @brief DMA QSPI transmit process complete callback.
+ * @param hmdma MDMA handle
+ * @retval None
+ */
+static void QSPI_DMATxCplt(MDMA_HandleTypeDef *hmdma)
+{
+ QSPI_HandleTypeDef* hqspi = (QSPI_HandleTypeDef*)(hmdma->Parent);
+ hqspi->TxXferCount = 0U;
+
+ /* Enable the QSPI transfer complete Interrupt */
+ __HAL_QSPI_ENABLE_IT(hqspi, QSPI_IT_TC);
+}
+
+/**
+ * @brief DMA QSPI communication error callback.
+ * @param hmdma MDMA handle
+ * @retval None
+ */
+static void QSPI_DMAError(MDMA_HandleTypeDef *hmdma)
+{
+ QSPI_HandleTypeDef* hqspi = ( QSPI_HandleTypeDef* )(hmdma->Parent);
+
+ hqspi->RxXferCount = 0U;
+ hqspi->TxXferCount = 0U;
+ hqspi->ErrorCode |= HAL_QSPI_ERROR_DMA;
+
+  /* Disable MDMA use by clearing DMAEN. Note that the DMAEN bit is "reserved"
+     on H7 hardware, but clearing it is harmless and minimizes the code footprint */
+ CLEAR_BIT(hqspi->Instance->CR, QUADSPI_CR_DMAEN);
+
+ /* Abort the QSPI */
+ (void)HAL_QSPI_Abort_IT(hqspi);
+
+}
+
+/**
+ * @brief MDMA QSPI abort complete callback.
+ * @param hmdma MDMA handle
+ * @retval None
+ */
+static void QSPI_DMAAbortCplt(MDMA_HandleTypeDef *hmdma)
+{
+ QSPI_HandleTypeDef* hqspi = ( QSPI_HandleTypeDef* )(hmdma->Parent);
+
+ hqspi->RxXferCount = 0U;
+ hqspi->TxXferCount = 0U;
+
+ if(hqspi->State == HAL_QSPI_STATE_ABORT)
+ {
+ /* MDMA Abort called by QSPI abort */
+ /* Clear interrupt */
+ __HAL_QSPI_CLEAR_FLAG(hqspi, QSPI_FLAG_TC);
+
+ /* Enable the QSPI Transfer Complete Interrupt */
+ __HAL_QSPI_ENABLE_IT(hqspi, QSPI_IT_TC);
+
+ /* Configure QSPI: CR register with Abort request */
+ SET_BIT(hqspi->Instance->CR, QUADSPI_CR_ABORT);
+ }
+ else
+ {
+ /* MDMA Abort called due to a transfer error interrupt */
+ /* Change state of QSPI */
+ hqspi->State = HAL_QSPI_STATE_READY;
+
+ /* Error callback */
+#if (USE_HAL_QSPI_REGISTER_CALLBACKS == 1)
+ hqspi->ErrorCallback(hqspi);
+#else
+ HAL_QSPI_ErrorCallback(hqspi);
+#endif
+ }
+}
+
+/**
+ * @brief Wait for a flag state until timeout.
+ * @param hqspi QSPI handle
+ * @param Flag Flag checked
+ * @param State Value of the flag expected
+ * @param Tickstart Tick start value
+ * @param Timeout Duration of the timeout
+ * @retval HAL status
+ */
+static HAL_StatusTypeDef QSPI_WaitFlagStateUntilTimeout(QSPI_HandleTypeDef *hqspi, uint32_t Flag,
+ FlagStatus State, uint32_t Tickstart, uint32_t Timeout)
+{
+ /* Wait until flag is in expected state */
+ while((__HAL_QSPI_GET_FLAG(hqspi, Flag)) != State)
+ {
+ /* Check for the Timeout */
+ if (Timeout != HAL_MAX_DELAY)
+ {
+ if(((HAL_GetTick() - Tickstart) > Timeout) || (Timeout == 0U))
+ {
+ hqspi->State = HAL_QSPI_STATE_ERROR;
+ hqspi->ErrorCode |= HAL_QSPI_ERROR_TIMEOUT;
+
+ return HAL_ERROR;
+ }
+ }
+ }
+ return HAL_OK;
+}
+
+/**
+ * @brief Configure the communication registers.
+ * @param hqspi QSPI handle
+ * @param cmd structure that contains the command configuration information
+  * @param FunctionalMode functional mode to be configured
+ * This parameter can be one of the following values:
+ * @arg QSPI_FUNCTIONAL_MODE_INDIRECT_WRITE: Indirect write mode
+ * @arg QSPI_FUNCTIONAL_MODE_INDIRECT_READ: Indirect read mode
+ * @arg QSPI_FUNCTIONAL_MODE_AUTO_POLLING: Automatic polling mode
+ * @arg QSPI_FUNCTIONAL_MODE_MEMORY_MAPPED: Memory-mapped mode
+ * @retval None
+ */
+static void QSPI_Config(QSPI_HandleTypeDef *hqspi, QSPI_CommandTypeDef *cmd, uint32_t FunctionalMode)
+{
+ assert_param(IS_QSPI_FUNCTIONAL_MODE(FunctionalMode));
+
+ if ((cmd->DataMode != QSPI_DATA_NONE) && (FunctionalMode != QSPI_FUNCTIONAL_MODE_MEMORY_MAPPED))
+ {
+ /* Configure QSPI: DLR register with the number of data to read or write */
+ WRITE_REG(hqspi->Instance->DLR, (cmd->NbData - 1U));
+ }
+
+ if (cmd->InstructionMode != QSPI_INSTRUCTION_NONE)
+ {
+ if (cmd->AlternateByteMode != QSPI_ALTERNATE_BYTES_NONE)
+ {
+ /* Configure QSPI: ABR register with alternate bytes value */
+ WRITE_REG(hqspi->Instance->ABR, cmd->AlternateBytes);
+
+ if (cmd->AddressMode != QSPI_ADDRESS_NONE)
+ {
+ /*---- Command with instruction, address and alternate bytes ----*/
+ /* Configure QSPI: CCR register with all communications parameters */
+ WRITE_REG(hqspi->Instance->CCR, (cmd->DdrMode | cmd->DdrHoldHalfCycle | cmd->SIOOMode |
+ cmd->DataMode | (cmd->DummyCycles << QUADSPI_CCR_DCYC_Pos) |
+ cmd->AlternateBytesSize | cmd->AlternateByteMode |
+ cmd->AddressSize | cmd->AddressMode | cmd->InstructionMode |
+ cmd->Instruction | FunctionalMode));
+
+ if (FunctionalMode != QSPI_FUNCTIONAL_MODE_MEMORY_MAPPED)
+ {
+ /* Configure QSPI: AR register with address value */
+ WRITE_REG(hqspi->Instance->AR, cmd->Address);
+ }
+ }
+ else
+ {
+ /*---- Command with instruction and alternate bytes ----*/
+ /* Configure QSPI: CCR register with all communications parameters */
+ WRITE_REG(hqspi->Instance->CCR, (cmd->DdrMode | cmd->DdrHoldHalfCycle | cmd->SIOOMode |
+ cmd->DataMode | (cmd->DummyCycles << QUADSPI_CCR_DCYC_Pos) |
+ cmd->AlternateBytesSize | cmd->AlternateByteMode |
+ cmd->AddressMode | cmd->InstructionMode |
+ cmd->Instruction | FunctionalMode));
+
+ /* Clear AR register */
+ CLEAR_REG(hqspi->Instance->AR);
+ }
+ }
+ else
+ {
+ if (cmd->AddressMode != QSPI_ADDRESS_NONE)
+ {
+ /*---- Command with instruction and address ----*/
+ /* Configure QSPI: CCR register with all communications parameters */
+ WRITE_REG(hqspi->Instance->CCR, (cmd->DdrMode | cmd->DdrHoldHalfCycle | cmd->SIOOMode |
+ cmd->DataMode | (cmd->DummyCycles << QUADSPI_CCR_DCYC_Pos) |
+ cmd->AlternateByteMode | cmd->AddressSize | cmd->AddressMode |
+ cmd->InstructionMode | cmd->Instruction | FunctionalMode));
+
+ if (FunctionalMode != QSPI_FUNCTIONAL_MODE_MEMORY_MAPPED)
+ {
+ /* Configure QSPI: AR register with address value */
+ WRITE_REG(hqspi->Instance->AR, cmd->Address);
+ }
+ }
+ else
+ {
+ /*---- Command with only instruction ----*/
+ /* Configure QSPI: CCR register with all communications parameters */
+ WRITE_REG(hqspi->Instance->CCR, (cmd->DdrMode | cmd->DdrHoldHalfCycle | cmd->SIOOMode |
+ cmd->DataMode | (cmd->DummyCycles << QUADSPI_CCR_DCYC_Pos) |
+ cmd->AlternateByteMode | cmd->AddressMode |
+ cmd->InstructionMode | cmd->Instruction | FunctionalMode));
+
+ /* Clear AR register */
+ CLEAR_REG(hqspi->Instance->AR);
+ }
+ }
+ }
+ else
+ {
+ if (cmd->AlternateByteMode != QSPI_ALTERNATE_BYTES_NONE)
+ {
+ /* Configure QSPI: ABR register with alternate bytes value */
+ WRITE_REG(hqspi->Instance->ABR, cmd->AlternateBytes);
+
+ if (cmd->AddressMode != QSPI_ADDRESS_NONE)
+ {
+ /*---- Command with address and alternate bytes ----*/
+ /* Configure QSPI: CCR register with all communications parameters */
+ WRITE_REG(hqspi->Instance->CCR, (cmd->DdrMode | cmd->DdrHoldHalfCycle | cmd->SIOOMode |
+ cmd->DataMode | (cmd->DummyCycles << QUADSPI_CCR_DCYC_Pos) |
+ cmd->AlternateBytesSize | cmd->AlternateByteMode |
+ cmd->AddressSize | cmd->AddressMode |
+ cmd->InstructionMode | FunctionalMode));
+
+ if (FunctionalMode != QSPI_FUNCTIONAL_MODE_MEMORY_MAPPED)
+ {
+ /* Configure QSPI: AR register with address value */
+ WRITE_REG(hqspi->Instance->AR, cmd->Address);
+ }
+ }
+ else
+ {
+ /*---- Command with only alternate bytes ----*/
+ /* Configure QSPI: CCR register with all communications parameters */
+ WRITE_REG(hqspi->Instance->CCR, (cmd->DdrMode | cmd->DdrHoldHalfCycle | cmd->SIOOMode |
+ cmd->DataMode | (cmd->DummyCycles << QUADSPI_CCR_DCYC_Pos) |
+ cmd->AlternateBytesSize | cmd->AlternateByteMode |
+ cmd->AddressMode | cmd->InstructionMode | FunctionalMode));
+
+ /* Clear AR register */
+ CLEAR_REG(hqspi->Instance->AR);
+ }
+ }
+ else
+ {
+ if (cmd->AddressMode != QSPI_ADDRESS_NONE)
+ {
+ /*---- Command with only address ----*/
+ /* Configure QSPI: CCR register with all communications parameters */
+ WRITE_REG(hqspi->Instance->CCR, (cmd->DdrMode | cmd->DdrHoldHalfCycle | cmd->SIOOMode |
+ cmd->DataMode | (cmd->DummyCycles << QUADSPI_CCR_DCYC_Pos) |
+ cmd->AlternateByteMode | cmd->AddressSize |
+ cmd->AddressMode | cmd->InstructionMode | FunctionalMode));
+
+ if (FunctionalMode != QSPI_FUNCTIONAL_MODE_MEMORY_MAPPED)
+ {
+ /* Configure QSPI: AR register with address value */
+ WRITE_REG(hqspi->Instance->AR, cmd->Address);
+ }
+ }
+ else
+ {
+ /*---- Command with only data phase ----*/
+ if (cmd->DataMode != QSPI_DATA_NONE)
+ {
+ /* Configure QSPI: CCR register with all communications parameters */
+ WRITE_REG(hqspi->Instance->CCR, (cmd->DdrMode | cmd->DdrHoldHalfCycle | cmd->SIOOMode |
+ cmd->DataMode | (cmd->DummyCycles << QUADSPI_CCR_DCYC_Pos) |
+ cmd->AlternateByteMode | cmd->AddressMode |
+ cmd->InstructionMode | FunctionalMode));
+
+ /* Clear AR register */
+ CLEAR_REG(hqspi->Instance->AR);
+ }
+ }
+ }
+ }
+}
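+
+/* Illustrative example (a hedged sketch, not part of the driver): a typical
+   command structure that HAL_QSPI_Command() routes through QSPI_Config(),
+   here a generic 1-1-4 fast read. The opcode (0x6B) and dummy-cycle count
+   are flash-part dependent and shown only as plausible values:
+
+     QSPI_CommandTypeDef cmd = {0};
+     cmd.InstructionMode   = QSPI_INSTRUCTION_1_LINE;
+     cmd.Instruction       = 0x6B;                    // e.g. Quad Output Fast Read
+     cmd.AddressMode       = QSPI_ADDRESS_1_LINE;
+     cmd.AddressSize       = QSPI_ADDRESS_24_BITS;
+     cmd.Address           = 0x000000;
+     cmd.AlternateByteMode = QSPI_ALTERNATE_BYTES_NONE;
+     cmd.DataMode          = QSPI_DATA_4_LINES;
+     cmd.DummyCycles       = 8;
+     cmd.NbData            = 256;
+     cmd.DdrMode           = QSPI_DDR_MODE_DISABLE;
+     cmd.DdrHoldHalfCycle  = QSPI_DDR_HHC_ANALOG_DELAY;
+     cmd.SIOOMode          = QSPI_SIOO_INST_EVERY_CMD;
+*/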
+
+/**
+ * @}
+ */
+
+/**
+ * @}
+ */
+
+#endif /* HAL_QSPI_MODULE_ENABLED */
+/**
+ * @}
+ */
+
+/**
+ * @}
+ */
+
+#endif /* defined(QUADSPI) */
diff --git a/cubemx/EWARM/cubemx.ewp b/cubemx/EWARM/cubemx.ewp
index 2ef07b7..80b6af2 100644
--- a/cubemx/EWARM/cubemx.ewp
+++ b/cubemx/EWARM/cubemx.ewp
@@ -1157,11 +1157,14 @@
$PROJ_DIR$/../Drivers/STM32H7xx_HAL_Driver/Src/stm32h7xx_hal_exti.c
- $PROJ_DIR$/../Drivers/STM32H7xx_HAL_Driver/Src/stm32h7xx_ll_sdmmc.c
+ $PROJ_DIR$/../Drivers/STM32H7xx_HAL_Driver/Src/stm32h7xx_hal_qspi.c
$PROJ_DIR$/../Drivers/STM32H7xx_HAL_Driver/Src/stm32h7xx_ll_delayblock.c
+
+ $PROJ_DIR$/../Drivers/STM32H7xx_HAL_Driver/Src/stm32h7xx_ll_sdmmc.c
+
$PROJ_DIR$/../Drivers/STM32H7xx_HAL_Driver/Src/stm32h7xx_hal_sd.c
diff --git a/cubemx/Inc/stm32h7xx_hal_conf.h b/cubemx/Inc/stm32h7xx_hal_conf.h
index 6665b0c..5d94a08 100644
--- a/cubemx/Inc/stm32h7xx_hal_conf.h
+++ b/cubemx/Inc/stm32h7xx_hal_conf.h
@@ -64,7 +64,7 @@
/* #define HAL_IWDG_MODULE_ENABLED */
/* #define HAL_LPTIM_MODULE_ENABLED */
/* #define HAL_LTDC_MODULE_ENABLED */
-/* #define HAL_QSPI_MODULE_ENABLED */
+#define HAL_QSPI_MODULE_ENABLED
/* #define HAL_RAMECC_MODULE_ENABLED */
/* #define HAL_RNG_MODULE_ENABLED */
/* #define HAL_RTC_MODULE_ENABLED */
diff --git a/cubemx/Inc/stm32h7xx_it.h b/cubemx/Inc/stm32h7xx_it.h
index e7e0773..6f0ead2 100644
--- a/cubemx/Inc/stm32h7xx_it.h
+++ b/cubemx/Inc/stm32h7xx_it.h
@@ -59,6 +59,7 @@ void DMA1_Stream0_IRQHandler(void);
void USART1_IRQHandler(void);
void USART2_IRQHandler(void);
void SDMMC1_IRQHandler(void);
+void QUADSPI_IRQHandler(void);
/* USER CODE BEGIN EFP */
/* USER CODE END EFP */
diff --git a/cubemx/Src/main.c b/cubemx/Src/main.c
index 22137fc..0f78269 100644
--- a/cubemx/Src/main.c
+++ b/cubemx/Src/main.c
@@ -41,6 +41,8 @@
/* Private variables ---------------------------------------------------------*/
+QSPI_HandleTypeDef hqspi;
+
SD_HandleTypeDef hsd1;
UART_HandleTypeDef huart1;
@@ -62,6 +64,7 @@ static void MX_FMC_Init(void);
static void MX_SDMMC1_SD_Init(void);
static void MX_USART1_UART_Init(void);
static void MX_USART2_UART_Init(void);
+static void MX_QUADSPI_Init(void);
/* USER CODE BEGIN PFP */
/* USER CODE END PFP */
@@ -108,6 +111,7 @@ __WEAK int main(void)
MX_SDMMC1_SD_Init();
MX_USART1_UART_Init();
MX_USART2_UART_Init();
+ MX_QUADSPI_Init();
/* USER CODE BEGIN 2 */
/* USER CODE END 2 */
@@ -153,7 +157,7 @@ void SystemClock_Config(void)
RCC_OscInitStruct.PLL.PLLM = 4;
RCC_OscInitStruct.PLL.PLLN = 60;
RCC_OscInitStruct.PLL.PLLP = 2;
- RCC_OscInitStruct.PLL.PLLQ = 6;
+ RCC_OscInitStruct.PLL.PLLQ = 8;
RCC_OscInitStruct.PLL.PLLR = 2;
RCC_OscInitStruct.PLL.PLLRGE = RCC_PLL1VCIRANGE_3;
RCC_OscInitStruct.PLL.PLLVCOSEL = RCC_PLL1VCOWIDE;
@@ -182,6 +186,41 @@ void SystemClock_Config(void)
}
}
+/**
+ * @brief QUADSPI Initialization Function
+ * @param None
+ * @retval None
+ */
+static void MX_QUADSPI_Init(void)
+{
+
+ /* USER CODE BEGIN QUADSPI_Init 0 */
+
+ /* USER CODE END QUADSPI_Init 0 */
+
+ /* USER CODE BEGIN QUADSPI_Init 1 */
+
+ /* USER CODE END QUADSPI_Init 1 */
+ /* QUADSPI parameter configuration*/
+ hqspi.Instance = QUADSPI;
+ hqspi.Init.ClockPrescaler = 255;
+ hqspi.Init.FifoThreshold = 1;
+ hqspi.Init.SampleShifting = QSPI_SAMPLE_SHIFTING_NONE;
+ hqspi.Init.FlashSize = 1;
+ hqspi.Init.ChipSelectHighTime = QSPI_CS_HIGH_TIME_1_CYCLE;
+ hqspi.Init.ClockMode = QSPI_CLOCK_MODE_0;
+ hqspi.Init.FlashID = QSPI_FLASH_ID_1;
+ hqspi.Init.DualFlash = QSPI_DUALFLASH_DISABLE;
+ if (HAL_QSPI_Init(&hqspi) != HAL_OK)
+ {
+ Error_Handler();
+ }
+ /* USER CODE BEGIN QUADSPI_Init 2 */
+
+ /* USER CODE END QUADSPI_Init 2 */
+
+}
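+
+/* Note: the Init values above are CubeMX placeholder defaults. FlashSize
+   encodes the addressable size as 2^(FlashSize+1) bytes, so FlashSize = 1
+   exposes only 4 bytes; a 16 MB (2^24-byte) W25Q128 would normally use 23.
+   ClockPrescaler = 255 is likewise a very conservative divider; drivers
+   typically reconfigure both at runtime. */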
+
/**
* @brief SDMMC1 Initialization Function
* @param None
@@ -387,6 +426,7 @@ static void MX_GPIO_Init(void)
/* GPIO Ports Clock Enable */
__HAL_RCC_GPIOF_CLK_ENABLE();
__HAL_RCC_GPIOC_CLK_ENABLE();
+ __HAL_RCC_GPIOA_CLK_ENABLE();
__HAL_RCC_GPIOH_CLK_ENABLE();
__HAL_RCC_GPIOB_CLK_ENABLE();
__HAL_RCC_GPIOG_CLK_ENABLE();
diff --git a/cubemx/Src/stm32h7xx_hal_msp.c b/cubemx/Src/stm32h7xx_hal_msp.c
index f9ff2e5..6f396da 100644
--- a/cubemx/Src/stm32h7xx_hal_msp.c
+++ b/cubemx/Src/stm32h7xx_hal_msp.c
@@ -77,6 +77,120 @@ void HAL_MspInit(void)
/* USER CODE END MspInit 1 */
}
+/**
+ * @brief QSPI MSP Initialization
+ * This function configures the hardware resources used in this example
+ * @param hqspi: QSPI handle pointer
+ * @retval None
+ */
+void HAL_QSPI_MspInit(QSPI_HandleTypeDef* hqspi)
+{
+ GPIO_InitTypeDef GPIO_InitStruct = {0};
+ RCC_PeriphCLKInitTypeDef PeriphClkInitStruct = {0};
+ if(hqspi->Instance==QUADSPI)
+ {
+ /* USER CODE BEGIN QUADSPI_MspInit 0 */
+
+ /* USER CODE END QUADSPI_MspInit 0 */
+
+ /** Initializes the peripherals clock
+ */
+ PeriphClkInitStruct.PeriphClockSelection = RCC_PERIPHCLK_QSPI;
+ PeriphClkInitStruct.QspiClockSelection = RCC_QSPICLKSOURCE_PLL;
+ if (HAL_RCCEx_PeriphCLKConfig(&PeriphClkInitStruct) != HAL_OK)
+ {
+ Error_Handler();
+ }
+
+ /* Peripheral clock enable */
+ __HAL_RCC_QSPI_CLK_ENABLE();
+
+ __HAL_RCC_GPIOF_CLK_ENABLE();
+ __HAL_RCC_GPIOB_CLK_ENABLE();
+ /**QUADSPI GPIO Configuration
+ PF6 ------> QUADSPI_BK1_IO3
+ PF7 ------> QUADSPI_BK1_IO2
+ PF8 ------> QUADSPI_BK1_IO0
+ PF9 ------> QUADSPI_BK1_IO1
+ PB2 ------> QUADSPI_CLK
+ PB6 ------> QUADSPI_BK1_NCS
+ */
+ GPIO_InitStruct.Pin = GPIO_PIN_6|GPIO_PIN_7;
+ GPIO_InitStruct.Mode = GPIO_MODE_AF_PP;
+ GPIO_InitStruct.Pull = GPIO_NOPULL;
+ GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_LOW;
+ GPIO_InitStruct.Alternate = GPIO_AF9_QUADSPI;
+ HAL_GPIO_Init(GPIOF, &GPIO_InitStruct);
+
+ GPIO_InitStruct.Pin = GPIO_PIN_8|GPIO_PIN_9;
+ GPIO_InitStruct.Mode = GPIO_MODE_AF_PP;
+ GPIO_InitStruct.Pull = GPIO_NOPULL;
+ GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_LOW;
+ GPIO_InitStruct.Alternate = GPIO_AF10_QUADSPI;
+ HAL_GPIO_Init(GPIOF, &GPIO_InitStruct);
+
+ GPIO_InitStruct.Pin = GPIO_PIN_2;
+ GPIO_InitStruct.Mode = GPIO_MODE_AF_PP;
+ GPIO_InitStruct.Pull = GPIO_NOPULL;
+ GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_LOW;
+ GPIO_InitStruct.Alternate = GPIO_AF9_QUADSPI;
+ HAL_GPIO_Init(GPIOB, &GPIO_InitStruct);
+
+ GPIO_InitStruct.Pin = GPIO_PIN_6;
+ GPIO_InitStruct.Mode = GPIO_MODE_AF_PP;
+ GPIO_InitStruct.Pull = GPIO_NOPULL;
+ GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_LOW;
+ GPIO_InitStruct.Alternate = GPIO_AF10_QUADSPI;
+ HAL_GPIO_Init(GPIOB, &GPIO_InitStruct);
+
+ /* QUADSPI interrupt Init */
+ HAL_NVIC_SetPriority(QUADSPI_IRQn, 0, 0);
+ HAL_NVIC_EnableIRQ(QUADSPI_IRQn);
+ /* USER CODE BEGIN QUADSPI_MspInit 1 */
+
+ /* USER CODE END QUADSPI_MspInit 1 */
+
+ }
+
+}
+
+/**
+ * @brief QSPI MSP De-Initialization
+  * This function frees the hardware resources used in this example
+ * @param hqspi: QSPI handle pointer
+ * @retval None
+ */
+void HAL_QSPI_MspDeInit(QSPI_HandleTypeDef* hqspi)
+{
+ if(hqspi->Instance==QUADSPI)
+ {
+ /* USER CODE BEGIN QUADSPI_MspDeInit 0 */
+
+ /* USER CODE END QUADSPI_MspDeInit 0 */
+ /* Peripheral clock disable */
+ __HAL_RCC_QSPI_CLK_DISABLE();
+
+ /**QUADSPI GPIO Configuration
+ PF6 ------> QUADSPI_BK1_IO3
+ PF7 ------> QUADSPI_BK1_IO2
+ PF8 ------> QUADSPI_BK1_IO0
+ PF9 ------> QUADSPI_BK1_IO1
+ PB2 ------> QUADSPI_CLK
+ PB6 ------> QUADSPI_BK1_NCS
+ */
+ HAL_GPIO_DeInit(GPIOF, GPIO_PIN_6|GPIO_PIN_7|GPIO_PIN_8|GPIO_PIN_9);
+
+ HAL_GPIO_DeInit(GPIOB, GPIO_PIN_2|GPIO_PIN_6);
+
+ /* QUADSPI interrupt DeInit */
+ HAL_NVIC_DisableIRQ(QUADSPI_IRQn);
+ /* USER CODE BEGIN QUADSPI_MspDeInit 1 */
+
+ /* USER CODE END QUADSPI_MspDeInit 1 */
+ }
+
+}
+
/**
* @brief SD MSP Initialization
* This function configures the hardware resources used in this example
@@ -271,17 +385,17 @@ void HAL_UART_MspInit(UART_HandleTypeDef* huart)
/* Peripheral clock enable */
__HAL_RCC_USART2_CLK_ENABLE();
- __HAL_RCC_GPIOD_CLK_ENABLE();
+ __HAL_RCC_GPIOA_CLK_ENABLE();
/**USART2 GPIO Configuration
- PD5 ------> USART2_TX
- PD6 ------> USART2_RX
+ PA2 ------> USART2_TX
+ PA3 ------> USART2_RX
*/
- GPIO_InitStruct.Pin = GPIO_PIN_5|GPIO_PIN_6;
+ GPIO_InitStruct.Pin = GPIO_PIN_2|GPIO_PIN_3;
GPIO_InitStruct.Mode = GPIO_MODE_AF_PP;
GPIO_InitStruct.Pull = GPIO_NOPULL;
GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_LOW;
GPIO_InitStruct.Alternate = GPIO_AF7_USART2;
- HAL_GPIO_Init(GPIOD, &GPIO_InitStruct);
+ HAL_GPIO_Init(GPIOA, &GPIO_InitStruct);
/* USART2 interrupt Init */
HAL_NVIC_SetPriority(USART2_IRQn, 0, 0);
@@ -333,10 +447,10 @@ void HAL_UART_MspDeInit(UART_HandleTypeDef* huart)
__HAL_RCC_USART2_CLK_DISABLE();
/**USART2 GPIO Configuration
- PD5 ------> USART2_TX
- PD6 ------> USART2_RX
+ PA2 ------> USART2_TX
+ PA3 ------> USART2_RX
*/
- HAL_GPIO_DeInit(GPIOD, GPIO_PIN_5|GPIO_PIN_6);
+ HAL_GPIO_DeInit(GPIOA, GPIO_PIN_2|GPIO_PIN_3);
/* USART2 interrupt DeInit */
HAL_NVIC_DisableIRQ(USART2_IRQn);
diff --git a/cubemx/Src/stm32h7xx_it.c b/cubemx/Src/stm32h7xx_it.c
index 79dc3d0..85b7495 100644
--- a/cubemx/Src/stm32h7xx_it.c
+++ b/cubemx/Src/stm32h7xx_it.c
@@ -55,6 +55,7 @@
/* USER CODE END 0 */
/* External variables --------------------------------------------------------*/
+extern QSPI_HandleTypeDef hqspi;
extern SD_HandleTypeDef hsd1;
extern DMA_HandleTypeDef hdma_usart1_rx;
extern UART_HandleTypeDef huart1;
@@ -257,6 +258,20 @@ void SDMMC1_IRQHandler(void)
/* USER CODE END SDMMC1_IRQn 1 */
}
+/**
+ * @brief This function handles QUADSPI global interrupt.
+ */
+void QUADSPI_IRQHandler(void)
+{
+ /* USER CODE BEGIN QUADSPI_IRQn 0 */
+
+ /* USER CODE END QUADSPI_IRQn 0 */
+ HAL_QSPI_IRQHandler(&hqspi);
+ /* USER CODE BEGIN QUADSPI_IRQn 1 */
+
+ /* USER CODE END QUADSPI_IRQn 1 */
+}
+
/* USER CODE BEGIN 1 */
/* USER CODE END 1 */
diff --git a/cubemx/cubemx.ioc b/cubemx/cubemx.ioc
index cd95974..ba3ca49 100644
--- a/cubemx/cubemx.ioc
+++ b/cubemx/cubemx.ioc
@@ -36,71 +36,78 @@ Mcu.CPN=STM32H743IIT6
Mcu.Family=STM32H7
Mcu.IP0=CORTEX_M7
Mcu.IP1=DMA
+Mcu.IP10=USART2
Mcu.IP2=FMC
Mcu.IP3=MEMORYMAP
Mcu.IP4=NVIC
-Mcu.IP5=RCC
-Mcu.IP6=SDMMC1
-Mcu.IP7=SYS
-Mcu.IP8=USART1
-Mcu.IP9=USART2
-Mcu.IPNb=10
+Mcu.IP5=QUADSPI
+Mcu.IP6=RCC
+Mcu.IP7=SDMMC1
+Mcu.IP8=SYS
+Mcu.IP9=USART1
+Mcu.IPNb=11
Mcu.Name=STM32H743IITx
Mcu.Package=LQFP176
Mcu.Pin0=PF0
Mcu.Pin1=PF1
-Mcu.Pin10=PB1
-Mcu.Pin11=PF11
-Mcu.Pin12=PF12
-Mcu.Pin13=PF13
-Mcu.Pin14=PF14
-Mcu.Pin15=PF15
-Mcu.Pin16=PG0
-Mcu.Pin17=PG1
-Mcu.Pin18=PE7
-Mcu.Pin19=PE8
+Mcu.Pin10=PC0
+Mcu.Pin11=PA2
+Mcu.Pin12=PH2
+Mcu.Pin13=PH3
+Mcu.Pin14=PA3
+Mcu.Pin15=PB0
+Mcu.Pin16=PB1
+Mcu.Pin17=PB2
+Mcu.Pin18=PF11
+Mcu.Pin19=PF12
Mcu.Pin2=PF2
-Mcu.Pin20=PE9
-Mcu.Pin21=PE10
-Mcu.Pin22=PE11
-Mcu.Pin23=PE12
-Mcu.Pin24=PE13
-Mcu.Pin25=PE14
-Mcu.Pin26=PE15
-Mcu.Pin27=PB14
-Mcu.Pin28=PB15
-Mcu.Pin29=PD8
+Mcu.Pin20=PF13
+Mcu.Pin21=PF14
+Mcu.Pin22=PF15
+Mcu.Pin23=PG0
+Mcu.Pin24=PG1
+Mcu.Pin25=PE7
+Mcu.Pin26=PE8
+Mcu.Pin27=PE9
+Mcu.Pin28=PE10
+Mcu.Pin29=PE11
Mcu.Pin3=PF3
-Mcu.Pin30=PD9
-Mcu.Pin31=PD10
-Mcu.Pin32=PD14
-Mcu.Pin33=PD15
-Mcu.Pin34=PG2
-Mcu.Pin35=PG4
-Mcu.Pin36=PG5
-Mcu.Pin37=PG8
-Mcu.Pin38=PC8
-Mcu.Pin39=PC9
+Mcu.Pin30=PE12
+Mcu.Pin31=PE13
+Mcu.Pin32=PE14
+Mcu.Pin33=PE15
+Mcu.Pin34=PB14
+Mcu.Pin35=PB15
+Mcu.Pin36=PD8
+Mcu.Pin37=PD9
+Mcu.Pin38=PD10
+Mcu.Pin39=PD14
Mcu.Pin4=PF4
-Mcu.Pin40=PC10
-Mcu.Pin41=PC11
-Mcu.Pin42=PC12
-Mcu.Pin43=PD0
-Mcu.Pin44=PD1
-Mcu.Pin45=PD2
-Mcu.Pin46=PD5
-Mcu.Pin47=PD6
-Mcu.Pin48=PG15
-Mcu.Pin49=PE0
+Mcu.Pin40=PD15
+Mcu.Pin41=PG2
+Mcu.Pin42=PG4
+Mcu.Pin43=PG5
+Mcu.Pin44=PG8
+Mcu.Pin45=PC8
+Mcu.Pin46=PC9
+Mcu.Pin47=PC10
+Mcu.Pin48=PC11
+Mcu.Pin49=PC12
Mcu.Pin5=PF5
-Mcu.Pin50=PE1
-Mcu.Pin51=VP_SYS_VS_Systick
-Mcu.Pin52=VP_MEMORYMAP_VS_MEMORYMAP
-Mcu.Pin6=PC0
-Mcu.Pin7=PH2
-Mcu.Pin8=PH3
-Mcu.Pin9=PB0
-Mcu.PinsNb=53
+Mcu.Pin50=PD0
+Mcu.Pin51=PD1
+Mcu.Pin52=PD2
+Mcu.Pin53=PG15
+Mcu.Pin54=PB6
+Mcu.Pin55=PE0
+Mcu.Pin56=PE1
+Mcu.Pin57=VP_SYS_VS_Systick
+Mcu.Pin58=VP_MEMORYMAP_VS_MEMORYMAP
+Mcu.Pin6=PF6
+Mcu.Pin7=PF7
+Mcu.Pin8=PF8
+Mcu.Pin9=PF9
+Mcu.PinsNb=59
Mcu.ThirdPartyNb=0
Mcu.UserConstants=
Mcu.UserName=STM32H743IITx
@@ -115,12 +122,17 @@ NVIC.MemoryManagement_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false
NVIC.NonMaskableInt_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false
NVIC.PendSV_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false
NVIC.PriorityGroup=NVIC_PRIORITYGROUP_4
+NVIC.QUADSPI_IRQn=true\:0\:0\:false\:false\:true\:true\:true\:true
NVIC.SDMMC1_IRQn=true\:0\:0\:false\:false\:true\:true\:true\:true
NVIC.SVCall_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false
NVIC.SysTick_IRQn=true\:15\:0\:false\:false\:true\:false\:true\:false
NVIC.USART1_IRQn=true\:0\:0\:false\:false\:true\:true\:true\:true
NVIC.USART2_IRQn=true\:0\:0\:false\:false\:true\:true\:true\:true
NVIC.UsageFault_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false
+PA2.Mode=Asynchronous
+PA2.Signal=USART2_TX
+PA3.Mode=Asynchronous
+PA3.Signal=USART2_RX
PB0.Locked=true
PB0.Signal=GPIO_Output
PB1.Locked=true
@@ -129,6 +141,12 @@ PB14.Mode=Asynchronous
PB14.Signal=USART1_TX
PB15.Mode=Asynchronous
PB15.Signal=USART1_RX
+PB2.Locked=true
+PB2.Mode=Single Bank 1
+PB2.Signal=QUADSPI_CLK
+PB6.Locked=true
+PB6.Mode=Single Bank 1
+PB6.Signal=QUADSPI_BK1_NCS
PC0.Signal=FMC_SDNWE
PC10.Mode=SD_4_bits_Wide_bus
PC10.Signal=SDMMC1_D2
@@ -147,10 +165,6 @@ PD14.Signal=FMC_D0_DA0
PD15.Signal=FMC_D1_DA1
PD2.Mode=SD_4_bits_Wide_bus
PD2.Signal=SDMMC1_CMD
-PD5.Mode=Asynchronous
-PD5.Signal=USART2_TX
-PD6.Mode=Asynchronous
-PD6.Signal=USART2_RX
PD8.Signal=FMC_D13_DA13
PD9.Signal=FMC_D14_DA14
PE0.Signal=FMC_NBL0
@@ -175,6 +189,18 @@ PF2.Signal=FMC_A2
PF3.Signal=FMC_A3
PF4.Signal=FMC_A4
PF5.Signal=FMC_A5
+PF6.Locked=true
+PF6.Mode=Single Bank 1
+PF6.Signal=QUADSPI_BK1_IO3
+PF7.Locked=true
+PF7.Mode=Single Bank 1
+PF7.Signal=QUADSPI_BK1_IO2
+PF8.Locked=true
+PF8.Mode=Single Bank 1
+PF8.Signal=QUADSPI_BK1_IO0
+PF9.Locked=true
+PF9.Mode=Single Bank 1
+PF9.Signal=QUADSPI_BK1_IO1
PG0.Signal=FMC_A10
PG1.Signal=FMC_A11
PG15.Signal=FMC_SDNCAS
@@ -220,7 +246,7 @@ ProjectManager.ToolChainLocation=
ProjectManager.UAScriptAfterPath=
ProjectManager.UAScriptBeforePath=
ProjectManager.UnderRoot=false
-ProjectManager.functionlistsort=1-SystemClock_Config-RCC-false-HAL-false,2-MX_GPIO_Init-GPIO-false-HAL-true,3-MX_DMA_Init-DMA-false-HAL-true,4-MX_FMC_Init-FMC-false-HAL-true,5-MX_SDMMC1_SD_Init-SDMMC1-false-HAL-true,6-MX_USART1_UART_Init-USART1-false-HAL-true,7-MX_USART2_UART_Init-USART2-false-HAL-true,8-MX_LTDC_Init-LTDC-false-HAL-true,0-MX_CORTEX_M7_Init-CORTEX_M7-false-HAL-true
+ProjectManager.functionlistsort=1-SystemClock_Config-RCC-false-HAL-false,2-MX_GPIO_Init-GPIO-false-HAL-true,3-MX_DMA_Init-DMA-false-HAL-true,4-MX_FMC_Init-FMC-false-HAL-true,5-MX_SDMMC1_SD_Init-SDMMC1-false-HAL-true,6-MX_USART1_UART_Init-USART1-false-HAL-true,7-MX_USART2_UART_Init-USART2-false-HAL-true,0-MX_CORTEX_M7_Init-CORTEX_M7-false-HAL-true
RCC.ADCFreq_Value=150000000
RCC.AHB12Freq_Value=240000000
RCC.AHB4Freq_Value=240000000
@@ -238,7 +264,7 @@ RCC.D1PPRE=RCC_APB3_DIV2
RCC.D2PPRE1=RCC_APB1_DIV2
RCC.D2PPRE2=RCC_APB2_DIV2
RCC.D3PPRE=RCC_APB4_DIV2
-RCC.DFSDMACLkFreq_Value=160000000
+RCC.DFSDMACLkFreq_Value=120000000
RCC.DFSDMFreq_Value=120000000
RCC.DIVM1=4
RCC.DIVN1=60
@@ -246,14 +272,14 @@ RCC.DIVN2=150
RCC.DIVP1Freq_Value=480000000
RCC.DIVP2Freq_Value=150000000
RCC.DIVP3Freq_Value=129000000
-RCC.DIVQ1=6
-RCC.DIVQ1Freq_Value=160000000
+RCC.DIVQ1=8
+RCC.DIVQ1Freq_Value=120000000
RCC.DIVQ2Freq_Value=150000000
RCC.DIVQ3Freq_Value=129000000
RCC.DIVR1Freq_Value=480000000
RCC.DIVR2Freq_Value=150000000
RCC.DIVR3Freq_Value=129000000
-RCC.FDCANFreq_Value=160000000
+RCC.FDCANFreq_Value=120000000
RCC.FMCFreq_Value=240000000
RCC.FamilyName=M
RCC.HCLK3ClockFreq_Value=240000000
@@ -262,7 +288,7 @@ RCC.HPRE=RCC_HCLK_DIV2
RCC.HRTIMFreq_Value=240000000
RCC.I2C123Freq_Value=120000000
RCC.I2C4Freq_Value=120000000
-RCC.IPParameters=ADCFreq_Value,AHB12Freq_Value,AHB4Freq_Value,APB1Freq_Value,APB2Freq_Value,APB3Freq_Value,APB4Freq_Value,AXIClockFreq_Value,CECFreq_Value,CKPERFreq_Value,CortexFreq_Value,CpuClockFreq_Value,D1CPREFreq_Value,D1PPRE,D2PPRE1,D2PPRE2,D3PPRE,DFSDMACLkFreq_Value,DFSDMFreq_Value,DIVM1,DIVN1,DIVN2,DIVP1Freq_Value,DIVP2Freq_Value,DIVP3Freq_Value,DIVQ1,DIVQ1Freq_Value,DIVQ2Freq_Value,DIVQ3Freq_Value,DIVR1Freq_Value,DIVR2Freq_Value,DIVR3Freq_Value,FDCANFreq_Value,FMCFreq_Value,FamilyName,HCLK3ClockFreq_Value,HCLKFreq_Value,HPRE,HRTIMFreq_Value,I2C123Freq_Value,I2C4Freq_Value,LPTIM1Freq_Value,LPTIM2Freq_Value,LPTIM345Freq_Value,LPUART1Freq_Value,LTDCFreq_Value,MCO1PinFreq_Value,MCO2PinFreq_Value,PLL2FRACN,PLL3FRACN,PLLFRACN,QSPIFreq_Value,RNGFreq_Value,RTCFreq_Value,SAI1Freq_Value,SAI23Freq_Value,SAI4AFreq_Value,SAI4BFreq_Value,SDMMC1CLockSelection,SDMMCFreq_Value,SPDIFRXFreq_Value,SPI123Freq_Value,SPI45Freq_Value,SPI6Freq_Value,SWPMI1Freq_Value,SYSCLKFreq_VALUE,SYSCLKSource,Tim1OutputFreq_Value,Tim2OutputFreq_Value,TraceFreq_Value,USART16Freq_Value,USART234578Freq_Value,USBFreq_Value,VCO1OutputFreq_Value,VCO2OutputFreq_Value,VCO3OutputFreq_Value,VCOInput1Freq_Value,VCOInput2Freq_Value,VCOInput3Freq_Value
+RCC.IPParameters=ADCFreq_Value,AHB12Freq_Value,AHB4Freq_Value,APB1Freq_Value,APB2Freq_Value,APB3Freq_Value,APB4Freq_Value,AXIClockFreq_Value,CECFreq_Value,CKPERFreq_Value,CortexFreq_Value,CpuClockFreq_Value,D1CPREFreq_Value,D1PPRE,D2PPRE1,D2PPRE2,D3PPRE,DFSDMACLkFreq_Value,DFSDMFreq_Value,DIVM1,DIVN1,DIVN2,DIVP1Freq_Value,DIVP2Freq_Value,DIVP3Freq_Value,DIVQ1,DIVQ1Freq_Value,DIVQ2Freq_Value,DIVQ3Freq_Value,DIVR1Freq_Value,DIVR2Freq_Value,DIVR3Freq_Value,FDCANFreq_Value,FMCFreq_Value,FamilyName,HCLK3ClockFreq_Value,HCLKFreq_Value,HPRE,HRTIMFreq_Value,I2C123Freq_Value,I2C4Freq_Value,LPTIM1Freq_Value,LPTIM2Freq_Value,LPTIM345Freq_Value,LPUART1Freq_Value,LTDCFreq_Value,MCO1PinFreq_Value,MCO2PinFreq_Value,PLL2FRACN,PLL3FRACN,PLLFRACN,QSPICLockSelection,QSPIFreq_Value,RNGFreq_Value,RTCFreq_Value,SAI1Freq_Value,SAI23Freq_Value,SAI4AFreq_Value,SAI4BFreq_Value,SDMMC1CLockSelection,SDMMCFreq_Value,SPDIFRXFreq_Value,SPI123Freq_Value,SPI45Freq_Value,SPI6Freq_Value,SWPMI1Freq_Value,SYSCLKFreq_VALUE,SYSCLKSource,Tim1OutputFreq_Value,Tim2OutputFreq_Value,TraceFreq_Value,USART16Freq_Value,USART234578Freq_Value,USBFreq_Value,VCO1OutputFreq_Value,VCO2OutputFreq_Value,VCO3OutputFreq_Value,VCOInput1Freq_Value,VCOInput2Freq_Value,VCOInput3Freq_Value
RCC.LPTIM1Freq_Value=120000000
RCC.LPTIM2Freq_Value=120000000
RCC.LPTIM345Freq_Value=120000000
@@ -273,17 +299,18 @@ RCC.MCO2PinFreq_Value=480000000
RCC.PLL2FRACN=0
RCC.PLL3FRACN=0
RCC.PLLFRACN=0
-RCC.QSPIFreq_Value=240000000
+RCC.QSPICLockSelection=RCC_QSPICLKSOURCE_PLL
+RCC.QSPIFreq_Value=120000000
RCC.RNGFreq_Value=48000000
RCC.RTCFreq_Value=32000
-RCC.SAI1Freq_Value=160000000
-RCC.SAI23Freq_Value=160000000
-RCC.SAI4AFreq_Value=160000000
-RCC.SAI4BFreq_Value=160000000
+RCC.SAI1Freq_Value=120000000
+RCC.SAI23Freq_Value=120000000
+RCC.SAI4AFreq_Value=120000000
+RCC.SAI4BFreq_Value=120000000
RCC.SDMMC1CLockSelection=RCC_SDMMCCLKSOURCE_PLL2
RCC.SDMMCFreq_Value=150000000
-RCC.SPDIFRXFreq_Value=160000000
-RCC.SPI123Freq_Value=160000000
+RCC.SPDIFRXFreq_Value=120000000
+RCC.SPI123Freq_Value=120000000
RCC.SPI45Freq_Value=120000000
RCC.SPI6Freq_Value=120000000
RCC.SWPMI1Freq_Value=120000000
@@ -294,7 +321,7 @@ RCC.Tim2OutputFreq_Value=240000000
RCC.TraceFreq_Value=64000000
RCC.USART16Freq_Value=120000000
RCC.USART234578Freq_Value=120000000
-RCC.USBFreq_Value=160000000
+RCC.USBFreq_Value=120000000
RCC.VCO1OutputFreq_Value=960000000
RCC.VCO2OutputFreq_Value=300000000
RCC.VCO3OutputFreq_Value=258000000
diff --git a/drivers/board.h b/drivers/board.h
index 59a647a..4a8f5b6 100644
--- a/drivers/board.h
+++ b/drivers/board.h
@@ -174,7 +174,7 @@ extern "C"
*/
/*#define BSP_USING_QSPI*/
-
+#define BSP_USING_QSPI
/*-------------------------- QSPI CONFIG END --------------------------*/
/*-------------------------- PWM CONFIG BEGIN --------------------------*/
diff --git a/packages/littlefs-v2.11.2/.gitattributes b/packages/littlefs-v2.11.2/.gitattributes
new file mode 100644
index 0000000..26d0425
--- /dev/null
+++ b/packages/littlefs-v2.11.2/.gitattributes
@@ -0,0 +1,4 @@
+# GitHub really wants to mark littlefs as a python project, telling it to
+# reclassify our test .toml files as C code (which they are 95% of anyways)
+# remedies this
+*.toml linguist-language=c
diff --git a/packages/littlefs-v2.11.2/.gitignore b/packages/littlefs-v2.11.2/.gitignore
new file mode 100644
index 0000000..09707c6
--- /dev/null
+++ b/packages/littlefs-v2.11.2/.gitignore
@@ -0,0 +1,34 @@
+# Compilation output
+*.o
+*.d
+*.a
+*.ci
+*.csv
+*.t.*
+*.b.*
+*.gcno
+*.gcda
+*.perf
+lfs
+liblfs.a
+
+# Testing things
+runners/test_runner
+runners/bench_runner
+lfs.code.csv
+lfs.data.csv
+lfs.stack.csv
+lfs.structs.csv
+lfs.cov.csv
+lfs.perf.csv
+lfs.perfbd.csv
+lfs.test.csv
+lfs.bench.csv
+
+# Misc
+tags
+.gdb_history
+scripts/__pycache__
+
+# Historical, probably should remove at some point
+tests/*.toml.*
diff --git a/packages/littlefs-v2.11.2/DESIGN.md b/packages/littlefs-v2.11.2/DESIGN.md
new file mode 100644
index 0000000..9c9703a
--- /dev/null
+++ b/packages/littlefs-v2.11.2/DESIGN.md
@@ -0,0 +1,2173 @@
+## The design of littlefs
+
+A little fail-safe filesystem designed for microcontrollers.
+
+```
+ | | | .---._____
+ .-----. | |
+--|o |---| littlefs |
+--| |---| |
+ '-----' '----------'
+ | | |
+```
+
+littlefs was originally built as an experiment to learn about filesystem design
+in the context of microcontrollers. The question was: How would you build a
+filesystem that is resilient to power-loss and flash wear without using
+unbounded memory?
+
+This document covers the high-level design of littlefs, how it differs from
+other filesystems, and the design decisions that got us here. For the
+low-level details covering every bit on disk, check out [SPEC.md](SPEC.md).
+
+## The problem
+
+The embedded systems littlefs targets are usually 32-bit microcontrollers with
+around 32 KiB of RAM and 512 KiB of ROM. These are often paired with SPI NOR
+flash chips with about 4 MiB of flash storage. These devices are too small for
+Linux and most existing filesystems, requiring code written specifically with
+size in mind.
+
+Flash itself is an interesting piece of technology with its own quirks and
+nuance. Unlike other forms of storage, writing to flash requires two
+operations: erasing and programming. Programming (setting bits to 0) is
+relatively cheap and can be very granular. Erasing, however (setting bits to 1),
+requires an expensive and destructive operation which gives flash its name.
+[Wikipedia][wikipedia-flash] has more information on how exactly flash works.
+
+To make the situation more annoying, it's very common for these embedded
+systems to lose power at any time. Usually, microcontroller code is simple and
+reactive, with no concept of a shutdown routine. This presents a big challenge
+for persistent storage, where an unlucky power loss can corrupt the storage and
+leave a device unrecoverable.
+
+This leaves us with three major requirements for an embedded filesystem.
+
+1. **Power-loss resilience** - On these systems, power can be lost at any time.
+ If a power loss corrupts any persistent data structures, this can cause the
+ device to become unrecoverable. An embedded filesystem must be designed to
+ recover from a power loss during any write operation.
+
+1. **Wear leveling** - Writing to flash is destructive. If a filesystem
+ repeatedly writes to the same block, eventually that block will wear out.
+ Filesystems that don't take wear into account can easily burn through blocks
+ used to store frequently updated metadata and cause a device's early death.
+
+1. **Bounded RAM/ROM** - If the above requirements weren't enough, these
+ systems also have very limited amounts of memory. This prevents many
+ existing filesystem designs, which can lean on relatively large amounts of
+ RAM to temporarily store filesystem metadata.
+
+ For ROM, this means we need to keep our design simple and reuse code paths
+   where possible. For RAM we have a stronger requirement: all RAM usage is
+ bounded. This means RAM usage does not grow as the filesystem changes in
+ size or number of files. This creates a unique challenge as even presumably
+ simple operations, such as traversing the filesystem, become surprisingly
+ difficult.
+
+## Existing designs?
+
+So, what's already out there? There are, of course, many different filesystems;
+however, they often share and borrow features from each other. If we look at
+power-loss resilience and wear leveling, we can narrow these down to a handful
+of designs.
+
+1. First we have the non-resilient, block based filesystems, such as [FAT] and
+ [ext2]. These are the earliest filesystem designs and often the most simple.
+ Here storage is divided into blocks, with each file being stored in a
+ collection of blocks. Without modifications, these filesystems are not
+   power-loss resilient, so updating a file is as simple as rewriting the blocks
+ in place.
+
+ ```
+ .--------.
+ | root |
+ | |
+ | |
+ '--------'
+ .-' '-.
+ v v
+ .--------. .--------.
+ | A | | B |
+ | | | |
+ | | | |
+ '--------' '--------'
+ .-' .-' '-.
+ v v v
+ .--------. .--------. .--------.
+ | C | | D | | E |
+ | | | | | |
+ | | | | | |
+ '--------' '--------' '--------'
+ ```
+
+ Because of their simplicity, these filesystems are usually both the fastest
+ and smallest. However the lack of power resilience is not great, and the
+ binding relationship of storage location and data removes the filesystem's
+ ability to manage wear.
+
+2. In a completely different direction, we have logging filesystems, such as
+   [JFFS], [YAFFS], and [SPIFFS]. Here, storage location is not bound to a piece
+   of data; instead, the entire storage is used as a circular log that is
+   appended to with every change made to the filesystem. Writing appends new
+ changes, while reading requires traversing the log to reconstruct a file.
+ Some logging filesystems cache files to avoid the read cost, but this comes
+ at a tradeoff of RAM.
+
+ ```
+ v
+ .--------.--------.--------.--------.--------.--------.--------.--------.
+ | C | new B | new A | | A | B |
+ | | | |-> | | |
+ | | | | | | |
+ '--------'--------'--------'--------'--------'--------'--------'--------'
+ ```
+
+   Logging filesystems are beautifully elegant. With a checksum, we can easily
+ detect power-loss and fall back to the previous state by ignoring failed
+ appends. And if that wasn't good enough, their cyclic nature means that
+ logging filesystems distribute wear across storage perfectly.
+
+ The main downside is performance. If we look at garbage collection, the
+ process of cleaning up outdated data from the end of the log, I've yet to
+ see a pure logging filesystem that does not have one of these two costs:
+
+ 1. _O(n²)_ runtime
+ 2. _O(n)_ RAM
+
+   SPIFFS is a very interesting case here, as it uses the fact that repeated
+   programs to NOR flash are both atomic and masking. This is a very neat
+ solution, however it limits the type of storage you can support.
+
+3. Perhaps the most common type of filesystem, a journaling filesystem is the
+ offspring that happens when you mate a block based filesystem with a logging
+ filesystem. [ext4] and [NTFS] are good examples. Here, we take a normal
+ block based filesystem and add a bounded log where we note every change
+ before it occurs.
+
+ ```
+ journal
+ .--------.--------.
+ .--------. | C'| D'| | E'|
+ | root |-->| | |-> | |
+ | | | | | | |
+ | | '--------'--------'
+ '--------'
+ .-' '-.
+ v v
+ .--------. .--------.
+ | A | | B |
+ | | | |
+ | | | |
+ '--------' '--------'
+ .-' .-' '-.
+ v v v
+ .--------. .--------. .--------.
+ | C | | D | | E |
+ | | | | | |
+ | | | | | |
+ '--------' '--------' '--------'
+ ```
+
+
+ This sort of filesystem takes the best from both worlds. Performance can be
+ as fast as a block based filesystem (though updating the journal does have
+ a small cost), and atomic updates to the journal allow the filesystem to
+ recover in the event of a power loss.
+
+ Unfortunately, journaling filesystems have a couple of problems. They are
+ fairly complex, since there are effectively two filesystems running in
+ parallel, which comes with a code size cost. They also offer no protection
+ against wear because of the strong relationship between storage location
+ and data.
+
+4. Last but not least we have copy-on-write (COW) filesystems, such as
+ [btrfs] and [ZFS]. These are very similar to other block based filesystems,
+   but instead of updating a block in place, all updates are performed by creating
+ a copy with the changes and replacing any references to the old block with
+ our new block. This recursively pushes all of our problems upwards until we
+ reach the root of our filesystem, which is often stored in a very small log.
+
+ ```
+ .--------. .--------.
+ | root | write |new root|
+ | | ==> | |
+ | | | |
+ '--------' '--------'
+ .-' '-. | '-.
+ | .-------|------------------' v
+ v v v .--------.
+ .--------. .--------. | new B |
+ | A | | B | | |
+ | | | | | |
+ | | | | '--------'
+ '--------' '--------' .-' |
+ .-' .-' '-. .------------|------'
+ | | | | v
+ v v v v .--------.
+ .--------. .--------. .--------. | new D |
+ | C | | D | | E | | |
+ | | | | | | | |
+ | | | | | | '--------'
+ '--------' '--------' '--------'
+ ```
+
+ COW filesystems are interesting. They offer very similar performance to
+ block based filesystems while managing to pull off atomic updates without
+ storing data changes directly in a log. They even disassociate the storage
+ location of data, which creates an opportunity for wear leveling.
+
+ Well, almost. The unbounded upwards movement of updates causes some
+ problems. Because updates to a COW filesystem don't stop until they've
+ reached the root, an update can cascade into a larger set of writes than
+ would be needed for the original data. On top of this, the upward motion
+   focuses these writes onto a single block, the root, which can wear out much
+   earlier than the rest of the filesystem.
+
+## littlefs
+
+So what does littlefs do?
+
+If we look at existing filesystems, there are two interesting design patterns
+that stand out, but each have their own set of problems. Logging, which
+provides independent atomicity, has poor runtime performance. And COW data
+structures, which perform well, push the atomicity problem upwards.
+
+Can we work around these limitations?
+
+Consider logging. It has either a _O(n²)_ runtime or _O(n)_ RAM cost. We
+can't avoid these costs, _but_ if we put an upper bound on the size we can at
+least prevent the theoretical cost from becoming a problem. This relies on the
+super secret computer science hack where you can pretend any algorithmic
+complexity is _O(1)_ by bounding the input.
+
+In the case of COW data structures, we can try twisting the definition a bit.
+Let's say that our COW structure doesn't copy after a single write, but instead
+copies after _n_ writes. This doesn't change most COW properties (assuming you
+can write atomically!), but what it does do is prevent the upward motion of
+wear. This sort of copy-on-bounded-writes (CObW) still focuses wear, but at
+each level we divide the propagation of wear by _n_. With a sufficiently
+large _n_ (> branching factor) wear propagation is no longer a problem.
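+For example, with _n_ = 100, each parent sees only 1/100th of its children's
+write traffic, and each grandparent only 1/10,000th.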
+
+See where this is going? Separately, logging and COW are imperfect solutions and
+have weaknesses that limit their usefulness. But if we merge the two they can
+mutually solve each other's limitations.
+
+This is the idea behind littlefs. At the sub-block level, littlefs is built
+out of small, two block logs that provide atomic updates to metadata anywhere
+on the filesystem. At the super-block level, littlefs is a CObW tree of blocks
+that can be evicted on demand.
+
+```
+ root
+ .--------.--------.
+ | A'| B'| |
+ | | |-> |
+ | | | |
+ '--------'--------'
+ .----' '--------------.
+ A v B v
+ .--------.--------. .--------.--------.
+ | C'| D'| | | E'|new| |
+ | | |-> | | | E'|-> |
+ | | | | | | | |
+ '--------'--------' '--------'--------'
+ .-' '--. | '------------------.
+ v v .-' v
+.--------. .--------. v .--------.
+| C | | D | .--------. write | new E |
+| | | | | E | ==> | |
+| | | | | | | |
+'--------' '--------' | | '--------'
+ '--------' .-' |
+ .-' '-. .-------------|------'
+ v v v v
+ .--------. .--------. .--------.
+ | F | | G | | new F |
+ | | | | | |
+ | | | | | |
+ '--------' '--------' '--------'
+```
+
+There are still some minor issues. Small logs can be expensive in terms of
+storage; in the worst case a small log costs 4x the size of the original data.
+CObW structures require an efficient block allocator since allocation occurs
+every _n_ writes. And there is still the challenge of keeping the RAM usage
+constant.
+
+## Metadata pairs
+
+Metadata pairs are the backbone of littlefs. These are small, two block logs
+that allow atomic updates anywhere in the filesystem.
+
+Why two blocks? Well, logs work by appending entries to a circular buffer
+stored on disk. But remember that flash has limited write granularity. We can
+incrementally program new data onto erased blocks, but we need to erase a full
+block at a time. This means that in order for our circular buffer to work, we
+need more than one block.
+
+We could make our logs larger than two blocks, but the next challenge is how
+do we store references to these logs? Because the blocks themselves are erased
+during writes, using a data structure to track these blocks is complicated.
+The simple solution here is to store two block addresses for every metadata
+pair. This has the added advantage that we can change out blocks in the
+metadata pair independently, and we don't reduce our block granularity for
+other operations.
+
+In order to determine which metadata block is the most recent, we store a
+revision count that we compare using [sequence arithmetic][wikipedia-sna]
+(very handy for avoiding problems with integer overflow). Conveniently, this
+revision count also gives us a rough idea of how many erases have occurred on
+the block.
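+
+As a concrete sketch of the comparison (assuming 32-bit revision counts;
+littlefs's own helper in `lfs_util.h` may differ in detail), sequence
+arithmetic reduces to reinterpreting an unsigned difference as signed:
+
+```c
+#include <stdint.h>
+#include <stdbool.h>
+
+/* true if revision a is newer than revision b, even across wraparound */
+static bool rev_is_newer(uint32_t a, uint32_t b)
+{
+    /* the difference, viewed as signed, is positive when a leads b
+       by less than half the counter range */
+    return (int32_t)(a - b) > 0;
+}
+```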
+
+```
+metadata pair pointer: {block 0, block 1}
+ | '--------------------.
+ '-. |
+disk v v
+.--------.--------.--------.--------.--------.--------.--------.--------.
+| | |metadata| |metadata| |
+| | |block 0 | |block 1 | |
+| | | | | | |
+'--------'--------'--------'--------'--------'--------'--------'--------'
+ '--. .----'
+ v v
+ metadata pair .----------------.----------------.
+ | revision 11 | revision 12 |
+ block 1 is |----------------|----------------|
+ most recent | A | A'' |
+ |----------------|----------------|
+ | checksum | checksum |
+ |----------------|----------------|
+ | B | A''' | <- most recent A
+ |----------------|----------------|
+ | A'' | checksum |
+ |----------------|----------------|
+ | checksum | | |
+ |----------------| v |
+ '----------------'----------------'
+```
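+
+As a hedged sketch (not littlefs's exact code), comparing two 32-bit revision
+counts with sequence arithmetic needs only a signed interpretation of their
+difference:
+
+```
+#include <stdbool.h>
+#include <stdint.h>
+
+// sequence arithmetic: "a is newer than b" even if the counter wrapped,
+// as long as the two revisions are within 2^31 of each other
+static bool revision_newer(uint32_t a, uint32_t b) {
+    return (int32_t)(a - b) > 0;
+}
+```
+
+Fetching a metadata pair then just reads both revision counts and takes
+whichever block this comparison says is newer.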
+
+So how do we atomically update our metadata pairs? Atomicity (a type of
+power-loss resilience) requires two parts: redundancy and error detection.
+Error detection can be provided with a checksum, and in littlefs's case we
+use a 32-bit [CRC][wikipedia-crc]. Maintaining redundancy, on the other hand,
+requires multiple stages.
+
+1. If our block is not full and the program size is small enough to let us
+ append more entries, we can simply append the entries to the log. Because
+ we don't overwrite the original entries (remember rewriting flash requires
+ an erase), we still have the original entries if we lose power during the
+ append.
+
+ ```
+ commit A
+ .----------------.----------------. .----------------.----------------.
+ | revision 1 | revision 0 | => | revision 1 | revision 0 |
+ |----------------|----------------| |----------------|----------------|
+ | | | | | A | |
+ | v | | |----------------| |
+ | | | | checksum | |
+ | | | |----------------| |
+ | | | | | | |
+ | | | | v | |
+ | | | | | |
+ | | | | | |
+ | | | | | |
+ | | | | | |
+ '----------------'----------------' '----------------'----------------'
+ ```
+
+ Note that littlefs doesn't maintain a checksum for each entry. Many logging
+ filesystems do this, but it limits what you can update in a single atomic
+ operation. What we can do instead is group multiple entries into a commit
+ that shares a single checksum. This lets us update multiple unrelated pieces
+   of metadata as long as they reside on the same metadata pair, as sketched
+   in code after this list.
+
+ ```
+ commit B and A'
+ .----------------.----------------. .----------------.----------------.
+ | revision 1 | revision 0 | => | revision 1 | revision 0 |
+ |----------------|----------------| |----------------|----------------|
+ | A | | | A | |
+ |----------------| | |----------------| |
+ | checksum | | | checksum | |
+ |----------------| | |----------------| |
+ | | | | | B | |
+ | v | | |----------------| |
+ | | | | A' | |
+ | | | |----------------| |
+ | | | | checksum | |
+ | | | |----------------| |
+ '----------------'----------------' '----------------'----------------'
+ ```
+
+2. If our block _is_ full of entries, we need to somehow remove outdated
+ entries to make space for new ones. This process is called garbage
+ collection, but because littlefs has multiple garbage collectors, we
+ also call this specific case compaction.
+
+ Compared to other filesystems, littlefs's garbage collector is relatively
+ simple. We want to avoid RAM consumption, so we use a sort of brute force
+ solution where for each entry we check to see if a newer entry has been
+   written. If the entry is the most recent, we append it to our new block. This
+   is where having two blocks becomes important: if we lose power, we still have
+   everything in our original block.
+
+ During this compaction step we also erase the metadata block and increment
+ the revision count. Because we can commit multiple entries at once, we can
+ write all of these changes to the second block without worrying about power
+ loss. It's only when the commit's checksum is written that the compacted
+ entries and revision count become committed and readable.
+
+ ```
+ commit B', need to compact
+ .----------------.----------------. .----------------.----------------.
+ | revision 1 | revision 0 | => | revision 1 | revision 2 |
+ |----------------|----------------| |----------------|----------------|
+ | A | | | A | A' |
+ |----------------| | |----------------|----------------|
+ | checksum | | | checksum | B' |
+ |----------------| | |----------------|----------------|
+ | B | | | B | checksum |
+ |----------------| | |----------------|----------------|
+ | A' | | | A' | | |
+ |----------------| | |----------------| v |
+ | checksum | | | checksum | |
+ |----------------| | |----------------| |
+ '----------------'----------------' '----------------'----------------'
+ ```
+
+3. If our block is full of entries _and_ we can't find any garbage, then what?
+ At this point, most logging filesystems would return an error indicating no
+ more space is available, but because we have small logs, overflowing a log
+ isn't really an error condition.
+
+ Instead, we split our original metadata pair into two metadata pairs, each
+ containing half of the entries, connected by a tail pointer. Instead of
+ increasing the size of the log and dealing with the scalability issues
+ associated with larger logs, we form a linked list of small bounded logs.
+   This is a tradeoff, as this approach does use more storage space, but with
+   the benefit of improved scalability.
+
+ Despite writing to two metadata pairs, we can still maintain power
+ resilience during this split step by first preparing the new metadata pair,
+ and then inserting the tail pointer during the commit to the original
+ metadata pair.
+
+ ```
+ commit C and D, need to split
+ .----------------.----------------. .----------------.----------------.
+ | revision 1 | revision 2 | => | revision 3 | revision 2 |
+ |----------------|----------------| |----------------|----------------|
+ | A | A' | | A' | A' |
+ |----------------|----------------| |----------------|----------------|
+ | checksum | B' | | B' | B' |
+ |----------------|----------------| |----------------|----------------|
+ | B | checksum | | tail ---------------------.
+ |----------------|----------------| |----------------|----------------| |
+ | A' | | | | checksum | | |
+ |----------------| v | |----------------| | |
+ | checksum | | | | | | |
+ |----------------| | | v | | |
+ '----------------'----------------' '----------------'----------------' |
+ .----------------.---------'
+ v v
+ .----------------.----------------.
+ | revision 1 | revision 0 |
+ |----------------|----------------|
+ | C | |
+ |----------------| |
+ | D | |
+ |----------------| |
+ | checksum | |
+ |----------------| |
+ | | | |
+ | v | |
+ | | |
+ | | |
+ '----------------'----------------'
+ ```
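+
+Here's the commit from step 1 as a hedged C sketch. The crc32 and bd_prog
+helpers are hypothetical stand-ins for a running CRC and the block device's
+program hook, not littlefs's actual API:
+
+```
+#include <stdint.h>
+
+// hypothetical hooks: a running CRC and the device's program operation
+extern uint32_t crc32(uint32_t crc, const void *buf, uint32_t len);
+extern int bd_prog(uint32_t block, uint32_t off,
+        const void *buf, uint32_t len);
+
+// append several entries followed by a single shared checksum; the commit
+// only becomes valid once the checksum lands, so losing power mid-append
+// leaves the previous state of the log intact
+int commit(uint32_t block, uint32_t *off,
+        const void *const entries[], const uint32_t lens[], int n) {
+    uint32_t crc = 0xffffffff;
+    for (int i = 0; i < n; i++) {
+        if (bd_prog(block, *off, entries[i], lens[i])) {
+            return -1;
+        }
+        crc = crc32(crc, entries[i], lens[i]);
+        *off += lens[i];
+    }
+    // the atomic moment: programming the checksum commits every entry above
+    if (bd_prog(block, *off, &crc, sizeof(crc))) {
+        return -1;
+    }
+    *off += sizeof(crc);
+    return 0;
+}
+```
+
+Note how nothing before the final bd_prog makes the new entries visible; a
+reader simply ignores trailing data that doesn't end in a valid checksum.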
+
+There is another complexity that crops up when dealing with small logs. The
+amortized runtime cost of garbage collection is not only dependent on its
+one time cost (_O(n²)_ for littlefs), but also depends on how often
+garbage collection occurs.
+
+Consider two extremes:
+
+1. Log is empty, garbage collection occurs once every _n_ updates
+2. Log is full, garbage collection occurs **every** update
+
+Clearly we need to be more aggressive than waiting for our metadata pair to
+be full. As the metadata pair approaches fullness the frequency of compactions
+grows very rapidly.
+
+Looking at the problem generically, consider a log with ![n] bytes for each
+entry, ![d] dynamic entries (entries that are outdated during garbage
+collection), and ![s] static entries (entries that need to be copied during
+garbage collection). If we look at the amortized runtime complexity of updating
+this log we get this formula:
+
+![cost = n + n (s / d+1)][metadata-formula1]
+
+If we let ![r] be the ratio of static space to the size of our log in bytes, we
+find an alternative representation of the number of static and dynamic entries:
+
+![s = r (size/n)][metadata-formula2]
+
+![d = (1 - r) (size/n)][metadata-formula3]
+
+Substituting these in for ![d] and ![s] gives us a nice formula for the cost of
+updating an entry given how full the log is:
+
+![cost = n + n (r (size/n) / ((1-r) (size/n) + 1))][metadata-formula4]
+
+Assuming 100 byte entries in a 4 KiB log, we can graph this using the entry
+size to find a multiplicative cost:
+
+![Metadata pair update cost graph][metadata-cost-graph]
+
+So at 50% usage, we're seeing an average of 2x cost per update, and at 75%
+usage, we're already at an average of 4x cost per update.
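+
+Evaluating the formula directly confirms these multipliers; a small C sketch,
+assuming the same 100 byte entries in a 4 KiB log:
+
+```
+#include <stdio.h>
+
+// amortized cost of updating an n-byte entry in a log of `size` bytes
+// that is fraction `r` full of static entries (the formula above)
+static double update_cost(double n, double size, double r) {
+    double s = r*(size/n);     // static entries
+    double d = (1-r)*(size/n); // dynamic entries
+    return n + n*(s/(d + 1));
+}
+
+int main(void) {
+    // 100-byte entries in a 4 KiB log
+    for (double r = 0.25; r <= 0.75; r += 0.25) {
+        printf("%.0f%% static -> %.1fx\n", 100*r,
+                update_cost(100, 4096, r)/100);
+    }
+    return 0;
+}
+```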
+
+To avoid this exponential growth, instead of waiting for our metadata pair
+to be full, we split the metadata pair once we exceed 50% capacity. We do this
+lazily, waiting until we need to compact before checking if we fit in our 50%
+limit. This limits the overhead of garbage collection to 2x the runtime cost,
+giving us an amortized runtime complexity of _O(1)_.
+
+---
+
+If we look at metadata pairs and linked-lists of metadata pairs at a high
+level, they have fairly nice runtime costs. Assuming _n_ metadata pairs,
+each containing _m_ metadata entries, the _lookup_ cost for a specific
+entry has a worst case runtime complexity of _O(nm)_. For _updating_ a specific
+entry, the worst case complexity is _O(nm²)_, with an amortized complexity
+of only _O(nm)_.
+
+However, splitting at 50% capacity does mean that in the best case our
+metadata pairs will only be 1/2 full. If we include the overhead of the second
+block in our metadata pair, each metadata entry has an effective storage cost
+of 4x the original size. I imagine users would not be happy if they found
+that they can only use a quarter of their original storage. Metadata pairs
+provide a mechanism for performing atomic updates, but we need a separate
+mechanism for storing the bulk of our data.
+
+## CTZ skip-lists
+
+Metadata pairs provide efficient atomic updates but unfortunately have a large
+storage cost. But we can work around this storage cost by only using the
+metadata pairs to store references to more dense, copy-on-write (COW) data
+structures.
+
+[Copy-on-write data structures][wikipedia-cow], also called purely functional
+data structures, are a category of data structures where the underlying
+elements are immutable. Making changes to the data requires creating new
+elements containing a copy of the updated data and replacing any references
+with references to the new elements. Generally, the performance of a COW data
+structure depends on how many old elements can be reused after replacing parts
+of the data.
+
+littlefs has several requirements of its COW structures. They need to be
+efficient to read and write, but most frustrating, they need to be traversable
+with a constant amount of RAM. Notably this rules out
+[B-trees][wikipedia-B-tree], which cannot be traversed with constant RAM, and
+[B+-trees][wikipedia-B+-tree], which are not possible to update with COW
+operations.
+
+---
+
+So, what can we do? First let's consider storing files in a simple COW
+linked-list. Appending a block, which is the basis for writing files, means we
+have to update the last block to point to our new block. This requires a COW
+operation, which means we need to update the second-to-last block, and then the
+third-to-last, and so on until we've copied out the entire file.
+
+```
+A linked-list
+.--------. .--------. .--------. .--------. .--------. .--------.
+| data 0 |->| data 1 |->| data 2 |->| data 3 |->| data 4 |->| data 5 |
+| | | | | | | | | | | |
+| | | | | | | | | | | |
+'--------' '--------' '--------' '--------' '--------' '--------'
+```
+
+To avoid a full copy during appends, we can store the data backwards. Appending
+blocks just requires adding the new block and no other blocks need to be
+updated. If we update a block in the middle, we still need to copy the
+following blocks, but can reuse any blocks before it. Since most file writes
+are linear, this design gambles that appends are the most common type of data
+update.
+
+```
+A backwards linked-list
+.--------. .--------. .--------. .--------. .--------. .--------.
+| data 0 |<-| data 1 |<-| data 2 |<-| data 3 |<-| data 4 |<-| data 5 |
+| | | | | | | | | | | |
+| | | | | | | | | | | |
+'--------' '--------' '--------' '--------' '--------' '--------'
+```
+
+However, a backwards linked-list does have a rather glaring problem. Iterating
+over a file _in order_ has a runtime cost of _O(n²)_. A quadratic runtime
+just to read a file! That's awful.
+
+Fortunately we can do better. Instead of a singly linked list, littlefs
+uses a multilayered linked-list often called a
+[skip-list][wikipedia-skip-list]. However, unlike the most common type of
+skip-list, littlefs's skip-lists are strictly deterministic, built around some
+interesting properties of the count-trailing-zeros (CTZ) instruction.
+
+The rules CTZ skip-lists follow are that for every _n_th block where _n_
+is divisible by 2_ˣ_, that block contains a pointer to block
+_n_-2_ˣ_. This means that each block contains anywhere from 1 to
+log₂_n_ pointers that skip to different preceding elements of the
+skip-list.
+
+The name comes from heavy use of the [CTZ instruction][wikipedia-ctz], which
+lets us calculate the power-of-two factors efficiently. For a given block _n_,
+that block contains ctz(_n_)+1 pointers.
+
+```
+A backwards CTZ skip-list
+.--------. .--------. .--------. .--------. .--------. .--------.
+| data 0 |<-| data 1 |<-| data 2 |<-| data 3 |<-| data 4 |<-| data 5 |
+| |<-| |--| |<-| |--| | | |
+| |<-| |--| |--| |--| | | |
+'--------' '--------' '--------' '--------' '--------' '--------'
+```
+
+The additional pointers let us navigate the data-structure on disk much more
+efficiently than in a singly linked list.
+
+Consider a path from data block 5 to data block 1. You can see how data block 3
+was completely skipped:
+```
+.--------. .--------. .--------. .--------. .--------. .--------.
+| data 0 | | data 1 |<-| data 2 | | data 3 | | data 4 |<-| data 5 |
+| | | | | |<-| |--| | | |
+| | | | | | | | | | | |
+'--------' '--------' '--------' '--------' '--------' '--------'
+```
+
+The path to data block 0 is even faster, requiring only two jumps:
+```
+.--------. .--------. .--------. .--------. .--------. .--------.
+| data 0 | | data 1 | | data 2 | | data 3 | | data 4 |<-| data 5 |
+| | | | | | | | | | | |
+| |<-| |--| |--| |--| | | |
+'--------' '--------' '--------' '--------' '--------' '--------'
+```
+
+We can find the runtime complexity by looking at the path to any block from
+the block containing the most pointers. Every step along the path divides
+the search space for the block in half, giving us a runtime of _O(log n)_.
+To get _to_ the block with the most pointers, we can perform the same steps
+backwards, which puts the runtime at _O(2 log n)_ = _O(log n)_. An interesting
+note is that this optimal path occurs naturally if we greedily choose the
+pointer that covers the most distance without passing our target.
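+
+Here's that greedy walk as a hedged C sketch, using GCC/Clang builtins and
+illustrative names (not littlefs's internals):
+
+```
+#include <stdint.h>
+
+// block n of a CTZ skip-list holds ctz(n)+1 pointers, where pointer i
+// skips back 2^i blocks (block 0 is the end of the list)
+static unsigned ctz_npointers(uint32_t n) {
+    return n == 0 ? 0 : __builtin_ctz(n) + 1;
+}
+
+// greedily take the largest skip that doesn't pass `target`, returning
+// the next block index on the path from `current` (target < current)
+static uint32_t ctz_next(uint32_t current, uint32_t target) {
+    for (unsigned i = ctz_npointers(current); i-- > 0;) {
+        uint32_t dest = current - (UINT32_C(1) << i);
+        if (dest >= target) {
+            return dest;
+        }
+    }
+    return current - 1; // unreachable: pointer 0 always lands >= target
+}
+```
+
+Following ctz_next from block 5 down to block 1 reproduces the path
+5 -> 4 -> 2 -> 1 shown above, skipping block 3 entirely.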
+
+So now we have a [COW] data structure that is cheap to append with a runtime
+of _O(1)_, and can be read with a worst case runtime of _O(n log n)_. Given
+that this runtime is also divided by the amount of data we can store in a
+block, this cost is fairly reasonable.
+
+---
+
+This is a new data structure, so we still have several questions. What is the
+storage overhead? Can the number of pointers exceed the size of a block? How do
+we store a CTZ skip-list in our metadata pairs?
+
+To find the storage overhead, we can look at the data structure as multiple
+linked-lists. Each linked-list skips twice as many blocks as the previous,
+or from another perspective, each linked-list uses half as much storage as
+the previous. As we approach infinity, the storage overhead forms a geometric
+series. Solving this tells us that on average our storage overhead is only
+2 pointers per block.
+
+![lim,n->inf((1/n)sum,i,0->n(ctz(i)+1)) = sum,i,0->inf(1/2^i) = 2][ctz-formula1]
+
+Because our file size is limited by the word width we use to store sizes, we can
+also solve for the maximum number of pointers we would ever need to store in a
+block. If we set the overhead of pointers equal to the block size, we get the
+following equation. Note that both a smaller block size (![B][bigB]) and larger
+word width (![w]) result in more storage overhead.
+
+![B = (w/8)ceil(log2(2^w / (B-2w/8)))][ctz-formula2]
+
+Solving the equation for ![B][bigB] gives us the minimum block size for some
+common word widths:
+
+1. 32-bit CTZ skip-list => minimum block size of 104 bytes
+2. 64-bit CTZ skip-list => minimum block size of 448 bytes
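+
+These minimums are easy to double-check numerically; a C sketch that
+brute-forces the smallest satisfying ![B][bigB] (compile with -lm):
+
+```
+#include <math.h>
+#include <stdio.h>
+
+// smallest block size B where the worst case pointer overhead,
+// (w/8)*ceil(log2(2^w / (B - 2*w/8))), still fits in the block
+static unsigned min_block_size(unsigned w) {
+    for (unsigned B = 2*(w/8) + 1; ; B++) {
+        double ptrs = ceil(log2(exp2(w) / (B - 2.0*(w/8))));
+        if ((w/8)*ptrs <= B) {
+            return B;
+        }
+    }
+}
+
+int main(void) {
+    printf("32-bit: %u bytes\n", min_block_size(32)); // 104
+    printf("64-bit: %u bytes\n", min_block_size(64)); // 448
+    return 0;
+}
+```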
+
+littlefs uses a 32-bit word width, so our blocks can only overflow with
+pointers if they are smaller than 104 bytes. This is an easy requirement, as
+in practice, most block sizes start at 512 bytes. As long as our block size
+is larger than 104 bytes, we can avoid the extra logic needed to handle
+pointer overflow.
+
+The last question is how do we store CTZ skip-lists? We need a pointer to the
+head block, the size of the skip-list, the index of the head block, and our
+offset in the head block. But it's worth noting that each size maps to a unique
+index + offset pair. So in theory we can store only a single pointer and size.
+
+However, calculating the index + offset pair from the size is a bit
+complicated. We can start with a summation that loops through all of the blocks
+up until our given size. Let ![B][bigB] be the block size in bytes, ![w] be the
+word width in bits, ![n] be the index of the block in the skip-list, and
+![N][bigN] be the file size in bytes:
+
+![N = sum,i,0->n(B-(w/8)(ctz(i)+1))][ctz-formula3]
+
+This works quite well, but requires _O(n)_ to compute, which brings the full
+runtime of reading a file up to _O(n² log n)_. Fortunately, that summation
+doesn't need to touch the disk, so the practical impact is minimal.
+
+However, despite the integration of a bitwise operation, we can actually reduce
+this equation to an _O(1)_ form. While browsing the amazing resource that is
+the [On-Line Encyclopedia of Integer Sequences (OEIS)][oeis], I managed to find
+[A001511], which matches the iteration of the CTZ instruction,
+and [A005187], which matches its partial summation. Much to my
+surprise, these both result from simple equations, leading us to a rather
+unintuitive property that ties together two seemingly unrelated bitwise
+instructions:
+
+![sum,i,0->n(ctz(i)+1) = 2n-popcount(n)][ctz-formula4]
+
+where:
+
+1. ctz(![x]) = the number of trailing bits that are 0 in ![x]
+2. popcount(![x]) = the number of bits that are 1 in ![x]
+
+Initial tests suggest this surprising property holds. As ![n] approaches
+infinity, we end up with an average overhead of 2 pointers, which matches our
+assumption from earlier. During iteration, the popcount function seems to
+handle deviations from this average. Of course, just to make sure I wrote a
+quick script that verified this property for all 32-bit integers.
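+
+That check is easy to reproduce; here's a C sketch of the same brute-force
+verification over a smaller range (summing from i = 1, since ctz(0) is
+undefined):
+
+```
+#include <assert.h>
+#include <stdint.h>
+
+// verify sum_{i=1..n}(ctz(i)+1) == 2n - popcount(n) by brute force
+int main(void) {
+    uint64_t sum = 0;
+    for (uint32_t n = 1; n <= 1000000; n++) {
+        sum += __builtin_ctz(n) + 1;
+        assert(sum == 2*(uint64_t)n - __builtin_popcount(n));
+    }
+    return 0;
+}
+```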
+
+Now we can substitute into our original equation to find a more efficient
+equation for file size:
+
+![N = Bn - (w/8)(2n-popcount(n))][ctz-formula5]
+
+Unfortunately, the popcount function is non-injective, so we can't solve this
+equation for our index. But what we can do is solve for an ![n'] index that
+is greater than ![n] with error bounded by the range of the popcount function.
+We can repeatedly substitute ![n'] into the original equation until the error
+is smaller than our integer resolution. As it turns out, we only need to
+perform this substitution once, which gives us this formula for our index:
+
+![n = floor((N-(w/8)popcount(N/(B-2w/8))) / (B-2w/8))][ctz-formula6]
+
+Now that we have our index ![n], we can just plug it back into the above
+equation to find the offset. We run into a bit of a problem with integer
+overflow, but we can avoid this by rearranging the equation a bit:
+
+![off = N - (B-2w/8)n - (w/8)popcount(n)][ctz-formula7]
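+
+As a sketch, these two formulas translate almost directly into C, assuming a
+32-bit word width (so w/8 = 4 bytes per pointer) and glossing over the
+block-boundary edge cases a real implementation must handle:
+
+```
+#include <stdint.h>
+
+// find the block index and offset for byte position N of a CTZ skip-list,
+// where B is the block size in bytes
+static uint32_t ctz_index(uint32_t N, uint32_t B, uint32_t *off) {
+    uint32_t b = B - 2*4; // effective data bytes per block (B - 2w/8)
+    uint32_t n = (N - 4*(uint32_t)__builtin_popcount(N/b)) / b;
+    *off = N - b*n - 4*(uint32_t)__builtin_popcount(n);
+    return n;
+}
+```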
+
+Our solution requires quite a bit of math, but computers are very good at math.
+Now we can find both our block index and offset from a size in _O(1)_, letting
+us store CTZ skip-lists with only a pointer and size.
+
+CTZ skip-lists give us a COW data structure that is easily traversable in
+_O(n)_, can be appended in _O(1)_, and can be read in _O(n log n)_. All of
+these operations work in a bounded amount of RAM and require only two words of
+storage overhead per block. In combination with metadata pairs, CTZ skip-lists
+provide power resilience and compact storage of data.
+
+```
+ .--------.
+ .|metadata|
+ || |
+ || |
+ |'--------'
+ '----|---'
+ v
+.--------. .--------. .--------. .--------.
+| data 0 |<-| data 1 |<-| data 2 |<-| data 3 |
+| |<-| |--| | | |
+| | | | | | | |
+'--------' '--------' '--------' '--------'
+
+write data to disk, create copies
+=>
+ .--------.
+ .|metadata|
+ || |
+ || |
+ |'--------'
+ '----|---'
+ v
+.--------. .--------. .--------. .--------.
+| data 0 |<-| data 1 |<-| data 2 |<-| data 3 |
+| |<-| |--| | | |
+| | | | | | | |
+'--------' '--------' '--------' '--------'
+ ^ ^ ^
+ | | | .--------. .--------. .--------. .--------.
+ | | '----| new |<-| new |<-| new |<-| new |
+ | '----------------| data 2 |<-| data 3 |--| data 4 | | data 5 |
+ '------------------| |--| |--| | | |
+ '--------' '--------' '--------' '--------'
+
+commit to metadata pair
+=>
+ .--------.
+ .|new |
+ ||metadata|
+ || |
+ |'--------'
+ '----|---'
+ |
+.--------. .--------. .--------. .--------. |
+| data 0 |<-| data 1 |<-| data 2 |<-| data 3 | |
+| |<-| |--| | | | |
+| | | | | | | | |
+'--------' '--------' '--------' '--------' |
+ ^ ^ ^ v
+ | | | .--------. .--------. .--------. .--------.
+ | | '----| new |<-| new |<-| new |<-| new |
+ | '----------------| data 2 |<-| data 3 |--| data 4 | | data 5 |
+ '------------------| |--| |--| | | |
+ '--------' '--------' '--------' '--------'
+```
+
+## The block allocator
+
+So we now have the framework for an atomic, wear leveling filesystem. Small two
+block metadata pairs provide atomic updates, while CTZ skip-lists provide
+compact storage of data in COW blocks.
+
+But now we need to look at the [elephant] in the room. Where do all these
+blocks come from?
+
+Deciding which block to use next is the responsibility of the block allocator.
+In filesystem design, block allocation is often a second-class citizen, but in
+a COW filesystem its role becomes much more important as it is needed for
+nearly every write to the filesystem.
+
+Normally, block allocation involves some sort of free list or bitmap stored on
+the filesystem that is updated with free blocks. However, with power
+resilience, keeping these structures consistent becomes difficult. It doesn't
+help that any mistake in updating these structures can result in lost blocks
+that are impossible to recover.
+
+littlefs takes a cautious approach. Instead of trusting a free list on disk,
+littlefs relies on the fact that the filesystem on disk is a mirror image of
+the free blocks on the disk. The block allocator operates much like a garbage
+collector in a scripting language, scanning for unused blocks on demand.
+
+```
+ .----.
+ |root|
+ | |
+ '----'
+ v-------' '-------v
+.----. . . .----.
+| A | . . | B |
+| | . . | |
+'----' . . '----'
+. . . . v--' '------------v---------v
+. . . .----. . .----. .----.
+. . . | C | . | D | | E |
+. . . | | . | | | |
+. . . '----' . '----' '----'
+. . . . . . . . . .
+.----.----.----.----.----.----.----.----.----.----.----.----.
+| A | |root| C | B | | D | | E | |
+| | | | | | | | | | |
+'----'----'----'----'----'----'----'----'----'----'----'----'
+ ^ ^ ^ ^ ^
+ '-------------------'----'-------------------'----'-- free blocks
+```
+
+While this approach may sound complicated, the decision to not maintain a free
+list greatly simplifies the overall design of littlefs. Unlike programming
+languages, there are only a handful of data structures we need to traverse.
+And block deallocation, which occurs nearly as often as block allocation,
+is simply a noop. This "drop it on the floor" strategy greatly reduces the
+complexity of managing on disk data structures, especially when handling
+high-risk error conditions.
+
+---
+
+Our block allocator needs to find free blocks efficiently. We could traverse
+through every block on storage and check each one against our filesystem tree;
+however, the runtime would be abhorrent. We need to somehow collect multiple
+blocks per traversal.
+
+Looking at existing designs, some larger filesystems that use a similar "drop
+it on the floor" strategy store a bitmap of the entire storage in [RAM]. This
+works well because bitmaps are surprisingly compact. We can't use the same
+strategy here, as it violates our constant RAM requirement, but we may be able
+to modify the idea into a workable solution.
+
+```
+.----.----.----.----.----.----.----.----.----.----.----.----.
+| A | |root| C | B | | D | | E | |
+| | | | | | | | | | |
+'----'----'----'----'----'----'----'----'----'----'----'----'
+ 1 0 1 1 1 0 0 1 0 1 0 0
+ \---------------------------+----------------------------/
+ v
+ bitmap: 0xb94 (0b101110010100)
+```
+
+The block allocator in littlefs is a compromise between a disk-sized bitmap and
+a brute force traversal. Instead of a bitmap the size of storage, we keep track
+of a small, fixed-size bitmap called the lookahead buffer. During block
+allocation, we take blocks from the lookahead buffer. If the lookahead buffer
+is empty, we scan the filesystem for more free blocks, populating our lookahead
+buffer. In each scan we use an increasing offset, circling the storage as
+blocks are allocated.
+
+Here's what it might look like to allocate 4 blocks on a decently busy
+filesystem with a 32 bit lookahead and a total of 128 blocks (512 KiB
+of storage if blocks are 4 KiB):
+```
+boot... lookahead:
+ fs blocks: fffff9fffffffffeffffffffffff0000
+scanning... lookahead: fffff9ff
+ fs blocks: fffff9fffffffffeffffffffffff0000
+alloc = 21 lookahead: fffffdff
+ fs blocks: fffffdfffffffffeffffffffffff0000
+alloc = 22 lookahead: ffffffff
+ fs blocks: fffffffffffffffeffffffffffff0000
+scanning... lookahead: fffffffe
+ fs blocks: fffffffffffffffeffffffffffff0000
+alloc = 63 lookahead: ffffffff
+ fs blocks: ffffffffffffffffffffffffffff0000
+scanning... lookahead: ffffffff
+ fs blocks: ffffffffffffffffffffffffffff0000
+scanning... lookahead: ffffffff
+ fs blocks: ffffffffffffffffffffffffffff0000
+scanning... lookahead: ffff0000
+ fs blocks: ffffffffffffffffffffffffffff0000
+alloc = 112 lookahead: ffff8000
+ fs blocks: ffffffffffffffffffffffffffff8000
+```
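+
+Sketching the lookahead scheme in C, with illustrative names and sizes and the
+filesystem traversal stubbed out (a real implementation walks the tree and
+marks every reachable block):
+
+```
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+enum { LOOKAHEAD = 32, BLOCKS = 128 };
+
+static uint8_t lookahead[LOOKAHEAD/8]; // bit set = block in use
+static uint32_t offset;                // block the lookahead starts at
+
+// stub: a real implementation traverses every data structure in the
+// filesystem, setting the bit of each reachable block in
+// [off, off+LOOKAHEAD); assume it also ran once at mount
+static void fs_mark_in_use(uint8_t *bitmap, uint32_t off) {
+    (void)bitmap; (void)off;
+}
+
+// hand out one free block, rescanning the filesystem when the
+// lookahead window is exhausted
+bool alloc_block(uint32_t *block) {
+    for (uint32_t scans = 0; scans <= BLOCKS/LOOKAHEAD; scans++) {
+        for (uint32_t i = 0; i < LOOKAHEAD; i++) {
+            if (!(lookahead[i/8] & (1 << (i%8)))) {
+                lookahead[i/8] |= 1 << (i%8); // mark allocated
+                *block = (offset + i) % BLOCKS;
+                return true;
+            }
+        }
+        // window exhausted: slide forward and rescan at the new offset
+        offset = (offset + LOOKAHEAD) % BLOCKS;
+        memset(lookahead, 0, sizeof(lookahead));
+        fs_mark_in_use(lookahead, offset);
+    }
+    return false; // no free blocks anywhere: filesystem is full
+}
+```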
+
+This lookahead approach has a runtime complexity of _O(n²)_ to completely
+scan storage; however, bitmaps are surprisingly compact, and in practice only
+one or two passes are usually needed to find free blocks. Additionally, the
+performance of the allocator can be optimized by adjusting the block size or
+size of the lookahead buffer, trading either write granularity or RAM for
+allocator performance.
+
+## Wear leveling
+
+The block allocator has a secondary role: wear leveling.
+
+Wear leveling is the process of distributing wear across all blocks in the
+storage to prevent the filesystem from experiencing an early death due to
+wear on a single block in the storage.
+
+littlefs has two methods of protecting against wear:
+1. Detection and recovery from bad blocks
+2. Evenly distributing wear across dynamic blocks
+
+---
+
+Recovery from bad blocks doesn't actually have anything to do with the block
+allocator itself. Instead, it relies on the ability of the filesystem to detect
+and evict bad blocks when they occur.
+
+In littlefs, it is fairly straightforward to detect bad blocks at write time.
+All writes must be sourced by some form of data in RAM, so immediately after we
+write to a block, we can read the data back and verify that it was written
+correctly. If we find that the data on disk does not match the copy we have in
+RAM, a write error has occurred and we most likely have a bad block.
+
+Once we detect a bad block, we need to recover from it. In the case of write
+errors, we have a copy of the corrupted data in RAM, so all we need to do is
+evict the bad block, allocate a new, hopefully good block, and repeat the write
+that previously failed.
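+
+As a hedged sketch, write verification is just program, read back, compare,
+and relocate on mismatch. bd_prog and bd_read are hypothetical block device
+hooks, and alloc_block could be the allocator sketched earlier:
+
+```
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+// hypothetical block device hooks, plus the allocator sketched earlier
+extern int bd_prog(uint32_t block, const uint8_t *data, uint32_t len);
+extern int bd_read(uint32_t block, uint8_t *data, uint32_t len);
+extern bool alloc_block(uint32_t *block);
+
+// program a block, read it back, and relocate to a fresh block on mismatch
+int prog_verified(uint32_t *block, const uint8_t *data, uint32_t len) {
+    static uint8_t check[4096]; // scratch for read-back, assumes len <= 4096
+    for (;;) {
+        if (bd_prog(*block, data, len) == 0 &&
+                bd_read(*block, check, len) == 0 &&
+                memcmp(data, check, len) == 0) {
+            return 0; // data on disk matches the copy in RAM
+        }
+        // write error: evict the bad block, try again on a new block
+        if (!alloc_block(block)) {
+            return -1; // every block is bad, report "out of space"
+        }
+    }
+}
+```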
+
+The actual act of evicting the bad block and replacing it with a new block is
+left up to the filesystem's copy-on-bounded-writes (CObW) data structures. One
+property of CObW data structures is that any block can be replaced during a
+COW operation. The bounded-writes part is normally triggered by a counter, but
+nothing prevents us from triggering a COW operation as soon as we find a bad
+block.
+
+```
+ .----.
+ |root|
+ | |
+ '----'
+ v--' '----------------------v
+.----. .----.
+| A | | B |
+| | | |
+'----' '----'
+. . v---' .
+. . .----. .
+. . | C | .
+. . | | .
+. . '----' .
+. . . . .
+.----.----.----.----.----.----.----.----.----.----.
+| A |root| | C | B | |
+| | | | | | |
+'----'----'----'----'----'----'----'----'----'----'
+
+update C
+=>
+ .----.
+ |root|
+ | |
+ '----'
+ v--' '----------------------v
+.----. .----.
+| A | | B |
+| | | |
+'----' '----'
+. . v---' .
+. . .----. .
+. . |bad | .
+. . |blck| .
+. . '----' .
+. . . . .
+.----.----.----.----.----.----.----.----.----.----.
+| A |root| |bad | B | |
+| | | |blck| | |
+'----'----'----'----'----'----'----'----'----'----'
+
+oh no! bad block! relocate C
+=>
+ .----.
+ |root|
+ | |
+ '----'
+ v--' '----------------------v
+.----. .----.
+| A | | B |
+| | | |
+'----' '----'
+. . v---' .
+. . .----. .
+. . |bad | .
+. . |blck| .
+. . '----' .
+. . . . .
+.----.----.----.----.----.----.----.----.----.----.
+| A |root| |bad | B |bad | |
+| | | |blck| |blck| |
+'----'----'----'----'----'----'----'----'----'----'
+ --------->
+oh no! bad block! relocate C
+=>
+ .----.
+ |root|
+ | |
+ '----'
+ v--' '----------------------v
+.----. .----.
+| A | | B |
+| | | |
+'----' '----'
+. . v---' .
+. . .----. . .----.
+. . |bad | . | C' |
+. . |blck| . | |
+. . '----' . '----'
+. . . . . . .
+.----.----.----.----.----.----.----.----.----.----.
+| A |root| |bad | B |bad | C' | |
+| | | |blck| |blck| | |
+'----'----'----'----'----'----'----'----'----'----'
+ -------------->
+successfully relocated C, update B
+=>
+ .----.
+ |root|
+ | |
+ '----'
+ v--' '----------------------v
+.----. .----.
+| A | |bad |
+| | |blck|
+'----' '----'
+. . v---' .
+. . .----. . .----.
+. . |bad | . | C' |
+. . |blck| . | |
+. . '----' . '----'
+. . . . . . .
+.----.----.----.----.----.----.----.----.----.----.
+| A |root| |bad |bad |bad | C' | |
+| | | |blck|blck|blck| | |
+'----'----'----'----'----'----'----'----'----'----'
+
+oh no! bad block! relocate B
+=>
+ .----.
+ |root|
+ | |
+ '----'
+ v--' '----------------------v
+.----. .----. .----.
+| A | |bad | |bad |
+| | |blck| |blck|
+'----' '----' '----'
+. . v---' . . .
+. . .----. . .----. .
+. . |bad | . | C' | .
+. . |blck| . | | .
+. . '----' . '----' .
+. . . . . . . .
+.----.----.----.----.----.----.----.----.----.----.
+| A |root| |bad |bad |bad | C' |bad |
+| | | |blck|blck|blck| |blck|
+'----'----'----'----'----'----'----'----'----'----'
+ -------------->
+oh no! bad block! relocate B
+=>
+ .----.
+ |root|
+ | |
+ '----'
+ v--' '----------------------v
+.----. .----. .----.
+| A | | B' | |bad |
+| | | | |blck|
+'----' '----' '----'
+. . . | . .---' .
+. . . '--------------v-------------v
+. . . . .----. . .----.
+. . . . |bad | . | C' |
+. . . . |blck| . | |
+. . . . '----' . '----'
+. . . . . . . . .
+.----.----.----.----.----.----.----.----.----.----.
+| A |root| B' | |bad |bad |bad | C' |bad |
+| | | | |blck|blck|blck| |blck|
+'----'----'----'----'----'----'----'----'----'----'
+------------> ------------------
+successfully relocated B, update root
+=>
+ .----.
+ |root|
+ | |
+ '----'
+ v--' '--v
+.----. .----.
+| A | | B' |
+| | | |
+'----' '----'
+. . . '---------------------------v
+. . . . .----.
+. . . . | C' |
+. . . . | |
+. . . . '----'
+. . . . . .
+.----.----.----.----.----.----.----.----.----.----.
+| A |root| B' | |bad |bad |bad | C' |bad |
+| | | | |blck|blck|blck| |blck|
+'----'----'----'----'----'----'----'----'----'----'
+```
+
+We may find that the new block is also bad, but hopefully after repeating this
+cycle we'll eventually find a new block where a write succeeds. If we don't,
+that means that all blocks in our storage are bad, and we've reached the end of
+our device's usable life. At this point, littlefs will return an "out of space"
+error. This is technically true, as there are no more good blocks, but as an
+added benefit it also matches the error condition expected by users of
+dynamically sized data.
+
+---
+
+Read errors, on the other hand, are quite a bit more complicated. We don't have
+a copy of the data lingering around in RAM, so we need a way to reconstruct the
+original data even after it has been corrupted. One such mechanism for this is
+[error-correction-codes (ECC)][wikipedia-ecc].
+
+ECC is an extension to the idea of a checksum. Where a checksum such as CRC can
+detect that an error has occurred in the data, ECC can detect and actually
+correct some amount of errors. However, there is a limit to how many errors ECC
+can detect: the [Hamming bound][wikipedia-hamming-bound]. As the number of
+errors approaches the Hamming bound, we may still be able to detect errors, but
+can no longer fix the data. If we've reached this point the block is
+unrecoverable.
+
+littlefs by itself does **not** provide ECC. The block nature and relatively
+large footprint of ECC does not work well with the dynamically sized data of
+filesystems, correcting errors without RAM is complicated, and ECC fits better
+with the geometry of block devices. In fact, several NOR flash chips have extra
+storage intended for ECC, and many NAND chips can even calculate ECC on the
+chip itself.
+
+In littlefs, ECC is entirely optional. Read errors can instead be prevented
+proactively by wear leveling. But it's important to note that ECC can be used
+at the block device level to modestly extend the life of a device. littlefs
+respects any errors reported by the block device, allowing a block device to
+provide additional aggressive error detection.
+
+---
+
+To avoid read errors, we need to be proactive, as opposed to reactive as we
+were with write errors.
+
+One way to do this is to detect when the number of errors in a block exceeds
+some threshold, but is still recoverable. With ECC we can do this at write
+time, and treat the error as a write error, evicting the block before fatal
+read errors have a chance to develop.
+
+A different, more generic strategy is to proactively distribute wear across
+all blocks in the storage, with the hope that no single block fails before the
+rest of storage is approaching the end of its usable life. This is called
+wear leveling.
+
+Generally, wear leveling algorithms fall into one of two categories:
+
+1. [Dynamic wear leveling][wikipedia-dynamic-wear-leveling], where we
+   distribute wear over "dynamic" blocks. This can be accomplished by
+ only considering unused blocks.
+
+2. [Static wear leveling][wikipedia-static-wear-leveling], where we
+ distribute wear over both "dynamic" and "static" blocks. To make this work,
+ we need to consider all blocks, including blocks that already contain data.
+
+As a tradeoff for code size and complexity, littlefs (currently) only provides
+dynamic wear leveling. This is a best effort solution. Wear is not distributed
+perfectly, but it is distributed among the free blocks and greatly extends the
+life of a device.
+
+On top of this, littlefs uses a statistical wear leveling algorithm. What this
+means is that we don't actively track wear; instead, we rely on a uniform
+distribution of wear across storage to approximate a dynamic wear leveling
+algorithm. Despite the long name, this is actually a simplification of dynamic
+wear leveling.
+
+The uniform distribution of wear is left up to the block allocator, which
+creates a uniform distribution in two parts. The easy part is when the device
+is powered, in which case we allocate the blocks linearly, circling the device.
+The harder part is what to do when the device loses power. We can't just
+restart the allocator at the beginning of storage, as this would bias the wear.
+Instead, we start the allocator at a random offset every time we mount the
+filesystem. As long as this random offset is uniform, the combined allocation
+pattern is also a uniform distribution.
+
+![Cumulative wear distribution graph][wear-distribution-graph]
+
+Initially, this approach to wear leveling looks like it creates a difficult
+dependency on a power-independent random number generator, which must return
+different random numbers on each boot. However, the filesystem is in a
+relatively unique situation in that it is sitting on top of a large amount
+of entropy that persists across power loss.
+
+We can actually use the data on disk to directly drive our random number
+generator. In practice, this is implemented by xoring the checksums of each
+metadata pair, which is already calculated to fetch and mount the filesystem.
+
+```
+ .--------. \ probably random
+ .|metadata| | ^
+ || | +-> crc ----------------------> xor
+ || | | ^
+ |'--------' / |
+ '---|--|-' |
+ .-' '-------------------------. |
+ | | |
+ | .--------------> xor ------------> xor
+ | | ^ | ^
+ v crc crc v crc
+ .--------. \ ^ .--------. \ ^ .--------. \ ^
+ .|metadata|-|--|-->|metadata| | | .|metadata| | |
+ || | +--' || | +--' || | +--'
+ || | | || | | || | |
+ |'--------' / |'--------' / |'--------' /
+ '---|--|-' '----|---' '---|--|-'
+ .-' '-. | .-' '-.
+ v v v v v
+.--------. .--------. .--------. .--------. .--------.
+| data | | data | | data | | data | | data |
+| | | | | | | | | |
+| | | | | | | | | |
+'--------' '--------' '--------' '--------' '--------'
+```
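+
+A sketch of that, with hypothetical fs_npairs and fs_pair_crc helpers standing
+in for information we already have after fetching every metadata pair during
+mount:
+
+```
+#include <stdint.h>
+
+// hypothetical: the number of metadata pairs, and the checksum of a
+// pair's most recent commit (both known after mounting)
+extern uint32_t fs_npairs(void);
+extern uint32_t fs_pair_crc(uint32_t pair);
+
+// derive the allocator's starting offset from entropy already on disk
+uint32_t alloc_seed(void) {
+    uint32_t seed = 0;
+    for (uint32_t i = 0; i < fs_npairs(); i++) {
+        seed ^= fs_pair_crc(i); // changes whenever the filesystem changes
+    }
+    return seed;
+}
+```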
+
+Note that this random number generator is not perfect. It only returns unique
+random numbers when the filesystem is modified. This is exactly what we want
+for distributing wear in the allocator, but means this random number generator
+is not useful for general use.
+
+---
+
+Together, bad block detection and dynamic wear leveling provide a best effort
+solution for avoiding the early death of a filesystem due to wear. Importantly,
+littlefs's wear leveling algorithm provides a key feature: You can increase the
+life of a device simply by increasing the size of storage. And if more
+aggressive wear leveling is desired, you can always combine littlefs with a
+[flash translation layer (FTL)][wikipedia-ftl] to get a small power resilient
+filesystem with static wear leveling.
+
+## Files
+
+Now that we have our building blocks out of the way, we can start looking at
+our filesystem as a whole.
+
+The first step: How do we actually store our files?
+
+We've determined that CTZ skip-lists are pretty good at storing data compactly,
+so following the precedent found in other filesystems we could give each file
+a skip-list stored in a metadata pair that acts as an inode for the file.
+
+
+```
+ .--------.
+ .|metadata|
+ || |
+ || |
+ |'--------'
+ '----|---'
+ v
+.--------. .--------. .--------. .--------.
+| data 0 |<-| data 1 |<-| data 2 |<-| data 3 |
+| |<-| |--| | | |
+| | | | | | | |
+'--------' '--------' '--------' '--------'
+```
+
+However, this doesn't work well when files are small, which is common for
+embedded systems. Compared to PCs, _all_ data in an embedded system is small.
+
+Consider a small 4-byte file. With a two-block metadata pair and one block for
+the CTZ skip-list, we find ourselves using a full 3 blocks. On most NOR flash
+with 4 KiB blocks, this is 12 KiB of overhead. A ridiculous 3072x increase.
+
+```
+file stored as inode, 4 bytes costs ~12 KiB
+
+ .----------------. \
+.| revision | |
+||----------------| \ |
+|| skiplist ---. +- metadata |
+||----------------| | / 4x8 bytes |
+|| checksum | | 32 bytes |
+||----------------| | |
+|| | | | +- metadata pair
+|| v | | | 2x4 KiB
+|| | | | 8 KiB
+|| | | |
+|| | | |
+|| | | |
+|'----------------' | |
+'----------------' | /
+ .--------'
+ v
+ .----------------. \ \
+ | data | +- data |
+ |----------------| / 4 bytes |
+ | | |
+ | | |
+ | | |
+ | | +- data block
+ | | | 4 KiB
+ | | |
+ | | |
+ | | |
+ | | |
+ | | |
+ '----------------' /
+```
+
+We can make several improvements. First, instead of giving each file its own
+metadata pair, we can store multiple files in a single metadata pair. One way
+to do this is to directly associate a directory with a metadata pair (or a
+linked list of metadata pairs). This makes it easy for multiple files to share
+the directory's metadata pair for logging and reduces the collective storage
+overhead.
+
+The strict binding of metadata pairs and directories also gives users
+direct control over storage utilization depending on how they organize their
+directories.
+
+```
+multiple files stored in metadata pair, 4 bytes costs ~4 KiB
+
+ .----------------.
+ .| revision |
+ ||----------------|
+ || A name |
+ || A skiplist -----.
+ ||----------------| | \
+ || B name | | +- metadata
+ || B skiplist ---. | | 4x8 bytes
+ ||----------------| | | / 32 bytes
+ || checksum | | |
+ ||----------------| | |
+ || | | | |
+ || v | | |
+ |'----------------' | |
+ '----------------' | |
+ .----------------' |
+ v v
+.----------------. .----------------. \ \
+| A data | | B data | +- data |
+| | |----------------| / 4 bytes |
+| | | | |
+| | | | |
+| | | | |
+| | | | + data block
+| | | | | 4 KiB
+| | | | |
+|----------------| | | |
+| | | | |
+| | | | |
+| | | | |
+'----------------' '----------------' /
+```
+
+The second improvement we can make is noticing that for very small files, our
+attempts to use CTZ skip-lists for compact storage backfire. Metadata pairs
+have a ~4x storage cost, so if our file is smaller than 1/4 the block size,
+there's actually no benefit in storing our file outside of our metadata pair.
+
+In this case, we can store the file directly in our directory's metadata pair.
+We call this an inline file, and it allows a directory to store many small
+files quite efficiently. Our previous 4 byte file now only takes up a
+theoretical 16 bytes on disk.
+
+```
+inline files stored in metadata pair, 4 bytes costs ~16 bytes
+
+ .----------------.
+.| revision |
+||----------------|
+|| A name |
+|| A skiplist ---.
+||----------------| | \
+|| B name | | +- data
+|| B data | | | 4x4 bytes
+||----------------| | / 16 bytes
+|| checksum | |
+||----------------| |
+|| | | |
+|| v | |
+|'----------------' |
+'----------------' |
+ .---------'
+ v
+ .----------------.
+ | A data |
+ | |
+ | |
+ | |
+ | |
+ | |
+ | |
+ | |
+ |----------------|
+ | |
+ | |
+ | |
+ '----------------'
+```
+
+Once the file exceeds 1/4 the block size, we switch to a CTZ skip-list. This
+means that our files never use more than 4x storage overhead, decreasing as
+the file grows in size.
+
+![File storage cost graph][file-cost-graph]
+
+## Directories
+
+Now we just need directories to store our files. As mentioned above, we want
+a strict binding of directories and metadata pairs, but there are a few
+complications we need to sort out.
+
+On their own, each directory is a linked-list of metadata pairs. This lets us
+store an unlimited number of files in each directory, and we don't need to
+worry about the runtime complexity of unbounded logs. We can store other
+directory pointers in our metadata pairs, which gives us a directory tree, much
+like what you find on other filesystems.
+
+```
+ .--------.
+ .| root |
+ || |
+ || |
+ |'--------'
+ '---|--|-'
+ .-' '-------------------------.
+ v v
+ .--------. .--------. .--------.
+ .| dir A |------->| dir A | .| dir B |
+ || | || | || |
+ || | || | || |
+ |'--------' |'--------' |'--------'
+ '---|--|-' '----|---' '---|--|-'
+ .-' '-. | .-' '-.
+ v v v v v
+.--------. .--------. .--------. .--------. .--------.
+| file C | | file D | | file E | | file F | | file G |
+| | | | | | | | | |
+| | | | | | | | | |
+'--------' '--------' '--------' '--------' '--------'
+```
+
+The main complication is, once again, traversal with a constant amount of
+[RAM]. The directory tree is a tree, and the unfortunate fact is you can't
+traverse a tree with constant RAM.
+
+Fortunately, the elements of our tree are metadata pairs, so unlike CTZ
+skip-lists, we're not limited to strict COW operations. One thing we can do is
+thread a linked-list through our tree, explicitly enabling cheap traversal
+over the entire filesystem.
+
+```
+ .--------.
+ .| root |-.
+ || | |
+ .-------|| |-'
+ | |'--------'
+ | '---|--|-'
+ | .-' '-------------------------.
+ | v v
+ | .--------. .--------. .--------.
+ '->| dir A |------->| dir A |------->| dir B |
+ || | || | || |
+ || | || | || |
+ |'--------' |'--------' |'--------'
+ '---|--|-' '----|---' '---|--|-'
+ .-' '-. | .-' '-.
+ v v v v v
+.--------. .--------. .--------. .--------. .--------.
+| file C | | file D | | file E | | file F | | file G |
+| | | | | | | | | |
+| | | | | | | | | |
+'--------' '--------' '--------' '--------' '--------'
+```
+
+Unfortunately, not sticking to pure COW operations creates some problems. Now,
+whenever we want to manipulate the directory tree, multiple pointers need to be
+updated. If you're familiar with designing atomic data structures this should
+set off a bunch of red flags.
+
+To work around this, our threaded linked-list has a bit of leeway. Instead of
+only containing metadata pairs found in our filesystem, it is allowed to
+contain metadata pairs that have no parent because of a power loss. These are
+called orphaned metadata pairs.
+
+With the possibility of orphans, we can build power loss resilient operations
+that maintain a filesystem tree threaded with a linked-list for traversal.
+
+Adding a directory to our tree:
+
+```
+ .--------.
+ .| root |-.
+ || | |
+.-------|| |-'
+| |'--------'
+| '---|--|-'
+| .-' '-.
+| v v
+| .--------. .--------.
+'->| dir A |->| dir C |
+ || | || |
+ || | || |
+ |'--------' |'--------'
+ '--------' '--------'
+
+allocate dir B
+=>
+ .--------.
+ .| root |-.
+ || | |
+.-------|| |-'
+| |'--------'
+| '---|--|-'
+| .-' '-.
+| v v
+| .--------. .--------.
+'->| dir A |--->| dir C |
+ || | .->| |
+ || | | || |
+ |'--------' | |'--------'
+ '--------' | '--------'
+ |
+ .--------. |
+ .| dir B |-'
+ || |
+ || |
+ |'--------'
+ '--------'
+
+insert dir B into threaded linked-list, creating an orphan
+=>
+ .--------.
+ .| root |-.
+ || | |
+.-------|| |-'
+| |'--------'
+| '---|--|-'
+| .-' '-------------.
+| v v
+| .--------. .--------. .--------.
+'->| dir A |->| dir B |->| dir C |
+ || | || orphan!| || |
+ || | || | || |
+ |'--------' |'--------' |'--------'
+ '--------' '--------' '--------'
+
+add dir B to parent directory
+=>
+ .--------.
+ .| root |-.
+ || | |
+.-------------|| |-'
+| |'--------'
+| '--|-|-|-'
+| .------' | '-------.
+| v v v
+| .--------. .--------. .--------.
+'->| dir A |->| dir B |->| dir C |
+ || | || | || |
+ || | || | || |
+ |'--------' |'--------' |'--------'
+ '--------' '--------' '--------'
+```
+
+Removing a directory:
+
+```
+ .--------.
+ .| root |-.
+ || | |
+.-------------|| |-'
+| |'--------'
+| '--|-|-|-'
+| .------' | '-------.
+| v v v
+| .--------. .--------. .--------.
+'->| dir A |->| dir B |->| dir C |
+ || | || | || |
+ || | || | || |
+ |'--------' |'--------' |'--------'
+ '--------' '--------' '--------'
+
+remove dir B from parent directory, creating an orphan
+=>
+ .--------.
+ .| root |-.
+ || | |
+.-------|| |-'
+| |'--------'
+| '---|--|-'
+| .-' '-------------.
+| v v
+| .--------. .--------. .--------.
+'->| dir A |->| dir B |->| dir C |
+ || | || orphan!| || |
+ || | || | || |
+ |'--------' |'--------' |'--------'
+ '--------' '--------' '--------'
+
+remove dir B from threaded linked-list, returning dir B to free blocks
+=>
+ .--------.
+ .| root |-.
+ || | |
+.-------|| |-'
+| |'--------'
+| '---|--|-'
+| .-' '-.
+| v v
+| .--------. .--------.
+'->| dir A |->| dir C |
+ || | || |
+ || | || |
+ |'--------' |'--------'
+ '--------' '--------'
+```
+
+In addition to normal directory tree operations, we can use orphans to evict
+blocks in a metadata pair when the block goes bad or exceeds its allocated
+erases. If we lose power while evicting a metadata block we may end up with
+a situation where the filesystem references the replacement block while the
+threaded linked-list still contains the evicted block. We call this a
+half-orphan.
+
+```
+ .--------.
+ .| root |-.
+ || | |
+.-------------|| |-'
+| |'--------'
+| '--|-|-|-'
+| .------' | '-------.
+| v v v
+| .--------. .--------. .--------.
+'->| dir A |->| dir B |->| dir C |
+ || | || | || |
+ || | || | || |
+ |'--------' |'--------' |'--------'
+ '--------' '--------' '--------'
+
+try to write to dir B
+=>
+ .--------.
+ .| root |-.
+ || | |
+.----------------|| |-'
+| |'--------'
+| '-|-||-|-'
+| .--------' || '-----.
+| v |v v
+| .--------. .--------. .--------.
+'->| dir A |---->| dir B |->| dir C |
+ || |-. | | || |
+ || | | | | || |
+ |'--------' | '--------' |'--------'
+ '--------' | v '--------'
+ | .--------.
+ '->| dir B |
+ | bad |
+ | block! |
+ '--------'
+
+oh no! bad block detected, allocate replacement
+=>
+ .--------.
+ .| root |-.
+ || | |
+.----------------|| |-'
+| |'--------'
+| '-|-||-|-'
+| .--------' || '-------.
+| v |v v
+| .--------. .--------. .--------.
+'->| dir A |---->| dir B |--->| dir C |
+ || |-. | | .->| |
+ || | | | | | || |
+ |'--------' | '--------' | |'--------'
+ '--------' | v | '--------'
+ | .--------. |
+ '->| dir B | |
+ | bad | |
+ | block! | |
+ '--------' |
+ |
+ .--------. |
+ | dir B |--'
+ | |
+ | |
+ '--------'
+
+insert replacement in threaded linked-list, creating a half-orphan
+=>
+ .--------.
+ .| root |-.
+ || | |
+.----------------|| |-'
+| |'--------'
+| '-|-||-|-'
+| .--------' || '-------.
+| v |v v
+| .--------. .--------. .--------.
+'->| dir A |---->| dir B |--->| dir C |
+ || |-. | | .->| |
+ || | | | | | || |
+ |'--------' | '--------' | |'--------'
+ '--------' | v | '--------'
+ | .--------. |
+ | | dir B | |
+ | | bad | |
+ | | block! | |
+ | '--------' |
+ | |
+ | .--------. |
+ '->| dir B |--'
+ | half |
+ | orphan!|
+ '--------'
+
+fix reference in parent directory
+=>
+ .--------.
+ .| root |-.
+ || | |
+.-------------|| |-'
+| |'--------'
+| '--|-|-|-'
+| .------' | '-------.
+| v v v
+| .--------. .--------. .--------.
+'->| dir A |->| dir B |->| dir C |
+ || | || | || |
+ || | || | || |
+ |'--------' |'--------' |'--------'
+ '--------' '--------' '--------'
+```
+
+Finding orphans and half-orphans is expensive, requiring an _O(n²)_
+comparison of every metadata pair with every directory entry. But the tradeoff
+is a power resilient filesystem that works with only a bounded amount of RAM.
+Fortunately, we only need to check for orphans on the first allocation after
+boot, and a read-only littlefs can ignore the threaded linked-list entirely.
+
+If we only had some sort of global state, then we could also store a flag and
+avoid searching for orphans unless we knew we were specifically interrupted
+while manipulating the directory tree (foreshadowing!).
+
+## The move problem
+
+We have one last challenge: the move problem. Phrasing the problem is simple:
+
+How do you atomically move a file between two directories?
+
+In littlefs we can atomically commit to directories, but we can't create
+an atomic commit that spans multiple directories. The filesystem must go
+through a minimum of two distinct states to complete a move.
+
+To make matters worse, file moves are a common form of synchronization for
+filesystems. As a filesystem designed for power-loss, it's important we get
+atomic moves right.
+
+So what can we do?
+
+- We definitely can't just let power-loss result in duplicated or lost files.
+ This could easily break users' code and would only reveal itself in extreme
+ cases. We were only able to be lazy about the threaded linked-list because
+ it isn't user facing and we can handle the corner cases internally.
+
+- Some filesystems propagate COW operations up the tree until a common parent
+ is found. Unfortunately this interacts poorly with our threaded tree and
+ brings back the issue of upward propagation of wear.
+
+- In a previous version of littlefs we tried to solve this problem by going
+ back and forth between the source and destination, marking and unmarking the
+ file as moving in order to make the move atomic from the user perspective.
+ This worked, but not well. Finding failed moves was expensive and required
+ a unique identifier for each file.
+
+In the end, solving the move problem required creating a new mechanism for
+sharing knowledge between multiple metadata pairs. In littlefs this led to the
+introduction of a mechanism called "global state".
+
+---
+
+Global state is a small set of state that can be updated from _any_ metadata
+pair. Combining global state with metadata pairs' ability to update multiple
+entries in one commit gives us a powerful tool for crafting complex atomic
+operations.
+
+How does global state work?
+
+Global state exists as a set of deltas that are distributed across the metadata
+pairs in the filesystem. The actual global state can be built out of these
+deltas by xoring together all of the deltas in the filesystem.
+
+```
+ .--------. .--------. .--------. .--------. .--------.
+.| |->| gdelta |->| |->| gdelta |->| gdelta |
+|| | || 0x23 | || | || 0xff | || 0xce |
+|| | || | || | || | || |
+|'--------' |'--------' |'--------' |'--------' |'--------'
+'--------' '----|---' '--------' '----|---' '----|---'
+ v v v
+ 0x00 --> xor ------------------> xor ------> xor --> gstate 0x12
+```
+
+To update the global state from a metadata pair, we take the global state we
+know and xor it with both our changes and any existing delta in the metadata
+pair. Committing this new delta to the metadata pair commits the changes to
+the filesystem's global state.
+
+```
+ .--------. .--------. .--------. .--------. .--------.
+.| |->| gdelta |->| |->| gdelta |->| gdelta |
+|| | || 0x23 | || | || 0xff | || 0xce |
+|| | || | || | || | || |
+|'--------' |'--------' |'--------' |'--------' |'--------'
+'--------' '----|---' '--------' '--|---|-' '----|---'
+ v v | v
+ 0x00 --> xor ----------------> xor -|------> xor --> gstate = 0x12
+ | |
+ | |
+change gstate to 0xab --> xor <------------|--------------------------'
+=> | v
+ '------------> xor
+ |
+ v
+ .--------. .--------. .--------. .--------. .--------.
+.| |->| gdelta |->| |->| gdelta |->| gdelta |
+|| | || 0x23 | || | || 0x46 | || 0xce |
+|| | || | || | || | || |
+|'--------' |'--------' |'--------' |'--------' |'--------'
+'--------' '----|---' '--------' '----|---' '----|---'
+ v v v
+ 0x00 --> xor ------------------> xor ------> xor --> gstate = 0xab
+```
+
+To make this efficient, we always keep a copy of the global state in RAM. We
+only need to iterate over our metadata pairs and build the global state when
+the filesystem is mounted.
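+
+Here's a minimal sketch of the xor algebra, assuming the global state fits in
+a single 32-bit word (littlefs's real global state is a small struct, but the
+math is the same):
+
+```
+#include <stdint.h>
+
+uint32_t gstate; // copy in RAM, rebuilt at mount by xoring every delta
+
+// rebuild the RAM copy while mounting
+void gstate_mount(const uint32_t *gdeltas, uint32_t npairs) {
+    gstate = 0;
+    for (uint32_t i = 0; i < npairs; i++) {
+        gstate ^= gdeltas[i];
+    }
+}
+
+// change the global state through a single metadata pair: xor the known
+// state and the new state into that pair's delta, then write the delta
+// out as part of the pair's normal commit
+void gstate_update(uint32_t *pair_gdelta, uint32_t new_gstate) {
+    *pair_gdelta ^= gstate ^ new_gstate;
+    gstate = new_gstate; // keep the RAM copy in sync
+}
+```
+
+Replaying the example above: deltas 0x23, 0xff, and 0xce xor to a gstate of
+0x12, and committing a change to 0xab through the pair holding the 0xff delta
+rewrites it to 0xff ^ 0x12 ^ 0xab = 0x46, matching the diagram.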
+
+You may have noticed that global state is very expensive. We keep a copy in
+RAM and a delta in an unbounded number of metadata pairs. Even if we reset
+the global state to its initial value, we can't easily clean up the deltas on
+disk. For this reason, it's very important that we keep the size of global
+state bounded and extremely small. But, even with a strict budget, global
+state is incredibly valuable.
+
+---
+
+Now we can solve the move problem. We can create global state describing our
+move atomically with the creation of the new file, and we can clear this move
+state atomically with the removal of the old file.
+
+```
+ .--------. gstate = no move
+ .| root |-.
+ || | |
+.-------------|| |-'
+| |'--------'
+| '--|-|-|-'
+| .------' | '-------.
+| v v v
+| .--------. .--------. .--------.
+'->| dir A |->| dir B |->| dir C |
+ || | || | || |
+ || | || | || |
+ |'--------' |'--------' |'--------'
+ '----|---' '--------' '--------'
+ v
+ .--------.
+ | file D |
+ | |
+ | |
+ '--------'
+
+begin move, add reference in dir C, change gstate to have move
+=>
+ .--------. gstate = moving file D in dir A (m1)
+ .| root |-.
+ || | |
+.-------------|| |-'
+| |'--------'
+| '--|-|-|-'
+| .------' | '-------.
+| v v v
+| .--------. .--------. .--------.
+'->| dir A |->| dir B |->| dir C |
+ || | || | || gdelta |
+ || | || | || =m1 |
+ |'--------' |'--------' |'--------'
+ '----|---' '--------' '----|---'
+ | .----------------'
+ v v
+ .--------.
+ | file D |
+ | |
+ | |
+ '--------'
+
+complete move, remove reference in dir A, change gstate to no move
+=>
+ .--------. gstate = no move (m1^~m1)
+ .| root |-.
+ || | |
+.-------------|| |-'
+| |'--------'
+| '--|-|-|-'
+| .------' | '-------.
+| v v v
+| .--------. .--------. .--------.
+'->| dir A |->| dir B |->| dir C |
+ || gdelta | || | || gdelta |
+ || =~m1 | || | || =m1 |
+ |'--------' |'--------' |'--------'
+ '--------' '--------' '----|---'
+ v
+ .--------.
+ | file D |
+ | |
+ | |
+ '--------'
+```
+
+
+If, after building our global state during mount, we find information
+describing an ongoing move, we know we lost power during a move and the file
+is duplicated in both the source and destination directories. If this happens,
+we can resolve the move using the information in the global state to remove
+one of the files.
+
+```
+ .--------. gstate = moving file D in dir A (m1)
+ .| root |-. ^
+ || |------------> xor
+.---------------|| |-' ^
+| |'--------' |
+| '--|-|-|-' |
+| .--------' | '---------. |
+| | | | |
+| | .----------> xor --------> xor
+| v | v ^ v ^
+| .--------. | .--------. | .--------. |
+'->| dir A |-|->| dir B |-|->| dir C | |
+ || |-' || |-' || gdelta |-'
+ || | || | || =m1 |
+ |'--------' |'--------' |'--------'
+ '----|---' '--------' '----|---'
+ | .---------------------'
+ v v
+ .--------.
+ | file D |
+ | |
+ | |
+ '--------'
+```
+
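+A sketch of this recovery step, with hypothetical helpers standing in for
+littlefs's internals (in littlefs proper this is folded into normal metadata
+commits):
+
+``` c
+#include <stdbool.h>
+
+// hypothetical: deletes entry `id` from metadata pair `dir` while
+// atomically xoring the cancelling delta (~m1) into the global state
+void remove_entry(int dir, int id);
+
+// description of a pending move recovered from the global state
+struct move_state {
+    bool pending;  // was power lost mid-move?
+    int src_dir;   // metadata pair still holding the source entry
+    int src_id;    // id of the source entry in that pair
+};
+
+// after building the global state on mount, finish any interrupted
+// move by removing the leftover source; the destination already exists
+void resolve_move(struct move_state *m) {
+    if (m->pending) {
+        remove_entry(m->src_dir, m->src_id);
+        m->pending = false;
+    }
+}
+```
+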
+We can also move directories the same way we move files. There is the threaded
+linked-list to consider, but leaving the threaded linked-list unchanged works
+fine as the order doesn't really matter.
+
+```
+ .--------. gstate = no move (m1^~m1)
+ .| root |-.
+ || | |
+.-------------|| |-'
+| |'--------'
+| '--|-|-|-'
+| .------' | '-------.
+| v v v
+| .--------. .--------. .--------.
+'->| dir A |->| dir B |->| dir C |
+ || gdelta | || | || gdelta |
+ || =~m1 | || | || =m1 |
+ |'--------' |'--------' |'--------'
+ '--------' '--------' '----|---'
+ v
+ .--------.
+ | file D |
+ | |
+ | |
+ '--------'
+
+begin move, add reference in dir C, change gstate to have move
+=>
+ .--------. gstate = moving dir B in root (m1^~m1^m2)
+ .| root |-.
+ || | |
+.--------------|| |-'
+| |'--------'
+| '--|-|-|-'
+| .-------' | '----------.
+| v | v
+| .--------. | .--------.
+'->| dir A |-. | .->| dir C |
+ || gdelta | | | | || gdelta |
+ || =~m1 | | | | || =m1^m2 |
+ |'--------' | | | |'--------'
+ '--------' | | | '---|--|-'
+ | | .-------' |
+ | v v | v
+ | .--------. | .--------.
+ '->| dir B |-' | file D |
+ || | | |
+ || | | |
+ |'--------' '--------'
+ '--------'
+
+complete move, remove reference in root, change gstate to no move
+=>
+ .--------. gstate = no move (m1^~m1^m2^~m2)
+ .| root |-.
+ || gdelta | |
+.-----------|| =~m2 |-'
+| |'--------'
+| '---|--|-'
+| .-----' '-----.
+| v v
+| .--------. .--------.
+'->| dir A |-. .->| dir C |
+ || gdelta | | | || gdelta |
+ || =~m1 | | '-|| =m1^m2 |-------.
+ |'--------' | |'--------' |
+ '--------' | '---|--|-' |
+ | .-' '-. |
+ | v v |
+ | .--------. .--------. |
+ '->| dir B |--| file D |-'
+ || | | |
+ || | | |
+ |'--------' '--------'
+ '--------'
+```
+
+Global state gives us a powerful tool we can use to solve the move problem.
+And the result is surprisingly performant, only needing the minimum number
+of states and using the same number of commits as a naive move. Additionally,
+global state gives us a bit of persistent state we can use for some other
+small improvements.
+
+## Conclusion
+
+And that's littlefs, thanks for reading!
+
+
+[wikipedia-flash]: https://en.wikipedia.org/wiki/Flash_memory
+[wikipedia-sna]: https://en.wikipedia.org/wiki/Serial_number_arithmetic
+[wikipedia-crc]: https://en.wikipedia.org/wiki/Cyclic_redundancy_check
+[wikipedia-cow]: https://en.wikipedia.org/wiki/Copy-on-write
+[wikipedia-B-tree]: https://en.wikipedia.org/wiki/B-tree
+[wikipedia-B+-tree]: https://en.wikipedia.org/wiki/B%2B_tree
+[wikipedia-skip-list]: https://en.wikipedia.org/wiki/Skip_list
+[wikipedia-ctz]: https://en.wikipedia.org/wiki/Count_trailing_zeros
+[wikipedia-ecc]: https://en.wikipedia.org/wiki/Error_correction_code
+[wikipedia-hamming-bound]: https://en.wikipedia.org/wiki/Hamming_bound
+[wikipedia-dynamic-wear-leveling]: https://en.wikipedia.org/wiki/Wear_leveling#Dynamic_wear_leveling
+[wikipedia-static-wear-leveling]: https://en.wikipedia.org/wiki/Wear_leveling#Static_wear_leveling
+[wikipedia-ftl]: https://en.wikipedia.org/wiki/Flash_translation_layer
+
+[oeis]: https://oeis.org
+[A001511]: https://oeis.org/A001511
+[A005187]: https://oeis.org/A005187
+
+[fat]: https://en.wikipedia.org/wiki/Design_of_the_FAT_file_system
+[ext2]: http://e2fsprogs.sourceforge.net/ext2intro.html
+[jffs]: https://www.sourceware.org/jffs2/jffs2-html
+[yaffs]: https://yaffs.net/documents/how-yaffs-works
+[spiffs]: https://github.com/pellepl/spiffs/blob/master/docs/TECH_SPEC
+[ext4]: https://ext4.wiki.kernel.org/index.php/Ext4_Design
+[ntfs]: https://en.wikipedia.org/wiki/NTFS
+[btrfs]: https://btrfs.wiki.kernel.org/index.php/Btrfs_design
+[zfs]: https://en.wikipedia.org/wiki/ZFS
+
+[cow]: https://upload.wikimedia.org/wikipedia/commons/0/0c/Cow_female_black_white.jpg
+[elephant]: https://upload.wikimedia.org/wikipedia/commons/3/37/African_Bush_Elephant.jpg
+[ram]: https://upload.wikimedia.org/wikipedia/commons/9/97/New_Mexico_Bighorn_Sheep.JPG
+
+[metadata-formula1]: https://latex.codecogs.com/svg.latex?cost%20%3D%20n%20+%20n%20%5Cfrac%7Bs%7D%7Bd+1%7D
+[metadata-formula2]: https://latex.codecogs.com/svg.latex?s%20%3D%20r%20%5Cfrac%7Bsize%7D%7Bn%7D
+[metadata-formula3]: https://latex.codecogs.com/svg.latex?d%20%3D%20%281-r%29%20%5Cfrac%7Bsize%7D%7Bn%7D
+[metadata-formula4]: https://latex.codecogs.com/svg.latex?cost%20%3D%20n%20+%20n%20%5Cfrac%7Br%5Cfrac%7Bsize%7D%7Bn%7D%7D%7B%281-r%29%5Cfrac%7Bsize%7D%7Bn%7D+1%7D
+
+[ctz-formula1]: https://latex.codecogs.com/svg.latex?%5Clim_%7Bn%5Cto%5Cinfty%7D%5Cfrac%7B1%7D%7Bn%7D%5Csum_%7Bi%3D0%7D%5E%7Bn%7D%5Cleft%28%5Ctext%7Bctz%7D%28i%29+1%5Cright%29%20%3D%20%5Csum_%7Bi%3D0%7D%5Cfrac%7B1%7D%7B2%5Ei%7D%20%3D%202
+[ctz-formula2]: https://latex.codecogs.com/svg.latex?B%20%3D%20%5Cfrac%7Bw%7D%7B8%7D%5Cleft%5Clceil%5Clog_2%5Cleft%28%5Cfrac%7B2%5Ew%7D%7BB-2%5Cfrac%7Bw%7D%7B8%7D%7D%5Cright%29%5Cright%5Crceil
+[ctz-formula3]: https://latex.codecogs.com/svg.latex?N%20%3D%20%5Csum_i%5En%5Cleft%5BB-%5Cfrac%7Bw%7D%7B8%7D%5Cleft%28%5Ctext%7Bctz%7D%28i%29+1%5Cright%29%5Cright%5D
+[ctz-formula4]: https://latex.codecogs.com/svg.latex?%5Csum_i%5En%5Cleft%28%5Ctext%7Bctz%7D%28i%29+1%5Cright%29%20%3D%202n-%5Ctext%7Bpopcount%7D%28n%29
+[ctz-formula5]: https://latex.codecogs.com/svg.latex?N%20%3D%20Bn%20-%20%5Cfrac%7Bw%7D%7B8%7D%5Cleft%282n-%5Ctext%7Bpopcount%7D%28n%29%5Cright%29
+[ctz-formula6]: https://latex.codecogs.com/svg.latex?n%20%3D%20%5Cleft%5Clfloor%5Cfrac%7BN-%5Cfrac%7Bw%7D%7B8%7D%5Cleft%28%5Ctext%7Bpopcount%7D%5Cleft%28%5Cfrac%7BN%7D%7BB-2%5Cfrac%7Bw%7D%7B8%7D%7D-1%5Cright%29+2%5Cright%29%7D%7BB-2%5Cfrac%7Bw%7D%7B8%7D%7D%5Cright%5Crfloor
+[ctz-formula7]: https://latex.codecogs.com/svg.latex?%5Cmathit%7Boff%7D%20%3D%20N%20-%20%5Cleft%28B-2%5Cfrac%7Bw%7D%7B8%7D%5Cright%29n%20-%20%5Cfrac%7Bw%7D%7B8%7D%5Ctext%7Bpopcount%7D%28n%29
+
+[bigB]: https://latex.codecogs.com/svg.latex?B
+[d]: https://latex.codecogs.com/svg.latex?d
+[m]: https://latex.codecogs.com/svg.latex?m
+[bigN]: https://latex.codecogs.com/svg.latex?N
+[n]: https://latex.codecogs.com/svg.latex?n
+[n']: https://latex.codecogs.com/svg.latex?n%27
+[r]: https://latex.codecogs.com/svg.latex?r
+[s]: https://latex.codecogs.com/svg.latex?s
+[w]: https://latex.codecogs.com/svg.latex?w
+[x]: https://latex.codecogs.com/svg.latex?x
+
+[metadata-cost-graph]: https://raw.githubusercontent.com/geky/littlefs/gh-images/metadata-cost.svg?sanitize=true
+[wear-distribution-graph]: https://raw.githubusercontent.com/geky/littlefs/gh-images/wear-distribution.svg?sanitize=true
+[file-cost-graph]: https://raw.githubusercontent.com/geky/littlefs/gh-images/file-cost.svg?sanitize=true
diff --git a/packages/littlefs-v2.11.2/LICENSE.md b/packages/littlefs-v2.11.2/LICENSE.md
new file mode 100644
index 0000000..e6c3a7b
--- /dev/null
+++ b/packages/littlefs-v2.11.2/LICENSE.md
@@ -0,0 +1,25 @@
+Copyright (c) 2022, The littlefs authors.
+Copyright (c) 2017, Arm Limited. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+- Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+- Redistributions in binary form must reproduce the above copyright notice, this
+ list of conditions and the following disclaimer in the documentation and/or
+ other materials provided with the distribution.
+- Neither the name of ARM nor the names of its contributors may be used to
+ endorse or promote products derived from this software without specific prior
+ written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/packages/littlefs-v2.11.2/README.md b/packages/littlefs-v2.11.2/README.md
new file mode 100644
index 0000000..d4dfdaa
--- /dev/null
+++ b/packages/littlefs-v2.11.2/README.md
@@ -0,0 +1,342 @@
+## littlefs for RT-Thread
+
+A little fail-safe filesystem designed for microcontrollers.
+
+```
+ | | | .---._____
+ .-----. | |
+--|o |---| littlefs |
+--| |---| |
+ '-----' '----------'
+ | | |
+```
+
+**Power-loss resilience** - littlefs is designed to handle random power
+failures. All file operations have strong copy-on-write guarantees and if
+power is lost the filesystem will fall back to the last known good state.
+
+**Dynamic wear leveling** - littlefs is designed with flash in mind, and
+provides wear leveling over dynamic blocks. Additionally, littlefs can
+detect bad blocks and work around them.
+
+**Bounded RAM/ROM** - littlefs is designed to work with a small amount of
+memory. RAM usage is strictly bounded, which means RAM consumption does not
+change as the filesystem grows. The filesystem contains no unbounded
+recursion and dynamic memory is limited to configurable buffers that can be
+provided statically.
+
+## Example
+
+Here's a simple example that updates a file named `boot_count` every time
+main runs. The program can be interrupted at any time without losing track
+of how many times it has been booted and without corrupting the filesystem:
+
+``` c
+#include <stdio.h>
+
+#include "lfs.h"
+
+// variables used by the filesystem
+lfs_t lfs;
+lfs_file_t file;
+
+// configuration of the filesystem is provided by this struct
+const struct lfs_config cfg = {
+ // block device operations
+ .read = user_provided_block_device_read,
+ .prog = user_provided_block_device_prog,
+ .erase = user_provided_block_device_erase,
+ .sync = user_provided_block_device_sync,
+
+ // block device configuration
+ .read_size = 16,
+ .prog_size = 16,
+ .block_size = 4096,
+ .block_count = 128,
+ .cache_size = 16,
+ .lookahead_size = 16,
+ .block_cycles = 500,
+};
+
+// entry point
+int main(void) {
+ // mount the filesystem
+ int err = lfs_mount(&lfs, &cfg);
+
+ // reformat if we can't mount the filesystem
+ // this should only happen on the first boot
+ if (err) {
+ lfs_format(&lfs, &cfg);
+ lfs_mount(&lfs, &cfg);
+ }
+
+ // read current count
+ uint32_t boot_count = 0;
+ lfs_file_open(&lfs, &file, "boot_count", LFS_O_RDWR | LFS_O_CREAT);
+ lfs_file_read(&lfs, &file, &boot_count, sizeof(boot_count));
+
+ // update boot count
+ boot_count += 1;
+ lfs_file_rewind(&lfs, &file);
+ lfs_file_write(&lfs, &file, &boot_count, sizeof(boot_count));
+
+ // remember the storage is not updated until the file is closed successfully
+ lfs_file_close(&lfs, &file);
+
+ // release any resources we were using
+ lfs_unmount(&lfs);
+
+ // print the boot count
+    printf("boot_count: %d\n", (int)boot_count);
+}
+```
+
+## Usage
+
+Detailed documentation (or at least as much detail as is currently available)
+can be found in the comments in [lfs.h](lfs.h).
+
+littlefs takes in a configuration structure that defines how the filesystem
+operates. The configuration struct provides the filesystem with the block
+device operations and dimensions, tweakable parameters that tradeoff memory
+usage for performance, and optional static buffers if the user wants to avoid
+dynamic memory.
+
+The state of the littlefs is stored in the `lfs_t` type which is left up
+to the user to allocate, allowing multiple filesystems to be in use
+simultaneously. With the `lfs_t` and configuration struct, a user can
+format a block device or mount the filesystem.
+
+Once mounted, the littlefs provides a full set of POSIX-like file and
+directory functions, with the deviation that the allocation of filesystem
+structures must be provided by the user.
+
+All POSIX operations, such as remove and rename, are atomic, even in the
+event of power-loss. Additionally, file updates are not actually committed
+to the filesystem until sync or close is called on the file.
+
+## Other notes
+
+Littlefs is written in C, and specifically should compile with any compiler
+that conforms to the `C99` standard.
+
+All littlefs calls have the potential to return a negative error code. The
+errors can be either one of those found in the `enum lfs_error` in
+[lfs.h](lfs.h), or an error returned by the user's block device operations.
+
+In the configuration struct, the `prog` and `erase` functions provided by the
+user may return a `LFS_ERR_CORRUPT` error if the implementation can already
+detect corrupt blocks. However, the wear leveling does not depend on the
+return code of these functions; instead all data is read back and checked
+for integrity.
+
+If your storage caches writes, make sure that the provided `sync` function
+flushes all the data to memory and ensures that the next read fetches the data
+from memory, otherwise data integrity cannot be guaranteed. If the `write`
+function does not perform caching, and therefore each `read` or `write` call
+hits the memory, the `sync` function can simply return 0.
+
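+For example, here is a minimal sketch of block device operations backed by a
+plain RAM buffer, where nothing is cached and `sync` can simply return 0 (the
+buffer and geometry here are illustrative, not part of littlefs):
+
+``` c
+#include <string.h>
+#include "lfs.h"
+
+#define BLOCK_SIZE  4096
+#define BLOCK_COUNT 128
+
+static uint8_t ram[BLOCK_SIZE * BLOCK_COUNT];
+
+static int ram_read(const struct lfs_config *c, lfs_block_t block,
+        lfs_off_t off, void *buffer, lfs_size_t size) {
+    (void)c;
+    memcpy(buffer, &ram[block*BLOCK_SIZE + off], size);
+    return 0;
+}
+
+static int ram_prog(const struct lfs_config *c, lfs_block_t block,
+        lfs_off_t off, const void *buffer, lfs_size_t size) {
+    (void)c;
+    memcpy(&ram[block*BLOCK_SIZE + off], buffer, size);
+    return 0;
+}
+
+static int ram_erase(const struct lfs_config *c, lfs_block_t block) {
+    (void)c;
+    memset(&ram[block*BLOCK_SIZE], 0xff, BLOCK_SIZE);
+    return 0;
+}
+
+static int ram_sync(const struct lfs_config *c) {
+    (void)c;
+    // no write cache, so nothing to flush
+    return 0;
+}
+```
+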
+## Design
+
+At a high level, littlefs is a block-based filesystem that uses small logs to
+store metadata and larger copy-on-write (COW) structures to store file data.
+
+In littlefs, these ingredients form a sort of two-layered cake, with the small
+logs (called metadata pairs) providing fast updates to metadata anywhere on
+storage, while the COW structures store file data compactly and without any
+wear amplification cost.
+
+Both of these data structures are built out of blocks, which are fed by a
+common block allocator. By limiting the number of erases allowed on a block
+per allocation, the allocator provides dynamic wear leveling over the entire
+filesystem.
+
+```
+ root
+ .--------.--------.
+ | A'| B'| |
+ | | |-> |
+ | | | |
+ '--------'--------'
+ .----' '--------------.
+ A v B v
+ .--------.--------. .--------.--------.
+ | C'| D'| | | E'|new| |
+ | | |-> | | | E'|-> |
+ | | | | | | | |
+ '--------'--------' '--------'--------'
+ .-' '--. | '------------------.
+ v v .-' v
+.--------. .--------. v .--------.
+| C | | D | .--------. write | new E |
+| | | | | E | ==> | |
+| | | | | | | |
+'--------' '--------' | | '--------'
+ '--------' .-' |
+ .-' '-. .-------------|------'
+ v v v v
+ .--------. .--------. .--------.
+ | F | | G | | new F |
+ | | | | | |
+ | | | | | |
+ '--------' '--------' '--------'
+```
+
+More details on how littlefs works can be found in [DESIGN.md](DESIGN.md) and
+[SPEC.md](SPEC.md).
+
+- [DESIGN.md](DESIGN.md) - A fully detailed dive into how littlefs works.
+ I would suggest reading it as the tradeoffs at work are quite interesting.
+
+- [SPEC.md](SPEC.md) - The on-disk specification of littlefs with all the
+ nitty-gritty details. May be useful for tooling development.
+
+## Testing
+
+The littlefs comes with a test suite designed to run on a PC using the
+[emulated block device](bd/lfs_testbd.h) found in the `bd` directory.
+The tests assume a Linux environment and can be started with make:
+
+``` bash
+make test
+```
+
+Tests are implemented in C in the .toml files found in the `tests` directory.
+When developing a feature or fixing a bug, it is frequently useful to run a
+single test case or suite of tests:
+
+``` bash
+./scripts/test.py -l runners/test_runner # list available test suites
+./scripts/test.py -L runners/test_runner test_dirs # list available test cases
+./scripts/test.py runners/test_runner test_dirs # run a specific test suite
+```
+
+If an assert fails in a test, test.py will try to print information about the
+failure:
+
+``` bash
+tests/test_dirs.toml:1:failure: test_dirs_root:1g12gg2 (PROG_SIZE=16, ERASE_SIZE=512) failed
+tests/test_dirs.toml:5:assert: assert failed with 0, expected eq 42
+ lfs_mount(&lfs, cfg) => 42;
+```
+
+This includes the test id, which can be passed to test.py to run only that
+specific test permutation:
+
+``` bash
+./scripts/test.py runners/test_runner test_dirs_root:1g12gg2 # run a specific test permutation
+./scripts/test.py runners/test_runner test_dirs_root:1g12gg2 --gdb # drop into gdb on failure
+```
+
+Some other flags that may be useful:
+
+```bash
+./scripts/test.py runners/test_runner -b -j # run tests in parallel
+./scripts/test.py runners/test_runner -v -O- # run verbosely, output to stdout
+./scripts/test.py runners/test_runner -ddisk # capture resulting disk image
+```
+
+See `-h/--help` for a full list of available flags:
+
+``` bash
+./scripts/test.py --help
+```
+
+## License
+
+The littlefs is provided under the [BSD-3-Clause] license. See
+[LICENSE.md](LICENSE.md) for more information. Contributions to this project
+are accepted under the same license.
+
+Individual files contain the following tag instead of the full license text.
+
+ SPDX-License-Identifier: BSD-3-Clause
+
+This enables machine processing of license information based on the SPDX
+License Identifiers that are available here: http://spdx.org/licenses/
+
+## Related projects
+
+- [littlefs-fuse] - A [FUSE] wrapper for littlefs. The project allows you to
+ mount littlefs directly on a Linux machine. Can be useful for debugging
+ littlefs if you have an SD card handy.
+
+- [littlefs-js] - A javascript wrapper for littlefs. I'm not sure why you would
+ want this, but it is handy for demos. You can see it in action
+ [here][littlefs-js-demo].
+
+- [littlefs-python] - A Python wrapper for littlefs. The project allows you
+ to create images of the filesystem on your PC. Check if littlefs will fit
+ your needs, create images for a later download to the target memory or
+ inspect the content of a binary image of the target memory.
+
+- [littlefs-toy] - A command-line tool for creating and working with littlefs
+  images. Uses a tar-like syntax for ease of use. Supports working on littlefs
+  images embedded inside another file (a firmware image, etc.).
+
+- [littlefs2-rust] - A Rust wrapper for littlefs. This project allows you
+ to use littlefs in a Rust-friendly API, reaping the benefits of Rust's memory
+ safety and other guarantees.
+
+- [nim-littlefs] - A Nim wrapper and API for littlefs. Includes a fuse
+  implementation based on [littlefs-fuse].
+
+- [chamelon] - A pure-OCaml implementation of (most of) littlefs, designed for
+ use with the MirageOS library operating system project. It is interoperable
+ with the reference implementation, with some caveats.
+
+- [littlefs-disk-img-viewer] - A memory-efficient web application for viewing
+ littlefs disk images in your web browser.
+
+- [mklfs] - A command line tool for creating littlefs images. Used in the Lua
+ RTOS ecosystem.
+
+- [mklittlefs] - A command line tool for creating littlefs images. Used in the
+ ESP8266 and RP2040 ecosystem.
+
+- [pico-littlefs-usb] - An interface for littlefs that emulates a FAT12
+ filesystem over USB. Allows mounting littlefs on a host PC without additional
+ drivers.
+
+- [ramcrc32bd] - An example block device using littlefs's 32-bit CRC for
+ error-correction.
+
+- [ramrsbd] - An example block device using Reed-Solomon codes for
+ error-correction.
+
+- [Mbed OS] - The easiest way to get started with littlefs is to jump into Mbed
+ which already has block device drivers for most forms of embedded storage.
+ littlefs is available in Mbed OS as the [LittleFileSystem] class.
+
+- [SPIFFS] - Another excellent embedded filesystem for NOR flash. As a more
+ traditional logging filesystem with full static wear-leveling, SPIFFS will
+ likely outperform littlefs on small memories such as the internal flash on
+ microcontrollers.
+
+- [Dhara] - An interesting NAND flash translation layer designed for small
+ MCUs. It offers static wear-leveling and power-resilience with only a fixed
+ _O(|address|)_ pointer structure stored on each block and in RAM.
+
+- [ChaN's FatFs] - A lightweight reimplementation of the infamous FAT filesystem
+ for microcontroller-scale devices. Due to limitations of FAT it can't provide
+ power-loss resilience, but it does allow easy interop with PCs.
+
+[BSD-3-Clause]: https://spdx.org/licenses/BSD-3-Clause.html
+[littlefs-fuse]: https://github.com/geky/littlefs-fuse
+[FUSE]: https://github.com/libfuse/libfuse
+[littlefs-js]: https://github.com/geky/littlefs-js
+[littlefs-js-demo]: http://littlefs.geky.net/demo.html
+[littlefs-python]: https://pypi.org/project/littlefs-python/
+[littlefs-toy]: https://github.com/tjko/littlefs-toy
+[littlefs2-rust]: https://crates.io/crates/littlefs2
+[nim-littlefs]: https://github.com/Graveflo/nim-littlefs
+[chamelon]: https://github.com/yomimono/chamelon
+[littlefs-disk-img-viewer]: https://github.com/tniessen/littlefs-disk-img-viewer
+[mklfs]: https://github.com/whitecatboard/Lua-RTOS-ESP32/tree/master/components/mklfs/src
+[mklittlefs]: https://github.com/earlephilhower/mklittlefs
+[pico-littlefs-usb]: https://github.com/oyama/pico-littlefs-usb
+[ramcrc32bd]: https://github.com/geky/ramcrc32bd
+[ramrsbd]: https://github.com/geky/ramrsbd
+[Mbed OS]: https://github.com/armmbed/mbed-os
+[LittleFileSystem]: https://os.mbed.com/docs/mbed-os/latest/apis/littlefilesystem.html
+[SPIFFS]: https://github.com/pellepl/spiffs
+[Dhara]: https://github.com/dlbeer/dhara
+[ChaN's FatFs]: http://elm-chan.org/fsw/ff/00index_e.html
diff --git a/packages/littlefs-v2.11.2/SConscript b/packages/littlefs-v2.11.2/SConscript
new file mode 100644
index 0000000..df6eda8
--- /dev/null
+++ b/packages/littlefs-v2.11.2/SConscript
@@ -0,0 +1,25 @@
+# RT-Thread building script for component
+
+import os
+import shutil
+
+from building import *
+
+cwd = GetCurrentDir()
+src = Glob('*.c') + Glob('*.cpp')
+CPPPATH = [cwd]
+CPPDEFINES = ['LFS_CONFIG=lfs_config.h']
+
+# delete unused files
+try:
+ shutil.rmtree(os.path.join(cwd,'.github'))
+ shutil.rmtree(os.path.join(cwd,'bd'))
+ shutil.rmtree(os.path.join(cwd,'scripts'))
+ shutil.rmtree(os.path.join(cwd,'tests'))
+ os.remove(os.path.join(cwd,'Makefile'))
+except Exception:
+ pass
+
+group = DefineGroup('littlefs', src, depend = ['PKG_USING_LITTLEFS', 'RT_USING_DFS'], CPPPATH = CPPPATH, CPPDEFINES = CPPDEFINES)
+
+Return('group')
diff --git a/packages/littlefs-v2.11.2/SPEC.md b/packages/littlefs-v2.11.2/SPEC.md
new file mode 100644
index 0000000..6682c74
--- /dev/null
+++ b/packages/littlefs-v2.11.2/SPEC.md
@@ -0,0 +1,867 @@
+## littlefs technical specification
+
+This is the technical specification of the little filesystem with on-disk
+version lfs2.1. This document covers the technical details of how the littlefs
+is stored on disk for introspection and tooling. This document assumes you are
+familiar with the design of the littlefs; for more info on how littlefs works
+check out [DESIGN.md](DESIGN.md).
+
+```
+ | | | .---._____
+ .-----. | |
+--|o |---| littlefs |
+--| |---| |
+ '-----' '----------'
+ | | |
+```
+
+## Some quick notes
+
+- littlefs is a block-based filesystem. The disk is divided into an array of
+ evenly sized blocks that are used as the logical unit of storage.
+
+- Block pointers are stored in 32 bits, with the special value `0xffffffff`
+ representing a null block address.
+
+- In addition to the logical block size (which usually matches the erase
+ block size), littlefs also uses a program block size and read block size.
+ These determine the alignment of block device operations, but don't need
+ to be consistent for portability.
+
+- By default, all values in littlefs are stored in little-endian byte order.
+
+## Directories / Metadata pairs
+
+Metadata pairs form the backbone of littlefs and provide a system for
+distributed atomic updates. Even the superblock is stored in a metadata pair.
+
+As their name suggests, a metadata pair is stored in two blocks, with one block
+providing a backup during erase cycles in case power is lost. These two blocks
+are not necessarily sequential and may be anywhere on disk, so a "pointer" to a
+metadata pair is stored as two block pointers.
+
+On top of this, each metadata block behaves as an appendable log, containing a
+variable number of commits. Commits can be appended to the metadata log in
+order to update the metadata without requiring an erase cycle. Note that
+successive commits may supersede the metadata in previous commits. Only the
+most recent metadata should be considered valid.
+
+The high-level layout of a metadata block is fairly simple:
+
+```
+ .---------------------------------------.
+.-| revision count | entries | \
+| |-------------------+ | |
+| | | |
+| | | +-- 1st commit
+| | | |
+| | +-------------------| |
+| | | CRC | /
+| |-------------------+-------------------|
+| | entries | \
+| | | |
+| | | +-- 2nd commit
+| | +-------------------+--------------| |
+| | | CRC | padding | /
+| |----+-------------------+--------------|
+| | entries | \
+| | | |
+| | | +-- 3rd commit
+| | +-------------------+---------| |
+| | | CRC | | /
+| |---------+-------------------+ |
+| | unwritten storage | more commits
+| | | |
+| | | v
+| | |
+| | |
+| '---------------------------------------'
+'---------------------------------------'
+```
+
+Each metadata block contains a 32-bit revision count followed by a number of
+commits. Each commit contains a variable number of metadata entries followed
+by a 32-bit CRC.
+
+Note also that entries aren't necessarily word-aligned. This allows us to
+store metadata more compactly; however, we can only write to addresses that are
+aligned to our program block size. This means each commit may have padding for
+alignment.
+
+Metadata block fields:
+
+1. **Revision count (32-bits)** - Incremented every erase cycle. If both blocks
+ contain valid commits, only the block with the most recent revision count
+ should be used. Sequence comparison must be used to avoid issues with
+ integer overflow.
+
+2. **CRC (32-bits)** - Detects corruption from power-loss or other write
+ issues. Uses a CRC-32 with a polynomial of `0x04c11db7` initialized
+ with `0xffffffff`.
+
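+Both of these checks can be sketched in a few lines of C. The revision
+comparison relies on wrapping arithmetic, and the CRC below is a bitwise form
+of the reflected `0x04c11db7` polynomial (a table-driven version is
+equivalent):
+
+``` c
+#include <stdint.h>
+#include <stddef.h>
+
+// > 0 if revision a is newer than b, even across 32-bit wraparound
+static int rev_cmp(uint32_t a, uint32_t b) {
+    return (int32_t)(a - b);
+}
+
+// CRC-32, polynomial 0x04c11db7 in its reflected form 0xedb88320,
+// seeded with 0xffffffff by the caller
+static uint32_t crc32(uint32_t crc, const void *buffer, size_t size) {
+    const uint8_t *data = buffer;
+    for (size_t i = 0; i < size; i++) {
+        crc ^= data[i];
+        for (int b = 0; b < 8; b++) {
+            crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
+        }
+    }
+    return crc;
+}
+```
+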
+Entries themselves are stored as a 32-bit tag followed by a variable length
+blob of data. But exactly how these tags are stored is a little bit tricky.
+
+Metadata blocks support both forward and backward iteration. In order to do
+this without duplicating the space for each tag, neighboring entries have their
+tags XORed together, starting with `0xffffffff`.
+
+```
+ Forward iteration Backward iteration
+
+.-------------------. 0xffffffff .-------------------.
+| revision count | | | revision count |
+|-------------------| v |-------------------|
+| tag ~A |---> xor -> tag A | tag ~A |---> xor -> 0xffffffff
+|-------------------| | |-------------------| ^
+| data A | | | data A | |
+| | | | | |
+| | | | | |
+|-------------------| v |-------------------| |
+| tag AxB |---> xor -> tag B | tag AxB |---> xor -> tag A
+|-------------------| | |-------------------| ^
+| data B | | | data B | |
+| | | | | |
+| | | | | |
+|-------------------| v |-------------------| |
+| tag BxC |---> xor -> tag C | tag BxC |---> xor -> tag B
+|-------------------| |-------------------| ^
+| data C | | data C | |
+| | | | tag C
+| | | |
+| | | |
+'-------------------' '-------------------'
+```
+
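+A sketch of forward iteration under this scheme; each stored word is the xor
+of its tag with the previous tag, so xoring as we walk recovers the real tags
+(a real parser would also skip each tag's data and verify CRCs):
+
+``` c
+#include <stdint.h>
+#include <stddef.h>
+
+void iterate_tags(const uint32_t *stored, size_t count) {
+    uint32_t prev = 0xffffffff;
+    for (size_t i = 0; i < count; i++) {
+        uint32_t tag = prev ^ stored[i];
+        // ... process tag here ...
+        prev = tag;
+    }
+}
+```
+
+Backward iteration is the same operation in reverse: xoring a stored word
+with the tag it encodes recovers the tag before it.
+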
+Here's a more complete example of a metadata block containing 4 entries:
+
+```
+ .---------------------------------------.
+.-| revision count | tag ~A | \
+| |-------------------+-------------------| |
+| | data A | |
+| | | |
+| |-------------------+-------------------| |
+| | tag AxB | data B | <--. |
+| |-------------------+ | | |
+| | | | +-- 1st commit
+| | +-------------------+---------| | |
+| | | tag BxC | | <-.| |
+| |---------+-------------------+ | || |
+| | data C | || |
+| | | || |
+| |-------------------+-------------------| || |
+| | tag CxCRC | CRC | || /
+| |-------------------+-------------------| ||
+| | tag CRCxA' | data A' | || \
+| |-------------------+ | || |
+| | | || |
+| | +-------------------+----| || +-- 2nd commit
+| | | tag CRCxA' | | || |
+| |--------------+-------------------+----| || |
+| | CRC | padding | || /
+| |--------------+----+-------------------| ||
+| | tag CRCxA'' | data A'' | <---. \
+| |-------------------+ | ||| |
+| | | ||| |
+| | +-------------------+---------| ||| |
+| | | tag A''xD | | < ||| |
+| |---------+-------------------+ | |||| +-- 3rd commit
+| | data D | |||| |
+| | +---------| |||| |
+| | | tag Dx| |||| |
+| |---------+-------------------+---------| |||| |
+| |CRC | CRC | | |||| /
+| |---------+-------------------+ | ||||
+| | unwritten storage | |||| more commits
+| | | |||| |
+| | | |||| v
+| | | ||||
+| | | ||||
+| '---------------------------------------' ||||
+'---------------------------------------' |||'- most recent A
+ ||'-- most recent B
+ |'--- most recent C
+ '---- most recent D
+```
+
+Two things to note before we get into the details around tag encoding:
+
+1. Each tag contains a valid bit used to indicate if the tag and its
+   containing commit are valid. After XORing, this bit should always be zero.
+
+ At the end of each commit, the valid bit of the previous tag is XORed
+ with the lowest bit in the type field of the CRC tag. This allows
+ the CRC tag to force the next commit to fail the valid bit test if it
+ has not yet been written to.
+
+2. The valid bit alone is not enough info to know if the next commit has been
+ erased. We don't know the order bits will be programmed in a program block,
+ so it's possible that the next commit had an attempted program that left the
+ valid bit unchanged.
+
+ To ensure we only ever program erased bytes, each commit can contain an
+ optional forward-CRC (FCRC). An FCRC contains a checksum of some amount of
+ bytes in the next commit at the time it was erased.
+
+ ```
+ .-------------------. \ \
+ | revision count | | |
+ |-------------------| | |
+ | metadata | | |
+ | | +---. +-- current commit
+ | | | | |
+ |-------------------| | | |
+ | FCRC ---|-. | |
+ |-------------------| / | | |
+ | CRC -----|-' /
+ |-------------------| |
+   |      padding      |   |  padding (doesn't need CRC)
+ | | |
+ |-------------------| \ | \
+ | erased? | +-' |
+ | | | | +-- next commit
+ | v | / |
+ | | /
+ | |
+ '-------------------'
+ ```
+
+ If the FCRC is missing or the checksum does not match, we must assume a
+ commit was attempted but failed due to power-loss.
+
+ Note that end-of-block commits do not need an FCRC.
+
+## Metadata tags
+
+So in littlefs, 32-bit tags describe every type of metadata. And this means
+_every_ type of metadata, including file entries, directory fields, and
+global state. Even the CRCs used to mark the end of commits get their own tag.
+
+Because of this, the tag format contains some densely packed information. Note
+that there are multiple levels of types which break down into more info:
+
+```
+[---- 32 ----]
+[1|-- 11 --|-- 10 --|-- 10 --]
+ ^. ^ . ^ ^- length
+ |. | . '------------ id
+ |. '-----.------------------ type (type3)
+ '.-----------.------------------ valid bit
+ [-3-|-- 8 --]
+ ^ ^- chunk
+ '------- type (type1)
+```
+
+
+Before we go further, there's one important thing to note. These tags are
+**not** stored in little-endian. Tags stored in commits are actually stored
+in big-endian (and are the only thing in littlefs stored in big-endian). This
+little bit of craziness comes from the fact that the valid bit must be the
+first bit in a commit, and when converted to little-endian, the valid bit finds
+itself in byte 4. We could restructure the tag to store the valid bit lower,
+but, because none of the fields are byte-aligned, this would be more
+complicated than just storing the tag in big-endian.
+
+Another thing to note is that both the tags `0x00000000` and `0xffffffff` are
+invalid and can be used for null values.
+
+Metadata tag fields:
+
+1. **Valid bit (1-bit)** - Indicates if the tag is valid.
+
+2. **Type3 (11-bits)** - Type of the tag. This field is broken down further
+ into a 3-bit abstract type and an 8-bit chunk field. Note that the value
+ `0x000` is invalid and not assigned a type.
+
+ 1. **Type1 (3-bits)** - Abstract type of the tag. Groups the tags into
+ 8 categories that facilitate bitmasked lookups.
+
+ 2. **Chunk (8-bits)** - Chunk field used for various purposes by the different
+ abstract types. type1+chunk+id form a unique identifier for each tag in the
+ metadata block.
+
+3. **Id (10-bits)** - File id associated with the tag. Each file in a metadata
+ block gets a unique id which is used to associate tags with that file. The
+ special value `0x3ff` is used for any tags that are not associated with a
+ file, such as directory and global metadata.
+
+4. **Length (10-bits)** - Length of the data in bytes. The special value
+ `0x3ff` indicates that this tag has been deleted.
+
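+As a sketch, here's how these fields unpack from a tag that has already been
+xor-decoded and read as big-endian (the struct is illustrative, not part of
+littlefs's API):
+
+``` c
+#include <stdint.h>
+
+struct tag_fields {
+    uint8_t  valid;   // 1-bit valid bit
+    uint16_t type3;   // 11-bit type
+    uint8_t  type1;   // upper 3 bits of type3
+    uint8_t  chunk;   // lower 8 bits of type3
+    uint16_t id;      // 10-bit file id (0x3ff = no file)
+    uint16_t length;  // 10-bit length (0x3ff = deleted)
+};
+
+static struct tag_fields tag_unpack(uint32_t tag) {
+    struct tag_fields f;
+    f.valid  = (tag >> 31) & 0x1;
+    f.type3  = (tag >> 20) & 0x7ff;
+    f.type1  = (f.type3 >> 8) & 0x7;
+    f.chunk  = f.type3 & 0xff;
+    f.id     = (tag >> 10) & 0x3ff;
+    f.length = tag & 0x3ff;
+    return f;
+}
+```
+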
+## Metadata types
+
+What follows is an exhaustive list of metadata in littlefs.
+
+---
+#### `0x401` LFS_TYPE_CREATE
+
+Creates a new file with this id. Note that files in a metadata block
+don't necessarily need a create tag. All a create does is move over any
+files using this id. In this sense a create is similar to insertion into
+an imaginary array of files.
+
+The create and delete tags allow littlefs to keep files in a directory
+ordered alphabetically by filename.
+
+---
+#### `0x4ff` LFS_TYPE_DELETE
+
+Deletes the file with this id. An inverse to create, this tag moves over
+any files neighboring this id similar to a deletion from an imaginary
+array of files.
+
+---
+#### `0x0xx` LFS_TYPE_NAME
+
+Associates the id with a file name and file type.
+
+The data contains the file name stored as an ASCII string (may be expanded to
+UTF8 in the future).
+
+The chunk field in this tag indicates an 8-bit file type which can be one of
+the following.
+
+Currently, the name tag must precede any other tags associated with the id and
+can not be reassigned without deleting the file.
+
+Layout of the name tag:
+
+```
+ tag data
+[-- 32 --][--- variable length ---]
+[1| 3| 8 | 10 | 10 ][--- (size * 8) ---]
+ ^ ^ ^ ^ ^- size ^- file name
+ | | | '------ id
+ | | '----------- file type
+ | '-------------- type1 (0x0)
+ '----------------- valid bit
+```
+
+Name fields:
+
+1. **file type (8-bits)** - Type of the file.
+
+2. **file name** - File name stored as an ASCII string.
+
+---
+#### `0x001` LFS_TYPE_REG
+
+Initializes the id + name as a regular file.
+
+How each file is stored depends on its struct tag, which is described below.
+
+---
+#### `0x002` LFS_TYPE_DIR
+
+Initializes the id + name as a directory.
+
+Directories in littlefs are stored on disk as a linked-list of metadata pairs,
+each pair containing any number of files in alphabetical order. A pointer to
+the directory is stored in the struct tag, which is described below.
+
+---
+#### `0x0ff` LFS_TYPE_SUPERBLOCK
+
+Initializes the id as a superblock entry.
+
+The superblock entry is a special entry used to store format-time configuration
+and identify the filesystem.
+
+The name is a bit of a misnomer. While the superblock entry serves the same
+purpose as a superblock found in other filesystems, in littlefs the superblock
+does not get a dedicated block. Instead, the superblock entry is duplicated
+across a linked-list of metadata pairs rooted on the blocks 0 and 1. The last
+metadata pair doubles as the root directory of the filesystem.
+
+```
+ .--------. .--------. .--------. .--------. .--------.
+.| super |->| super |->| super |->| super |->| file B |
+|| block | || block | || block | || block | || file C |
+|| | || | || | || file A | || file D |
+|'--------' |'--------' |'--------' |'--------' |'--------'
+'--------' '--------' '--------' '--------' '--------'
+
+\----------------+----------------/ \----------+----------/
+ superblock pairs root directory
+```
+
+The filesystem starts with only the root directory. The superblock metadata
+pairs grow every time the root pair is compacted in order to prolong the
+life of the device exponentially.
+
+The contents of the superblock entry are stored in a name tag with the
+superblock type and an inline-struct tag. The name tag contains the magic
+string "littlefs", while the inline-struct tag contains version and
+configuration information.
+
+Layout of the superblock name tag and inline-struct tag:
+
+```
+ tag data
+[-- 32 --][-- 32 --|-- 32 --]
+[1|- 11 -| 10 | 10 ][--- 64 ---]
+ ^ ^ ^ ^- size (8) ^- magic string ("littlefs")
+ | | '------ id (0)
+ | '------------ type (0x0ff)
+ '----------------- valid bit
+
+ tag data
+[-- 32 --][-- 32 --|-- 32 --|-- 32 --]
+[1|- 11 -| 10 | 10 ][-- 32 --|-- 32 --|-- 32 --]
+ ^ ^ ^ ^ ^- version ^- block size ^- block count
+ | | | | [-- 32 --|-- 32 --|-- 32 --]
+ | | | | [-- 32 --|-- 32 --|-- 32 --]
+ | | | | ^- name max ^- file max ^- attr max
+ | | | '- size (24)
+ | | '------ id (0)
+ | '------------ type (0x201)
+ '----------------- valid bit
+```
+
+Superblock fields:
+
+1. **Magic string (8-bytes)** - Magic string indicating the presence of
+ littlefs on the device. Must be the string "littlefs".
+
+2. **Version (32-bits)** - The version of littlefs at format time. The version
+ is encoded in a 32-bit value with the upper 16-bits containing the major
+ version, and the lower 16-bits containing the minor version.
+
+ This specification describes version 2.0 (`0x00020000`).
+
+3. **Block size (32-bits)** - Size of the logical block size used by the
+ filesystem in bytes.
+
+4. **Block count (32-bits)** - Number of blocks in the filesystem.
+
+5. **Name max (32-bits)** - Maximum size of file names in bytes.
+
+6. **File max (32-bits)** - Maximum size of files in bytes.
+
+7. **Attr max (32-bits)** - Maximum size of file attributes in bytes.
+
+The superblock must always be the first entry (id 0) in the metadata pair, and
+the name tag must always be the first tag in the metadata pair. This makes it
+so that the magic string "littlefs" will always reside at offset=8 in a valid
+littlefs superblock.
+
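+As a sketch, the inline-struct's 24 bytes of data map to the following C
+struct on a little-endian machine (the names here are illustrative):
+
+``` c
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+struct superblock_data {
+    uint32_t version;      // 0x00020000 for lfs2.0
+    uint32_t block_size;   // logical block size in bytes
+    uint32_t block_count;  // number of blocks in the filesystem
+    uint32_t name_max;     // maximum file name size in bytes
+    uint32_t file_max;     // maximum file size in bytes
+    uint32_t attr_max;     // maximum attribute size in bytes
+};
+
+// a valid littlefs superblock always has "littlefs" at offset=8
+static bool looks_like_littlefs(const uint8_t *block) {
+    return memcmp(&block[8], "littlefs", 8) == 0;
+}
+```
+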
+---
+#### `0x2xx` LFS_TYPE_STRUCT
+
+Associates the id with an on-disk data structure.
+
+The exact layout of the data depends on the data structure type stored in the
+chunk field and can be one of the following.
+
+Any type of struct supersedes all other structs associated with the id. For
+example, appending a ctz-struct replaces an inline-struct on the same file.
+
+---
+#### `0x200` LFS_TYPE_DIRSTRUCT
+
+Gives the id a directory data structure.
+
+Directories in littlefs are stored on disk as a linked-list of metadata pairs,
+each pair containing any number of files in alphabetical order.
+
+```
+ |
+ v
+ .--------. .--------. .--------. .--------. .--------. .--------.
+.| file A |->| file D |->| file G |->| file I |->| file J |->| file M |
+|| file B | || file E | || file H | || | || file K | || file N |
+|| file C | || file F | || | || | || file L | || |
+|'--------' |'--------' |'--------' |'--------' |'--------' |'--------'
+'--------' '--------' '--------' '--------' '--------' '--------'
+```
+
+The dir-struct tag contains only the pointer to the first metadata-pair in the
+directory. The directory size is not known without traversing the directory.
+
+The pointer to the next metadata-pair in the directory is stored in a tail tag,
+which is described below.
+
+Layout of the dir-struct tag:
+
+```
+ tag data
+[-- 32 --][-- 32 --|-- 32 --]
+[1|- 11 -| 10 | 10 ][--- 64 ---]
+ ^ ^ ^ ^- size (8) ^- metadata pair
+ | | '------ id
+ | '------------ type (0x200)
+ '----------------- valid bit
+```
+
+Dir-struct fields:
+
+1. **Metadata pair (8-bytes)** - Pointer to the first metadata-pair
+ in the directory.
+
+---
+#### `0x201` LFS_TYPE_INLINESTRUCT
+
+Gives the id an inline data structure.
+
+Inline structs store small files that can fit in the metadata pair. In this
+case, the file data is stored directly in the tag's data area.
+
+Layout of the inline-struct tag:
+
+```
+ tag data
+[-- 32 --][--- variable length ---]
+[1|- 11 -| 10 | 10 ][--- (size * 8) ---]
+ ^ ^ ^ ^- size ^- inline data
+ | | '------ id
+ | '------------ type (0x201)
+ '----------------- valid bit
+```
+
+Inline-struct fields:
+
+1. **Inline data** - File data stored directly in the metadata-pair.
+
+---
+#### `0x202` LFS_TYPE_CTZSTRUCT
+
+Gives the id a CTZ skip-list data structure.
+
+CTZ skip-lists store files that cannot fit in the metadata pair. These files
+are stored in a skip-list in reverse, with a pointer to the head of the
+skip-list. Note that the head of the skip-list and the file size is enough
+information to read the file.
+
+How exactly CTZ skip-lists work is a bit complicated. A full explanation can be
+found in the [DESIGN.md](DESIGN.md#ctz-skip-lists).
+
+A quick summary: For every _n_th block where _n_ is divisible by
+2_ˣ_, that block contains a pointer to block _n_-2_ˣ_.
+These pointers are stored in increasing order of _x_ in each block of the file
+before the actual data.
+
+```
+ |
+ v
+.--------. .--------. .--------. .--------. .--------. .--------.
+| A |<-| D |<-| G |<-| J |<-| M |<-| P |
+| B |<-| E |--| H |<-| K |--| N | | Q |
+| C |<-| F |--| I |--| L |--| O | | |
+'--------' '--------' '--------' '--------' '--------' '--------'
+ block 0 block 1 block 2 block 3 block 4 block 5
+ 1 skip 2 skips 1 skip 3 skips 1 skip
+```
+
+Note that the maximum number of pointers in a block is bounded by the log2 of
+the maximum file size divided by the block size. With 32 bits for file size,
+this results in a minimum block size of 104 bytes.
+
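+A sketch of the pointer bookkeeping: block _n_ of a file (for _n_ > 0) stores
+ctz(_n_)+1 pointers before its data, which matches the skip counts in the
+diagram above:
+
+``` c
+#include <stdint.h>
+
+// number of pointers stored at the start of block n
+// (block 0, the end of the list, stores none)
+static uint32_t ctz_skips(uint32_t n) {
+    if (n == 0) {
+        return 0;
+    }
+    uint32_t skips = 1;
+    while (!(n & 1)) {  // count trailing zeros
+        skips += 1;
+        n >>= 1;
+    }
+    return skips;
+}
+```
+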
+Layout of the CTZ-struct tag:
+
+```
+ tag data
+[-- 32 --][-- 32 --|-- 32 --]
+[1|- 11 -| 10 | 10 ][-- 32 --|-- 32 --]
+ ^ ^ ^ ^ ^ ^- file size
+ | | | | '-------------------- file head
+ | | | '- size (8)
+ | | '------ id
+ | '------------ type (0x202)
+ '----------------- valid bit
+```
+
+CTZ-struct fields:
+
+1. **File head (32-bits)** - Pointer to the block that is the head of the
+ file's CTZ skip-list.
+
+2. **File size (32-bits)** - Size of the file in bytes.
+
+---
+#### `0x3xx` LFS_TYPE_USERATTR
+
+Attaches a user attribute to an id.
+
+littlefs has a concept of "user attributes". These are small user-provided
+attributes that can be used to store things like timestamps, hashes,
+permissions, etc.
+
+Each user attribute is uniquely identified by an 8-bit type which is stored in
+the chunk field, and the user attribute itself can be found in the tag's data.
+
+There are currently no standard user attributes and a portable littlefs
+implementation should work with any user attributes missing.
+
+Layout of the user-attr tag:
+
+```
+ tag data
+[-- 32 --][--- variable length ---]
+[1| 3| 8 | 10 | 10 ][--- (size * 8) ---]
+ ^ ^ ^ ^ ^- size ^- attr data
+ | | | '------ id
+ | | '----------- attr type
+ | '-------------- type1 (0x3)
+ '----------------- valid bit
+```
+
+User-attr fields:
+
+1. **Attr type (8-bits)** - Type of the user attributes.
+
+2. **Attr data** - The data associated with the user attribute.
+
+---
+#### `0x6xx` LFS_TYPE_TAIL
+
+Provides the tail pointer for the metadata pair itself.
+
+The metadata pair's tail pointer is used in littlefs for a linked-list
+containing all metadata pairs. The chunk field contains the type of the tail,
+which indicates if the following metadata pair is a part of the directory
+(hard-tail) or only used to traverse the filesystem (soft-tail).
+
+```
+ .--------.
+ .| dir A |-.
+ ||softtail| |
+.--------| |-'
+| |'--------'
+| '---|--|-'
+| .-' '-------------.
+| v v
+| .--------. .--------. .--------.
+'->| dir B |->| dir B |->| dir C |
+ ||hardtail| ||softtail| || |
+ || | || | || |
+ |'--------' |'--------' |'--------'
+ '--------' '--------' '--------'
+```
+
+Currently, any tail tag supersedes any other preceding tails in the metadata
+pair, but this may change if additional metadata pair state is added.
+
+A note about the metadata pair linked-list: Normally, this linked-list contains
+every metadata pair in the filesystem. However, there are some operations that
+can cause this linked-list to become out of sync if a power-loss were to occur.
+When this happens, littlefs sets the "sync" flag in the global state. How
+exactly this flag is stored is described below.
+
+When the sync flag is set:
+
+1. The linked-list may contain an orphaned directory that has been removed in
+ the filesystem.
+2. The linked-list may contain a metadata pair with a bad block that has been
+ replaced in the filesystem.
+
+If the sync flag is set, the threaded linked-list must be checked for these
+errors before it can be used reliably. Note that the threaded linked-list can
+be ignored if littlefs is mounted read-only.
+
+Layout of the tail tag:
+
+```
+ tag data
+[-- 32 --][-- 32 --|-- 32 --]
+[1| 3| 8 | 10 | 10 ][--- 64 ---]
+ ^ ^ ^ ^ ^- size (8) ^- metadata pair
+ | | | '------ id
+ | | '---------- tail type
+ | '------------- type1 (0x6)
+ '---------------- valid bit
+```
+
+Tail fields:
+
+1. **Tail type (8-bits)** - Type of the tail pointer.
+
+2. **Metadata pair (8-bytes)** - Pointer to the next metadata-pair.
+
+---
+#### `0x600` LFS_TYPE_SOFTTAIL
+
+Provides a tail pointer that points to the next metadata pair in the
+filesystem.
+
+In this case, the next metadata pair is not a part of our current directory
+and should only be followed when traversing the entire filesystem.
+
+---
+#### `0x601` LFS_TYPE_HARDTAIL
+
+Provides a tail pointer that points to the next metadata pair in the
+directory.
+
+In this case, the next metadata pair belongs to the current directory. Note
+that because directories in littlefs are sorted alphabetically, the next
+metadata pair should only contain filenames greater than any filename in the
+current pair.
+
+---
+#### `0x7xx` LFS_TYPE_GSTATE
+
+Provides delta bits for global state entries.
+
+littlefs has a concept of "global state". This is a small set of state that
+can be updated by a commit to _any_ metadata pair in the filesystem.
+
+The way this works is that the global state is stored as a set of deltas
+distributed across the filesystem such that the global state can be found by
+the xor-sum of these deltas.
+
+```
+ .--------. .--------. .--------. .--------. .--------.
+.| |->| gdelta |->| |->| gdelta |->| gdelta |
+|| | || 0x23 | || | || 0xff | || 0xce |
+|| | || | || | || | || |
+|'--------' |'--------' |'--------' |'--------' |'--------'
+'--------' '----|---' '--------' '----|---' '----|---'
+ v v v
+ 0x00 --> xor ------------------> xor ------> xor --> gstate = 0x12
+```
+
+Note that storing globals this way is very expensive in terms of storage usage,
+so any global state should be kept very small.
+
+The size and format of each piece of global state depends on the type, which
+is stored in the chunk field. Currently, the only global state is move state,
+which is outlined below.
+
+---
+#### `0x7ff` LFS_TYPE_MOVESTATE
+
+Provides delta bits for the global move state.
+
+The move state in littlefs is used to store info about operations that could
+cause the filesystem to go out of sync if power is lost. The operations
+where this could occur are moves of files between metadata pairs and any
+operation that changes metadata pairs on the threaded linked-list.
+
+In the case of moves, the move state contains a tag + metadata pair describing
+the source of the ongoing move. If this tag is non-zero, that means that power
+was lost during a move, and the file exists in two different locations. If this
+happens, the source of the move should be considered deleted, and the move
+should be completed (the source should be deleted) before any other write
+operations to the filesystem.
+
+In the case of operations to the threaded linked-list, a single "sync" bit is
+used to indicate that a modification is ongoing. If this sync flag is set, the
+threaded linked-list will need to be checked for errors before it can be used
+reliably. The exact cases to check for are described above in the tail tag.
+
+Layout of the move state:
+
+```
+ tag data
+[-- 32 --][-- 32 --|-- 32 --|-- 32 --]
+[1|- 11 -| 10 | 10 ][1|- 11 -| 10 | 10 |--- 64 ---]
+ ^ ^ ^ ^ ^ ^ ^ ^- padding (0) ^- metadata pair
+ | | | | | | '------ move id
+ | | | | | '------------ move type
+ | | | | '----------------- sync bit
+ | | | |
+ | | | '- size (12)
+ | | '------ id (0x3ff)
+ | '------------ type (0x7ff)
+ '----------------- valid bit
+```
+
+Move state fields:
+
+1. **Sync bit (1-bit)** - Indicates if the metadata pair threaded linked-list
+ is in-sync. If set, the threaded linked-list should be checked for errors.
+
+2. **Move type (11-bits)** - Type of move being performed. Must be either
+ `0x000`, indicating no move, or `0x4ff` indicating the source file should
+ be deleted.
+
+3. **Move id (10-bits)** - The file id being moved.
+
+4. **Metadata pair (8-bytes)** - Pointer to the metadata-pair containing
+ the move.
+
+---
+#### `0x5xx` LFS_TYPE_CRC
+
+Last but not least, the CRC tag marks the end of a commit and provides a
+checksum for any commits to the metadata block.
+
+The first 32-bits of the data contain a CRC-32 with a polynomial of
+`0x04c11db7` initialized with `0xffffffff`. This CRC provides a checksum for
+all metadata since the previous CRC tag, including the CRC tag itself. For
+the first commit, this includes the revision count for the metadata block.
+
+However, the size of the data is not limited to 32-bits. The data field may
+be larger to pad the commit to the next program-aligned boundary.
+
+In addition, the CRC tag's chunk field contains a set of flags which can
+change the behaviour of commits. Currently the only flag in use is the lowest
+bit, which determines the expected state of the valid bit for any following
+tags. This is used to guarantee that unwritten storage in a metadata block
+will be detected as invalid.
+
+Layout of the CRC tag:
+
+```
+ tag data
+[-- 32 --][-- 32 --|--- variable length ---]
+[1| 3| 8 | 10 | 10 ][-- 32 --|--- (size * 8 - 32) ---]
+ ^ ^ ^ ^ ^ ^- crc ^- padding
+ | | | | '- size
+ | | | '------ id (0x3ff)
+ | | '----------- valid state
+ | '-------------- type1 (0x5)
+ '----------------- valid bit
+```
+
+CRC fields:
+
+1. **Valid state (1-bit)** - Indicates the expected value of the valid bit for
+ any tags in the next commit.
+
+2. **CRC (32-bits)** - CRC-32 with a polynomial of `0x04c11db7` initialized
+ with `0xffffffff`.
+
+3. **Padding** - Padding to the next program-aligned boundary. No guarantees
+ are made about the contents.
+
+---
+#### `0x5ff` LFS_TYPE_FCRC
+
+Added in lfs2.1, the optional FCRC tag contains a checksum of some amount of
+bytes in the next commit at the time it was erased. This allows us to ensure
+that we only ever program erased bytes, even if a previous commit failed due
+to power-loss.
+
+When programming a commit, the FCRC size must be at least as large as the
+program block size. However, the program block is not saved on disk, and can
+change between mounts, so the FCRC size on disk may be different than the
+current program block size.
+
+If the FCRC is missing or the checksum does not match, we must assume a
+commit was attempted but failed due to power-loss.
+
+Layout of the FCRC tag:
+
+```
+ tag data
+[-- 32 --][-- 32 --|-- 32 --]
+[1|- 11 -| 10 | 10 ][-- 32 --|-- 32 --]
+ ^ ^ ^ ^ ^- fcrc size ^- fcrc
+ | | | '- size (8)
+ | | '------ id (0x3ff)
+ | '------------ type (0x5ff)
+ '----------------- valid bit
+```
+
+FCRC fields:
+
+1. **FCRC size (32-bits)** - Number of bytes after this commit's CRC tag's
+ padding to include in the FCRC.
+
+2. **FCRC (32-bits)** - CRC of the bytes after this commit's CRC tag's padding
+ when erased. Like the CRC tag, this uses a CRC-32 with a polynomial of
+ `0x04c11db7` initialized with `0xffffffff`.
+
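+A sketch of the resulting check on mount, assuming a `crc32` helper with the
+same polynomial and seed as above (`next` points at the first byte after the
+committing CRC tag's padding):
+
+``` c
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+// CRC-32 as described above (0x04c11db7, seeded with 0xffffffff)
+uint32_t crc32(uint32_t crc, const void *buffer, size_t size);
+
+// true if the bytes following the commit still match the checksum taken
+// when they were erased; if not, assume a commit was attempted there and
+// failed due to power-loss
+static bool fcrc_ok(const uint8_t *next, uint32_t fcrc_size, uint32_t fcrc) {
+    return crc32(0xffffffff, next, fcrc_size) == fcrc;
+}
+```
+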
+---
diff --git a/packages/littlefs-v2.11.2/benches/bench_dir.toml b/packages/littlefs-v2.11.2/benches/bench_dir.toml
new file mode 100644
index 0000000..5f8cb49
--- /dev/null
+++ b/packages/littlefs-v2.11.2/benches/bench_dir.toml
@@ -0,0 +1,270 @@
+[cases.bench_dir_open]
+# 0 = in-order
+# 1 = reversed-order
+# 2 = random-order
+defines.ORDER = [0, 1, 2]
+defines.N = 1024
+defines.FILE_SIZE = 8
+defines.CHUNK_SIZE = 8
+code = '''
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+
+ // first create the files
+ char name[256];
+ uint8_t buffer[CHUNK_SIZE];
+ for (lfs_size_t i = 0; i < N; i++) {
+ sprintf(name, "file%08x", i);
+ lfs_file_t file;
+ lfs_file_open(&lfs, &file, name,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+
+ uint32_t file_prng = i;
+ for (lfs_size_t j = 0; j < FILE_SIZE; j += CHUNK_SIZE) {
+ for (lfs_size_t k = 0; k < CHUNK_SIZE; k++) {
+ buffer[k] = BENCH_PRNG(&file_prng);
+ }
+ lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
+ }
+
+ lfs_file_close(&lfs, &file) => 0;
+ }
+
+ // then read the files
+ BENCH_START();
+ uint32_t prng = 42;
+ for (lfs_size_t i = 0; i < N; i++) {
+ lfs_off_t i_
+ = (ORDER == 0) ? i
+ : (ORDER == 1) ? (N-1-i)
+ : BENCH_PRNG(&prng) % N;
+ sprintf(name, "file%08x", i_);
+ lfs_file_t file;
+ lfs_file_open(&lfs, &file, name, LFS_O_RDONLY) => 0;
+
+ uint32_t file_prng = i_;
+ for (lfs_size_t j = 0; j < FILE_SIZE; j += CHUNK_SIZE) {
+ lfs_file_read(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
+ for (lfs_size_t k = 0; k < CHUNK_SIZE; k++) {
+ assert(buffer[k] == BENCH_PRNG(&file_prng));
+ }
+ }
+
+ lfs_file_close(&lfs, &file) => 0;
+ }
+ BENCH_STOP();
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+[cases.bench_dir_creat]
+# 0 = in-order
+# 1 = reversed-order
+# 2 = random-order
+defines.ORDER = [0, 1, 2]
+defines.N = 1024
+defines.FILE_SIZE = 8
+defines.CHUNK_SIZE = 8
+code = '''
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+
+ BENCH_START();
+ uint32_t prng = 42;
+ char name[256];
+ uint8_t buffer[CHUNK_SIZE];
+ for (lfs_size_t i = 0; i < N; i++) {
+ lfs_off_t i_
+ = (ORDER == 0) ? i
+ : (ORDER == 1) ? (N-1-i)
+ : BENCH_PRNG(&prng) % N;
+ sprintf(name, "file%08x", i_);
+ lfs_file_t file;
+ lfs_file_open(&lfs, &file, name,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+
+ uint32_t file_prng = i_;
+ for (lfs_size_t j = 0; j < FILE_SIZE; j += CHUNK_SIZE) {
+ for (lfs_size_t k = 0; k < CHUNK_SIZE; k++) {
+ buffer[k] = BENCH_PRNG(&file_prng);
+ }
+ lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
+ }
+
+ lfs_file_close(&lfs, &file) => 0;
+ }
+ BENCH_STOP();
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+[cases.bench_dir_remove]
+# 0 = in-order
+# 1 = reversed-order
+# 2 = random-order
+defines.ORDER = [0, 1, 2]
+defines.N = 1024
+defines.FILE_SIZE = 8
+defines.CHUNK_SIZE = 8
+code = '''
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+
+ // first create the files
+ char name[256];
+ uint8_t buffer[CHUNK_SIZE];
+ for (lfs_size_t i = 0; i < N; i++) {
+ sprintf(name, "file%08x", i);
+ lfs_file_t file;
+ lfs_file_open(&lfs, &file, name,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+
+ uint32_t file_prng = i;
+ for (lfs_size_t j = 0; j < FILE_SIZE; j += CHUNK_SIZE) {
+ for (lfs_size_t k = 0; k < CHUNK_SIZE; k++) {
+ buffer[k] = BENCH_PRNG(&file_prng);
+ }
+ lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
+ }
+
+ lfs_file_close(&lfs, &file) => 0;
+ }
+
+ // then remove the files
+ BENCH_START();
+ uint32_t prng = 42;
+ for (lfs_size_t i = 0; i < N; i++) {
+ lfs_off_t i_
+ = (ORDER == 0) ? i
+ : (ORDER == 1) ? (N-1-i)
+ : BENCH_PRNG(&prng) % N;
+ sprintf(name, "file%08x", i_);
+ int err = lfs_remove(&lfs, name);
+ assert(!err || err == LFS_ERR_NOENT);
+ }
+ BENCH_STOP();
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+[cases.bench_dir_read]
+defines.N = 1024
+defines.FILE_SIZE = 8
+defines.CHUNK_SIZE = 8
+code = '''
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+
+ // first create the files
+ char name[256];
+ uint8_t buffer[CHUNK_SIZE];
+ for (lfs_size_t i = 0; i < N; i++) {
+ sprintf(name, "file%08x", i);
+ lfs_file_t file;
+ lfs_file_open(&lfs, &file, name,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+
+ uint32_t file_prng = i;
+ for (lfs_size_t j = 0; j < FILE_SIZE; j += CHUNK_SIZE) {
+ for (lfs_size_t k = 0; k < CHUNK_SIZE; k++) {
+ buffer[k] = BENCH_PRNG(&file_prng);
+ }
+ lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
+ }
+
+ lfs_file_close(&lfs, &file) => 0;
+ }
+
+ // then read the directory
+ BENCH_START();
+ lfs_dir_t dir;
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ struct lfs_info info;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+ for (int i = 0; i < N; i++) {
+ sprintf(name, "file%08x", i);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_REG);
+ assert(strcmp(info.name, name) == 0);
+ }
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ BENCH_STOP();
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+[cases.bench_dir_mkdir]
+# 0 = in-order
+# 1 = reversed-order
+# 2 = random-order
+defines.ORDER = [0, 1, 2]
+defines.N = 8
+code = '''
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+
+ BENCH_START();
+ uint32_t prng = 42;
+ char name[256];
+ for (lfs_size_t i = 0; i < N; i++) {
+ lfs_off_t i_
+ = (ORDER == 0) ? i
+ : (ORDER == 1) ? (N-1-i)
+ : BENCH_PRNG(&prng) % N;
+ sprintf(name, "dir%08x", i_);
+ int err = lfs_mkdir(&lfs, name);
+ assert(!err || err == LFS_ERR_EXIST);
+ }
+ BENCH_STOP();
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+[cases.bench_dir_rmdir]
+# 0 = in-order
+# 1 = reversed-order
+# 2 = random-order
+defines.ORDER = [0, 1, 2]
+defines.N = 8
+code = '''
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+
+ // first create the dirs
+ char name[256];
+ for (lfs_size_t i = 0; i < N; i++) {
+ sprintf(name, "dir%08x", i);
+ lfs_mkdir(&lfs, name) => 0;
+ }
+
+ // then remove the dirs
+ BENCH_START();
+ uint32_t prng = 42;
+ for (lfs_size_t i = 0; i < N; i++) {
+ lfs_off_t i_
+ = (ORDER == 0) ? i
+ : (ORDER == 1) ? (N-1-i)
+ : BENCH_PRNG(&prng) % N;
+ sprintf(name, "dir%08x", i_);
+ int err = lfs_remove(&lfs, name);
+ assert(!err || err == LFS_ERR_NOENT);
+ }
+ BENCH_STOP();
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+
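Note: these bench cases follow littlefs's test-runner conventions: "a => b" is shorthand the runner expands into an assertion that the call returns b, and BENCH_PRNG is the runner's deterministic pseudo-random generator, which lets a reader re-seed with the same value the writer used (here the file index) and verify contents without buffering whole files. A minimal sketch of such a generator, assuming the usual xorshift32 recurrence (the runner's actual implementation may differ):

    #include <stdint.h>

    // deterministic xorshift32 step; reproducibility matters more than
    // statistical quality, since verification re-seeds with the same value.
    // a zero seed yields an all-zero stream, which is still deterministic
    static inline uint32_t bench_prng(uint32_t *state) {
        uint32_t x = *state;
        x ^= x << 13;
        x ^= x >> 17;
        x ^= x << 5;
        *state = x;
        return x;
    }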
diff --git a/packages/littlefs-v2.11.2/benches/bench_file.toml b/packages/littlefs-v2.11.2/benches/bench_file.toml
new file mode 100644
index 0000000..168eaad
--- /dev/null
+++ b/packages/littlefs-v2.11.2/benches/bench_file.toml
@@ -0,0 +1,95 @@
+[cases.bench_file_read]
+# 0 = in-order
+# 1 = reversed-order
+# 2 = random-order
+defines.ORDER = [0, 1, 2]
+defines.SIZE = '128*1024'
+defines.CHUNK_SIZE = 64
+code = '''
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_size_t chunks = (SIZE+CHUNK_SIZE-1)/CHUNK_SIZE;
+
+ // first write the file
+ lfs_file_t file;
+ uint8_t buffer[CHUNK_SIZE];
+ lfs_file_open(&lfs, &file, "file",
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+ for (lfs_size_t i = 0; i < chunks; i++) {
+ uint32_t chunk_prng = i;
+ for (lfs_size_t j = 0; j < CHUNK_SIZE; j++) {
+ buffer[j] = BENCH_PRNG(&chunk_prng);
+ }
+
+ lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
+ }
+ lfs_file_close(&lfs, &file) => 0;
+
+ // then read the file
+ BENCH_START();
+ lfs_file_open(&lfs, &file, "file", LFS_O_RDONLY) => 0;
+
+ uint32_t prng = 42;
+ for (lfs_size_t i = 0; i < chunks; i++) {
+ lfs_off_t i_
+ = (ORDER == 0) ? i
+ : (ORDER == 1) ? (chunks-1-i)
+ : BENCH_PRNG(&prng) % chunks;
+ lfs_file_seek(&lfs, &file, i_*CHUNK_SIZE, LFS_SEEK_SET)
+ => i_*CHUNK_SIZE;
+ lfs_file_read(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
+
+ uint32_t chunk_prng = i_;
+ for (lfs_size_t j = 0; j < CHUNK_SIZE; j++) {
+ assert(buffer[j] == BENCH_PRNG(&chunk_prng));
+ }
+ }
+
+ lfs_file_close(&lfs, &file) => 0;
+ BENCH_STOP();
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+[cases.bench_file_write]
+# 0 = in-order
+# 1 = reversed-order
+# 2 = random-order
+defines.ORDER = [0, 1, 2]
+defines.SIZE = '128*1024'
+defines.CHUNK_SIZE = 64
+code = '''
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+ lfs_mount(&lfs, cfg) => 0;
+ lfs_size_t chunks = (SIZE+CHUNK_SIZE-1)/CHUNK_SIZE;
+
+ BENCH_START();
+ lfs_file_t file;
+ lfs_file_open(&lfs, &file, "file",
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+
+ uint8_t buffer[CHUNK_SIZE];
+ uint32_t prng = 42;
+ for (lfs_size_t i = 0; i < chunks; i++) {
+ lfs_off_t i_
+ = (ORDER == 0) ? i
+ : (ORDER == 1) ? (chunks-1-i)
+ : BENCH_PRNG(&prng) % chunks;
+ uint32_t chunk_prng = i_;
+ for (lfs_size_t j = 0; j < CHUNK_SIZE; j++) {
+ buffer[j] = BENCH_PRNG(&chunk_prng);
+ }
+
+ lfs_file_seek(&lfs, &file, i_*CHUNK_SIZE, LFS_SEEK_SET)
+ => i_*CHUNK_SIZE;
+ lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
+ }
+
+ lfs_file_close(&lfs, &file) => 0;
+ BENCH_STOP();
+
+ lfs_unmount(&lfs) => 0;
+'''
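Note: the chunk count is computed with ceiling division, so a SIZE that is not a multiple of CHUNK_SIZE would still get its trailing partial chunk counted. With the defines above the division is exact; a quick self-contained check of the arithmetic:

    #include <assert.h>

    int main(void) {
        unsigned size = 128*1024, chunk = 64;
        unsigned chunks = (size + chunk - 1) / chunk; // ceiling division
        assert(chunks == 2048);                       // 131072/64, no remainder
        return 0;
    }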
diff --git a/packages/littlefs-v2.11.2/benches/bench_superblock.toml b/packages/littlefs-v2.11.2/benches/bench_superblock.toml
new file mode 100644
index 0000000..37659d4
--- /dev/null
+++ b/packages/littlefs-v2.11.2/benches/bench_superblock.toml
@@ -0,0 +1,56 @@
+[cases.bench_superblocks_found]
+# measure mount with an empty fs (N=0) and a populated fs (N=1024)
+defines.N = [0, 1024]
+defines.FILE_SIZE = 8
+defines.CHUNK_SIZE = 8
+code = '''
+ lfs_t lfs;
+ lfs_format(&lfs, cfg) => 0;
+
+ // create files?
+ lfs_mount(&lfs, cfg) => 0;
+ char name[256];
+ uint8_t buffer[CHUNK_SIZE];
+ for (lfs_size_t i = 0; i < N; i++) {
+ sprintf(name, "file%08x", i);
+ lfs_file_t file;
+ lfs_file_open(&lfs, &file, name,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+
+ for (lfs_size_t j = 0; j < FILE_SIZE; j += CHUNK_SIZE) {
+ for (lfs_size_t k = 0; k < CHUNK_SIZE; k++) {
+ buffer[k] = i+j+k;
+ }
+ lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
+ }
+
+ lfs_file_close(&lfs, &file) => 0;
+ }
+ lfs_unmount(&lfs) => 0;
+
+ BENCH_START();
+ lfs_mount(&lfs, cfg) => 0;
+ BENCH_STOP();
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+[cases.bench_superblocks_missing]
+code = '''
+ lfs_t lfs;
+
+ BENCH_START();
+ int err = lfs_mount(&lfs, cfg);
+ assert(err != 0);
+ BENCH_STOP();
+'''
+
+[cases.bench_superblocks_format]
+code = '''
+ lfs_t lfs;
+
+ BENCH_START();
+ lfs_format(&lfs, cfg) => 0;
+ BENCH_STOP();
+'''
+
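Note: BENCH_START and BENCH_STOP bracket exactly the region being measured, which is why each case builds its fixtures before BENCH_START; here only the lfs_mount or lfs_format call itself is measured. The littlefs runner is assumed to count emulated block-device traffic rather than wall-clock time; a hedged sketch with hypothetical counters standing in for the runner's own:

    #include <stdio.h>
    #include <stdint.h>

    // hypothetical counters; the real macros live in littlefs's bench
    // runner and may differ in detail
    static uint64_t bench_readed, bench_proged, bench_erased;
    #define BENCH_START() (bench_readed = 0, bench_proged = 0, bench_erased = 0)
    #define BENCH_STOP() printf("readed %llu proged %llu erased %llu\n", \
            (unsigned long long)bench_readed, \
            (unsigned long long)bench_proged, \
            (unsigned long long)bench_erased)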
diff --git a/packages/littlefs-v2.11.2/dfs_lfs.c b/packages/littlefs-v2.11.2/dfs_lfs.c
new file mode 100644
index 0000000..4b01314
--- /dev/null
+++ b/packages/littlefs-v2.11.2/dfs_lfs.c
@@ -0,0 +1,977 @@
+#include <rtthread.h>
+#include <rtdevice.h>
+
+#include <dfs_fs.h>
+#include <dfs_file.h>
+
+#include "lfs.h"
+
+#include <string.h>
+#include <time.h>
+
+#if defined(RT_VERSION_CHECK) && (RTTHREAD_VERSION >= RT_VERSION_CHECK(5, 0, 2))
+#define DFS_LFS_RW_RETURN_TYPE ssize_t
+#define DFS_LFS_LSK_RETURN_TYPE off_t
+#define DFS_LFS_MKFS(dev_id, fs_name) _dfs_lfs_mkfs(dev_id, fs_name)
+#else
+#define DFS_LFS_RW_RETURN_TYPE int
+#define DFS_LFS_LSK_RETURN_TYPE int
+#define DFS_LFS_MKFS(dev_id, fs_name) _dfs_lfs_mkfs(dev_id)
+#endif
+
+#ifndef RT_DEF_LFS_DRIVERS
+ #define RT_DEF_LFS_DRIVERS 1
+#endif
+
+#if (RT_DEF_LFS_DRIVERS < 1)
+ #error "RT_DEF_LFS_DRIVERS must be > 0"
+#endif
+
+#ifndef LFS_READ_SIZE
+ #define LFS_READ_SIZE 256
+#endif
+
+#ifndef LFS_PROG_SIZE
+ #define LFS_PROG_SIZE 256
+#endif
+
+#ifndef LFS_BLOCK_SIZE
+ #define LFS_BLOCK_SIZE 4096
+#endif
+
+#ifndef LFS_CACHE_SIZE
+ #define LFS_CACHE_SIZE LFS_PROG_SIZE
+#endif
+
+#ifndef LFS_BLOCK_CYCLES
+ #define LFS_BLOCK_CYCLES (-1)
+#endif
+
+#ifndef LFS_LOOKAHEAD_MAX
+ #define LFS_LOOKAHEAD_MAX 128
+#endif
+
+#define ATTR_TIMESTAMP 0x74
+
+typedef struct _dfs_lfs_s
+{
+ struct lfs lfs;
+ struct lfs_config cfg;
+ struct rt_mutex lock;
+} dfs_lfs_t;
+
+typedef struct _dfs_lfs_fd_s
+{
+ struct lfs* lfs;
+ union
+ {
+ struct lfs_file file;
+ struct lfs_dir dir;
+ } u;
+} dfs_lfs_fd_t;
+
+static struct _dfs_lfs_s* _lfs_mount_tbl[RT_DEF_LFS_DRIVERS] = {0};
+
+#ifdef LFS_THREADSAFE
+// Lock the underlying block device. Negative error codes
+// are propagated to the user.
+int _lfs_lock(const struct lfs_config *c)
+{
+ dfs_lfs_t *dfs_lfs = rt_container_of(c, dfs_lfs_t, cfg);
+
+ if (rt_mutex_take(&dfs_lfs->lock, RT_WAITING_FOREVER) != RT_EOK)
+ {
+ return -1;
+ }
+
+ return 0;
+}
+
+// Unlock the underlying block device. Negative error codes
+// are propagated to the user.
+int _lfs_unlock(const struct lfs_config *c)
+{
+ dfs_lfs_t *dfs_lfs = rt_container_of(c, dfs_lfs_t, cfg);
+
+ if (rt_mutex_release(&dfs_lfs->lock) != RT_EOK)
+ {
+ return -1;
+ }
+
+ return 0;
+}
+#endif
+
+// Read a region in a block. Negative error codes are propagated
+// to the user.
+static int _lfs_flash_read(const struct lfs_config* c, lfs_block_t block, lfs_off_t off, void* buffer, lfs_size_t size)
+{
+ struct rt_mtd_nor_device* mtd_nor;
+
+ RT_ASSERT(c != RT_NULL);
+ RT_ASSERT(c->context != RT_NULL);
+
+ mtd_nor = (struct rt_mtd_nor_device*)c->context;
+ if (rt_mtd_nor_read(mtd_nor, block * c->block_size + off, buffer, size) != size)
+ {
+ return LFS_ERR_IO;
+ }
+
+ return LFS_ERR_OK;
+}
+
+// Program a region in a block. The block must have previously
+// been erased. Negative error codes are propagated to the user.
+// May return LFS_ERR_CORRUPT if the block should be considered bad.
+static int _lfs_flash_prog(const struct lfs_config* c, lfs_block_t block, lfs_off_t off, const void* buffer, lfs_size_t size)
+{
+ struct rt_mtd_nor_device* mtd_nor;
+
+ RT_ASSERT(c != RT_NULL);
+ RT_ASSERT(c->context != RT_NULL);
+
+ mtd_nor = (struct rt_mtd_nor_device*)c->context;
+ if (rt_mtd_nor_write(mtd_nor, block * c->block_size + off, buffer, size) != size)
+ {
+ return LFS_ERR_IO;
+ }
+
+ return LFS_ERR_OK;
+}
+
+// Erase a block. A block must be erased before being programmed.
+// The state of an erased block is undefined. Negative error codes
+// are propagated to the user.
+// May return LFS_ERR_CORRUPT if the block should be considered bad.
+static int _lfs_flash_erase(const struct lfs_config* c, lfs_block_t block)
+{
+ struct rt_mtd_nor_device* mtd_nor;
+
+ RT_ASSERT(c != RT_NULL);
+ RT_ASSERT(c->context != RT_NULL);
+
+ mtd_nor = (struct rt_mtd_nor_device*)c->context;
+ if (rt_mtd_nor_erase_block(mtd_nor, block * c->block_size, c->block_size) != RT_EOK)
+ {
+ return LFS_ERR_IO;
+ }
+
+ return LFS_ERR_OK;
+}
+
+// Sync the state of the underlying block device. Negative error codes
+// are propagated to the user.
+static int _lfs_flash_sync(const struct lfs_config* c)
+{
+ return LFS_ERR_OK;
+}
+
+/* results:
+ * -1, no free slot to install the littlefs driver
+ * >= 0, index of an available slot for the littlefs driver
+ */
+static int _get_disk(rt_device_t dev_id)
+{
+ int index;
+
+ if (dev_id == RT_NULL)
+ {
+ for (index = 0; index < RT_DEF_LFS_DRIVERS; index ++)
+ {
+ if(_lfs_mount_tbl[index] == RT_NULL)
+ {
+ return index;
+ }
+ }
+ }
+ else
+ {
+ for (index = 0; index < RT_DEF_LFS_DRIVERS; index ++)
+ {
+ if ((_lfs_mount_tbl[index] != RT_NULL) && (_lfs_mount_tbl[index]->cfg.context == (void *)dev_id))
+ {
+ return index;
+ }
+ }
+ }
+
+ return -1;
+}
+
+static int _lfs_result_to_dfs(int result)
+{
+ int status = 0;
+
+ switch (result)
+ {
+ case LFS_ERR_OK:
+ break;
+
+ case LFS_ERR_IO:
+ status = -EIO;
+ break; // Error during device operation
+
+ case LFS_ERR_NOENT:
+ status = -ENOENT;
+ break; // No directory entry
+
+ case LFS_ERR_EXIST:
+ status = -EEXIST;
+ break; // Entry already exists
+
+ case LFS_ERR_NOTDIR:
+ status = -ENOTDIR;
+ break; // Entry is not a dir
+
+ case LFS_ERR_ISDIR:
+ status = -EISDIR;
+ break; // Entry is a dir
+
+ case LFS_ERR_NOTEMPTY:
+ status = -ENOTEMPTY;
+ break; // Dir is not empty
+
+ case LFS_ERR_BADF:
+ status = -EBADF;
+ break; // Bad file number
+
+ case LFS_ERR_INVAL:
+ status = -EINVAL;
+ break; // Invalid parameter
+
+ case LFS_ERR_NOSPC:
+ status = -ENOSPC;
+ break; // No space left on device
+
+ case LFS_ERR_NOMEM:
+ status = -ENOMEM;
+ break; // No more memory available
+
+ case LFS_ERR_CORRUPT:
+ status = -52;
+ break; // Corrupted
+
+ default:
+ status = -EIO;
+ break;
+ }
+
+ return status;
+}
+
+static void _lfs_load_config(struct lfs_config* lfs_cfg, struct rt_mtd_nor_device* mtd_nor)
+{
+ uint64_t mtd_size;
+
+ lfs_cfg->context = (void*)mtd_nor;
+
+ lfs_cfg->read_size = LFS_READ_SIZE;
+ lfs_cfg->prog_size = LFS_PROG_SIZE;
+
+ lfs_cfg->block_size = mtd_nor->block_size;
+ if (lfs_cfg->block_size < LFS_BLOCK_SIZE)
+ {
+ lfs_cfg->block_size = LFS_BLOCK_SIZE;
+ }
+
+ lfs_cfg->cache_size = LFS_CACHE_SIZE;
+ lfs_cfg->block_cycles = LFS_BLOCK_CYCLES;
+
+ mtd_size = mtd_nor->block_end - mtd_nor->block_start;
+ mtd_size *= mtd_nor->block_size;
+ lfs_cfg->block_count = mtd_size / lfs_cfg->block_size;
+
+ lfs_cfg->lookahead_size = 32 * ((lfs_cfg->block_count + 31) / 32);
+ if (lfs_cfg->lookahead_size > LFS_LOOKAHEAD_MAX)
+ {
+ lfs_cfg->lookahead_size = LFS_LOOKAHEAD_MAX;
+ }
+#ifdef LFS_THREADSAFE
+ lfs_cfg->lock = _lfs_lock;
+ lfs_cfg->unlock = _lfs_unlock;
+#endif
+ lfs_cfg->read = _lfs_flash_read;
+ lfs_cfg->prog = _lfs_flash_prog;
+ lfs_cfg->erase = _lfs_flash_erase;
+ lfs_cfg->sync = _lfs_flash_sync;
+}
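+
+/* worked example (hypothetical part): a 16 MiB NOR flash with 4 KiB
+ * sectors and the defaults above yields
+ *   block_size     = 4096
+ *   block_count    = (16 * 1024 * 1024) / 4096 = 4096
+ *   lookahead_size = min(32 * ((4096 + 31) / 32), LFS_LOOKAHEAD_MAX)
+ *                  = min(4096, 128) = 128
+ */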
+
+static int _dfs_lfs_mount(struct dfs_filesystem* dfs, unsigned long rwflag, const void* data)
+{
+ int result;
+ int index;
+ dfs_lfs_t* dfs_lfs;
+
+ /* Check Device Type */
+ if (dfs->dev_id->type != RT_Device_Class_MTD)
+ {
+ rt_kprintf("The flash device type must be MTD!\n");
+ return -EINVAL;
+ }
+
+ /* get an empty position */
+ index = _get_disk(RT_NULL);
+ if (index == -1)
+ {
+ return -EIO;
+ }
+
+ /* create lfs handle */
+ dfs_lfs = (dfs_lfs_t*)rt_malloc(sizeof(dfs_lfs_t));
+ if (dfs_lfs == RT_NULL)
+ {
+ rt_kprintf("ERROR:no memory!\n");
+ return -ENOMEM;
+ }
+ rt_memset(dfs_lfs, 0, sizeof(dfs_lfs_t));
+ rt_mutex_init(&dfs_lfs->lock, "lfslock", RT_IPC_FLAG_PRIO);
+ _lfs_load_config(&dfs_lfs->cfg, (struct rt_mtd_nor_device*)dfs->dev_id);
+
+ /* mount lfs */
+ result = lfs_mount(&dfs_lfs->lfs, &dfs_lfs->cfg);
+ if (result != LFS_ERR_OK)
+ {
+ rt_mutex_detach(&dfs_lfs->lock);
+ /* release memory */
+ rt_free(dfs_lfs);
+
+ return -EIO;
+ }
+
+ /* mount succeed! */
+ dfs->data = (void*)dfs_lfs;
+ _lfs_mount_tbl[index] = dfs_lfs;
+ return RT_EOK;
+}
+
+static int _dfs_lfs_unmount(struct dfs_filesystem* dfs)
+{
+ int result;
+ int index;
+ dfs_lfs_t* dfs_lfs;
+
+ RT_ASSERT(dfs != RT_NULL);
+ RT_ASSERT(dfs->data != RT_NULL);
+
+ /* find the device index and then umount it */
+ index = _get_disk(dfs->dev_id);
+ if (index == -1)
+ {
+ return -ENOENT;
+ }
+ _lfs_mount_tbl[index] = RT_NULL;
+
+ dfs_lfs = (dfs_lfs_t*)dfs->data;
+ dfs->data = RT_NULL;
+
+ result = lfs_unmount(&dfs_lfs->lfs);
+ rt_mutex_detach(&dfs_lfs->lock);
+ rt_free(dfs_lfs);
+
+ return _lfs_result_to_dfs(result);
+}
+
+#ifndef LFS_READONLY
+static int DFS_LFS_MKFS(rt_device_t dev_id, const char *fs_name)
+{
+ int result;
+ int index;
+ dfs_lfs_t* dfs_lfs;
+
+ if (dev_id == RT_NULL)
+ {
+ return -EINVAL;
+ }
+
+ /* Check Device Type */
+ if (dev_id->type != RT_Device_Class_MTD)
+ {
+ rt_kprintf("The flash device type must be MTD!\n");
+ return -EINVAL;
+ }
+
+ index = _get_disk(dev_id);
+ if (index == -1)
+ {
+ /* create lfs handle */
+ dfs_lfs = rt_malloc(sizeof(dfs_lfs_t));
+ if (dfs_lfs == RT_NULL)
+ {
+ rt_kprintf("ERROR:no memory!\n");
+ return -ENOMEM;
+ }
+ rt_memset(dfs_lfs, 0, sizeof(dfs_lfs_t));
+ rt_mutex_init(&dfs_lfs->lock, "lfslock", RT_IPC_FLAG_PRIO);
+ _lfs_load_config(&dfs_lfs->cfg, (struct rt_mtd_nor_device*)dev_id);
+
+ /* format flash device */
+ result = lfs_format(&dfs_lfs->lfs, &dfs_lfs->cfg);
+ rt_mutex_detach(&dfs_lfs->lock);
+ rt_free(dfs_lfs);
+ return _lfs_result_to_dfs(result);
+ }
+
+ dfs_lfs = _lfs_mount_tbl[index];
+
+ /* unmount it */
+ result = lfs_unmount(&dfs_lfs->lfs);
+ if (result != LFS_ERR_OK)
+ {
+ return _lfs_result_to_dfs(result);
+ }
+
+ _lfs_mount_tbl[index] = RT_NULL;
+
+ /* format flash device */
+ result = lfs_format(&dfs_lfs->lfs, &dfs_lfs->cfg);
+ if (result != LFS_ERR_OK)
+ {
+ return _lfs_result_to_dfs(result);
+ }
+
+ _lfs_load_config(&dfs_lfs->cfg, (struct rt_mtd_nor_device*)dev_id);
+
+ /* mount lfs */
+ result = lfs_mount(&dfs_lfs->lfs, &dfs_lfs->cfg);
+ if (result == LFS_ERR_OK)
+ {
+ _lfs_mount_tbl[index] = dfs_lfs;
+ }
+
+ return _lfs_result_to_dfs(result);
+}
+#endif
+
+static int _dfs_lfs_statfs_count(void* p, lfs_block_t b)
+{
+ *(lfs_size_t*)p += 1;
+ return 0;
+}
+
+static int _dfs_lfs_statfs(struct dfs_filesystem* dfs, struct statfs* buf)
+{
+ dfs_lfs_t* dfs_lfs;
+ int result;
+ lfs_size_t in_use = 0;
+
+ RT_ASSERT(buf != RT_NULL);
+ RT_ASSERT(dfs != RT_NULL);
+ RT_ASSERT(dfs->data != RT_NULL);
+
+ dfs_lfs = (dfs_lfs_t*)dfs->data;
+
+ /* Get total sectors and free sectors */
+ result = lfs_fs_traverse(&dfs_lfs->lfs, _dfs_lfs_statfs_count, &in_use);
+ if (result != LFS_ERR_OK)
+ {
+ return _lfs_result_to_dfs(result);
+ }
+
+ buf->f_bsize = dfs_lfs->cfg.block_size;
+ buf->f_blocks = dfs_lfs->cfg.block_count;
+ buf->f_bfree = dfs_lfs->cfg.block_count - in_use;
+
+ return RT_EOK;
+}
+
+#ifndef LFS_READONLY
+static int _dfs_lfs_unlink(struct dfs_filesystem* dfs, const char* path)
+{
+ dfs_lfs_t* dfs_lfs;
+ int result;
+
+ RT_ASSERT(dfs != RT_NULL);
+ RT_ASSERT(dfs->data != RT_NULL);
+
+ dfs_lfs = (dfs_lfs_t*)dfs->data;
+ result = lfs_remove(&dfs_lfs->lfs, path);
+
+ return _lfs_result_to_dfs(result);
+}
+#endif
+
+static void _dfs_lfs_tostat(struct stat* st, struct lfs_info* info, time_t mtime)
+{
+ memset(st, 0, sizeof(struct stat));
+
+ /* convert to dfs stat structure */
+ st->st_dev = 0;
+ st->st_size = info->size;
+ st->st_mode = S_IRWXU | S_IRWXG | S_IRWXO;
+
+ switch (info->type)
+ {
+ case LFS_TYPE_DIR:
+ st->st_mode |= S_IFDIR;
+ break;
+
+ case LFS_TYPE_REG:
+ st->st_mode |= S_IFREG;
+ break;
+ }
+
+ st->st_mtime = mtime;
+}
+
+static int _dfs_lfs_stat(struct dfs_filesystem* dfs, const char* path, struct stat* st)
+{
+ dfs_lfs_t* dfs_lfs;
+ int result;
+ struct lfs_info info;
+
+ RT_ASSERT(dfs != RT_NULL);
+ RT_ASSERT(dfs->data != RT_NULL);
+
+ dfs_lfs = (dfs_lfs_t*)dfs->data;
+ result = lfs_stat(&dfs_lfs->lfs, path, &info);
+
+ if (result != LFS_ERR_OK)
+ {
+ return _lfs_result_to_dfs(result);
+ }
+
+ time_t mtime = 0;
+ lfs_getattr(&dfs_lfs->lfs, path, ATTR_TIMESTAMP, &mtime, sizeof(time_t));
+
+ _dfs_lfs_tostat(st, &info, mtime);
+ return 0;
+}
+
+#ifndef LFS_READONLY
+static int _dfs_lfs_rename(struct dfs_filesystem* dfs, const char* from, const char* to)
+{
+ dfs_lfs_t* dfs_lfs;
+ int result;
+
+ RT_ASSERT(dfs != RT_NULL);
+ RT_ASSERT(dfs->data != RT_NULL);
+
+ dfs_lfs = (dfs_lfs_t*)dfs->data;
+ result = lfs_rename(&dfs_lfs->lfs, from, to);
+
+ return _lfs_result_to_dfs(result);
+}
+#endif
+
+/******************************************************************************
+ * file operations
+ ******************************************************************************/
+static int _dfs_lfs_open(struct dfs_file* file)
+{
+ struct dfs_filesystem* dfs;
+ dfs_lfs_t* dfs_lfs;
+ int result;
+ int flags = 0;
+
+ RT_ASSERT(file != RT_NULL);
+
+ dfs = (struct dfs_filesystem*)file->vnode->fs;
+
+ RT_ASSERT(file->vnode->ref_count > 0);
+ if (file->vnode->ref_count > 1)
+ {
+ if (file->vnode->type == FT_DIRECTORY
+ && !(file->flags & O_DIRECTORY))
+ {
+ return -ENOENT;
+ }
+ file->pos = 0;
+ return 0;
+ }
+
+ dfs_lfs = (dfs_lfs_t*)dfs->data;
+
+ if (file->flags & O_DIRECTORY)
+ {
+ dfs_lfs_fd_t* dfs_lfs_fd = rt_malloc(sizeof(dfs_lfs_fd_t));
+ if (dfs_lfs_fd == RT_NULL)
+ {
+ rt_kprintf("ERROR:no memory!\n");
+ result = -ENOMEM;
+
+ goto _error_dir;
+ }
+ rt_memset(dfs_lfs_fd, 0, sizeof(dfs_lfs_fd_t));
+ dfs_lfs_fd->lfs = &dfs_lfs->lfs;
+
+ if (file->flags & O_CREAT)
+ {
+#ifndef LFS_READONLY
+ result = lfs_mkdir(dfs_lfs_fd->lfs, file->vnode->path);
+#else
+ result = -EINVAL;
+#endif
+ if (result != LFS_ERR_OK)
+ {
+ goto _error_dir;
+ }
+ else
+ {
+ time_t now = time(RT_NULL);
+ lfs_setattr(dfs_lfs_fd->lfs, file->vnode->path, ATTR_TIMESTAMP, &now, sizeof(time_t));
+ }
+ }
+
+ result = lfs_dir_open(dfs_lfs_fd->lfs, &dfs_lfs_fd->u.dir, file->vnode->path);
+ if (result != LFS_ERR_OK)
+ {
+ goto _error_dir;
+ }
+ else
+ {
+ file->data = (void*)dfs_lfs_fd;
+ return RT_EOK;
+ }
+
+ _error_dir:
+ if (dfs_lfs_fd != RT_NULL)
+ {
+ rt_free(dfs_lfs_fd);
+ }
+
+ return _lfs_result_to_dfs(result);
+ }
+ else
+ {
+ dfs_lfs_fd_t* dfs_lfs_fd = rt_malloc(sizeof(dfs_lfs_fd_t));
+ if (dfs_lfs_fd == RT_NULL)
+ {
+ rt_kprintf("ERROR:no memory!\n");
+ result = -ENOMEM;
+
+ goto _error_file;
+ }
+ rt_memset(dfs_lfs_fd, 0, sizeof(dfs_lfs_fd_t));
+ dfs_lfs_fd->lfs = &dfs_lfs->lfs;
+
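+ /* map the POSIX open flags onto littlefs flags; the low two
+ * bits of file->flags hold the access mode (O_ACCMODE) */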
+ if ((file->flags & 3) == O_RDONLY)
+ flags |= LFS_O_RDONLY;
+ if ((file->flags & 3) == O_WRONLY)
+ flags |= LFS_O_WRONLY;
+ if ((file->flags & 3) == O_RDWR)
+ flags |= LFS_O_RDWR;
+ if (file->flags & O_CREAT)
+ flags |= LFS_O_CREAT;
+ if (file->flags & O_EXCL)
+ flags |= LFS_O_EXCL;
+ if (file->flags & O_TRUNC)
+ flags |= LFS_O_TRUNC;
+ if (file->flags & O_APPEND)
+ flags |= LFS_O_APPEND;
+
+ result = lfs_file_open(dfs_lfs_fd->lfs, &dfs_lfs_fd->u.file, file->vnode->path, flags);
+ if (result != LFS_ERR_OK)
+ {
+ goto _error_file;
+ }
+ else
+ {
+ file->data = (void*)dfs_lfs_fd;
+ file->pos = dfs_lfs_fd->u.file.pos;
+ file->vnode->size = dfs_lfs_fd->u.file.ctz.size;
+ return RT_EOK;
+ }
+
+ _error_file:
+ if (dfs_lfs_fd != RT_NULL)
+ {
+ rt_free(dfs_lfs_fd);
+ }
+
+ return _lfs_result_to_dfs(result);
+ }
+}
+
+static int _dfs_lfs_close(struct dfs_file* file)
+{
+ int result;
+ dfs_lfs_fd_t* dfs_lfs_fd;
+ uint8_t need_time_update;
+ RT_ASSERT(file != RT_NULL);
+ RT_ASSERT(file->data != RT_NULL);
+
+ RT_ASSERT(file->vnode->ref_count > 0);
+ if (file->vnode->ref_count > 1)
+ {
+ return 0;
+ }
+
+ dfs_lfs_fd = (dfs_lfs_fd_t*)file->data;
+
+ if (file->vnode->type == FT_DIRECTORY)
+ {
+ result = lfs_dir_close(dfs_lfs_fd->lfs, &dfs_lfs_fd->u.dir);
+ }
+ else
+ {
+ need_time_update = (dfs_lfs_fd->u.file.flags & LFS_F_DIRTY) || (dfs_lfs_fd->u.file.flags & LFS_F_WRITING);
+ result = lfs_file_close(dfs_lfs_fd->lfs, &dfs_lfs_fd->u.file);
+ if (result == LFS_ERR_OK && need_time_update)
+ {
+ time_t now = time(RT_NULL);
+ lfs_setattr(dfs_lfs_fd->lfs, file->vnode->path, ATTR_TIMESTAMP, &now, sizeof(time_t));
+ }
+ }
+
+ rt_free(dfs_lfs_fd);
+
+ return _lfs_result_to_dfs(result);
+}
+
+static int _dfs_lfs_ioctl(struct dfs_file* file, int cmd, void* args)
+{
+ return -ENOSYS;
+}
+
+static DFS_LFS_RW_RETURN_TYPE _dfs_lfs_read(struct dfs_file* file, void* buf, size_t len)
+{
+ lfs_ssize_t ssize;
+ dfs_lfs_fd_t* dfs_lfs_fd;
+
+ RT_ASSERT(file != RT_NULL);
+ RT_ASSERT(file->data != RT_NULL);
+
+ if (file->vnode->type == FT_DIRECTORY)
+ {
+ return -EISDIR;
+ }
+
+ dfs_lfs_fd = (dfs_lfs_fd_t*)file->data;
+
+#if 0
+ if (lfs_file_tell(dfs_lfs_fd->lfs, &dfs_lfs_fd->u.file) != file->pos)
+ {
+ lfs_soff_t soff = lfs_file_seek(dfs_lfs_fd->lfs, &dfs_lfs_fd->u.file, file->pos, LFS_SEEK_SET);
+ if (soff < 0)
+ {
+ return _lfs_result_to_dfs(soff);
+ }
+ }
+#endif
+
+ ssize = lfs_file_read(dfs_lfs_fd->lfs, &dfs_lfs_fd->u.file, buf, len);
+ if (ssize < 0)
+ {
+ return _lfs_result_to_dfs(ssize);
+ }
+
+ /* update position */
+ file->pos = dfs_lfs_fd->u.file.pos;
+
+ return ssize;
+}
+
+#ifndef LFS_READONLY
+static DFS_LFS_RW_RETURN_TYPE _dfs_lfs_write(struct dfs_file* file, const void* buf, size_t len)
+{
+ lfs_ssize_t ssize;
+ dfs_lfs_fd_t* dfs_lfs_fd;
+ RT_ASSERT(file != RT_NULL);
+ RT_ASSERT(file->data != RT_NULL);
+
+ if (file->vnode->type == FT_DIRECTORY)
+ {
+ return -EISDIR;
+ }
+
+ dfs_lfs_fd = (dfs_lfs_fd_t*)file->data;
+
+#if 0
+ if (lfs_file_tell(dfs_lfs_fd->lfs, &dfs_lfs_fd->u.file) != file->pos)
+ {
+ lfs_soff_t soff = lfs_file_seek(dfs_lfs_fd->lfs, &dfs_lfs_fd->u.file, file->pos, LFS_SEEK_SET);
+ if (soff < 0)
+ {
+ return _lfs_result_to_dfs(soff);
+ }
+ }
+#endif
+
+ ssize = lfs_file_write(dfs_lfs_fd->lfs, &dfs_lfs_fd->u.file, buf, len);
+ if (ssize < 0)
+ {
+ return _lfs_result_to_dfs(ssize);
+ }
+
+ /* update position and file size */
+ file->pos = dfs_lfs_fd->u.file.pos;
+ file->vnode->size = dfs_lfs_fd->u.file.ctz.size;
+
+ return ssize;
+}
+#endif
+
+static int _dfs_lfs_flush(struct dfs_file* file)
+{
+ int result;
+ dfs_lfs_fd_t* dfs_lfs_fd;
+ uint8_t need_time_update;
+
+ RT_ASSERT(file != RT_NULL);
+ RT_ASSERT(file->data != RT_NULL);
+
+ dfs_lfs_fd = (dfs_lfs_fd_t*)file->data;
+ need_time_update = (dfs_lfs_fd->u.file.flags & LFS_F_DIRTY) || (dfs_lfs_fd->u.file.flags & LFS_F_WRITING);
+ result = lfs_file_sync(dfs_lfs_fd->lfs, &dfs_lfs_fd->u.file);
+ if (result == LFS_ERR_OK && need_time_update)
+ {
+ time_t now = time(RT_NULL);
+ lfs_setattr(dfs_lfs_fd->lfs, file->vnode->path, ATTR_TIMESTAMP, &now, sizeof(time_t));
+ }
+
+ return _lfs_result_to_dfs(result);
+}
+
+static DFS_LFS_LSK_RETURN_TYPE _dfs_lfs_lseek(struct dfs_file* file, rt_off_t offset)
+{
+ dfs_lfs_fd_t* dfs_lfs_fd;
+
+ RT_ASSERT(file != RT_NULL);
+ RT_ASSERT(file->data != RT_NULL);
+
+ dfs_lfs_fd = (dfs_lfs_fd_t*)file->data;
+
+ if (file->vnode->type == FT_REGULAR)
+ {
+ lfs_soff_t soff = lfs_file_seek(dfs_lfs_fd->lfs, &dfs_lfs_fd->u.file, offset, LFS_SEEK_SET);
+ if (soff < 0)
+ {
+ return _lfs_result_to_dfs(soff);
+ }
+
+ file->pos = dfs_lfs_fd->u.file.pos;
+ }
+ else if (file->vnode->type == FT_DIRECTORY)
+ {
+ lfs_soff_t soff = lfs_dir_seek(dfs_lfs_fd->lfs, &dfs_lfs_fd->u.dir, offset);
+ if (soff < 0)
+ {
+ return _lfs_result_to_dfs(soff);
+ }
+
+ file->pos = dfs_lfs_fd->u.dir.pos;
+ }
+
+ return (file->pos);
+}
+
+static int _dfs_lfs_getdents(struct dfs_file* file, struct dirent* dirp, uint32_t count)
+{
+ dfs_lfs_fd_t* dfs_lfs_fd;
+ int result;
+ int index;
+ struct dirent* d;
+ struct lfs_info info;
+
+ RT_ASSERT(file->data != RT_NULL);
+
+ dfs_lfs_fd = (dfs_lfs_fd_t*)(file->data);
+
+ /* round count down to a whole number of dirent entries */
+ count = (count / sizeof(struct dirent)) * sizeof(struct dirent);
+ if (count == 0)
+ {
+ return -EINVAL;
+ }
+
+ index = 0;
+ while (1)
+ {
+ d = dirp + index;
+
+ result = lfs_dir_read(dfs_lfs_fd->lfs, &dfs_lfs_fd->u.dir, &info);
+ if ((result != 1) || (info.name[0] == 0))
+ {
+ break;
+ }
+
+ if (rt_strcmp(info.name, ".") == 0)
+ {
+ continue;
+ }
+ else if (rt_strcmp(info.name, "..") == 0)
+ {
+ continue;
+ }
+
+ d->d_type = DT_UNKNOWN;
+
+ switch (info.type)
+ {
+ case LFS_TYPE_DIR:
+ d->d_type |= DT_DIR;
+ break;
+
+ case LFS_TYPE_REG:
+ d->d_type |= DT_REG;
+ break;
+ }
+
+ d->d_namlen = (rt_uint8_t)rt_strlen(info.name);
+ d->d_reclen = (rt_uint16_t)sizeof(struct dirent);
+ rt_strncpy(d->d_name, info.name, DFS_PATH_MAX);
+
+ index++;
+ if (index * sizeof(struct dirent) >= count)
+ {
+ break;
+ }
+ }
+
+ if (index == 0)
+ {
+ return _lfs_result_to_dfs(result);
+ }
+
+ file->pos += index * sizeof(struct dirent);
+
+ return index * sizeof(struct dirent);
+}
+
+static const struct dfs_file_ops _dfs_lfs_fops = {
+ _dfs_lfs_open,
+ _dfs_lfs_close,
+ _dfs_lfs_ioctl,
+ _dfs_lfs_read,
+#ifndef LFS_READONLY
+ _dfs_lfs_write,
+#else
+ NULL,
+#endif
+ _dfs_lfs_flush,
+ _dfs_lfs_lseek,
+ _dfs_lfs_getdents,
+ // RT_NULL, /* poll interface */
+};
+
+static const struct dfs_filesystem_ops _dfs_lfs_ops = {
+ "lfs",
+ DFS_FS_FLAG_DEFAULT,
+ &_dfs_lfs_fops,
+ _dfs_lfs_mount,
+ _dfs_lfs_unmount,
+#ifndef LFS_READONLY
+ _dfs_lfs_mkfs,
+#else
+ NULL,
+#endif
+ _dfs_lfs_statfs,
+#ifndef LFS_READONLY
+ _dfs_lfs_unlink,
+#else
+ NULL,
+#endif
+ _dfs_lfs_stat,
+#ifndef LFS_READONLY
+ _dfs_lfs_rename,
+#else
+ NULL,
+#endif
+};
+
+int dfs_lfs_init(void)
+{
+ /* register littlefs with the DFS framework */
+ return dfs_register(&_dfs_lfs_ops);
+}
+INIT_COMPONENT_EXPORT(dfs_lfs_init);
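With dfs_lfs_init hooked in via INIT_COMPONENT_EXPORT, the driver registers itself at boot and mounting follows the usual DFS flow. A minimal sketch, assuming an MTD NOR device has already been registered under the hypothetical name "filesystem" (for instance by an SFUD/FAL port):

    #include <rtthread.h>
    #include <dfs_fs.h>

    static int mnt_init(void)
    {
        /* try to mount; if the flash is blank, format once and retry */
        if (dfs_mount("filesystem", "/", "lfs", 0, 0) != 0)
        {
            dfs_mkfs("lfs", "filesystem");
            if (dfs_mount("filesystem", "/", "lfs", 0, 0) != 0)
            {
                rt_kprintf("littlefs mount failed!\n");
                return -RT_ERROR;
            }
        }
        rt_kprintf("littlefs mounted on /\n");
        return RT_EOK;
    }
    INIT_ENV_EXPORT(mnt_init);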
diff --git a/packages/littlefs-v2.11.2/lfs.c b/packages/littlefs-v2.11.2/lfs.c
new file mode 100644
index 0000000..da4bfca
--- /dev/null
+++ b/packages/littlefs-v2.11.2/lfs.c
@@ -0,0 +1,6549 @@
+/*
+ * The little filesystem
+ *
+ * Copyright (c) 2022, The littlefs authors.
+ * Copyright (c) 2017, Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include "lfs.h"
+#include "lfs_util.h"
+
+
+// some constants used throughout the code
+#define LFS_BLOCK_NULL ((lfs_block_t)-1)
+#define LFS_BLOCK_INLINE ((lfs_block_t)-2)
+
+enum {
+ LFS_OK_RELOCATED = 1,
+ LFS_OK_DROPPED = 2,
+ LFS_OK_ORPHANED = 3,
+};
+
+enum {
+ LFS_CMP_EQ = 0,
+ LFS_CMP_LT = 1,
+ LFS_CMP_GT = 2,
+};
+
+
+/// Caching block device operations ///
+
+static inline void lfs_cache_drop(lfs_t *lfs, lfs_cache_t *rcache) {
+ // do not zero, cheaper if cache is readonly or only going to be
+ // written with identical data (during relocates)
+ (void)lfs;
+ rcache->block = LFS_BLOCK_NULL;
+}
+
+static inline void lfs_cache_zero(lfs_t *lfs, lfs_cache_t *pcache) {
+ // zero to avoid information leak
+ memset(pcache->buffer, 0xff, lfs->cfg->cache_size);
+ pcache->block = LFS_BLOCK_NULL;
+}
+
+static int lfs_bd_read(lfs_t *lfs,
+ const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
+ lfs_block_t block, lfs_off_t off,
+ void *buffer, lfs_size_t size) {
+ uint8_t *data = buffer;
+ if (off+size > lfs->cfg->block_size
+ || (lfs->block_count && block >= lfs->block_count)) {
+ return LFS_ERR_CORRUPT;
+ }
+
+ while (size > 0) {
+ lfs_size_t diff = size;
+
+ if (pcache && block == pcache->block &&
+ off < pcache->off + pcache->size) {
+ if (off >= pcache->off) {
+ // is already in pcache?
+ diff = lfs_min(diff, pcache->size - (off-pcache->off));
+ memcpy(data, &pcache->buffer[off-pcache->off], diff);
+
+ data += diff;
+ off += diff;
+ size -= diff;
+ continue;
+ }
+
+ // pcache takes priority
+ diff = lfs_min(diff, pcache->off-off);
+ }
+
+ if (block == rcache->block &&
+ off < rcache->off + rcache->size) {
+ if (off >= rcache->off) {
+ // is already in rcache?
+ diff = lfs_min(diff, rcache->size - (off-rcache->off));
+ memcpy(data, &rcache->buffer[off-rcache->off], diff);
+
+ data += diff;
+ off += diff;
+ size -= diff;
+ continue;
+ }
+
+ // rcache takes priority
+ diff = lfs_min(diff, rcache->off-off);
+ }
+
+ if (size >= hint && off % lfs->cfg->read_size == 0 &&
+ size >= lfs->cfg->read_size) {
+ // bypass cache?
+ diff = lfs_aligndown(diff, lfs->cfg->read_size);
+ int err = lfs->cfg->read(lfs->cfg, block, off, data, diff);
+ LFS_ASSERT(err <= 0);
+ if (err) {
+ return err;
+ }
+
+ data += diff;
+ off += diff;
+ size -= diff;
+ continue;
+ }
+
+ // load to cache, first condition can no longer fail
+ LFS_ASSERT(!lfs->block_count || block < lfs->block_count);
+ rcache->block = block;
+ rcache->off = lfs_aligndown(off, lfs->cfg->read_size);
+ rcache->size = lfs_min(
+ lfs_min(
+ lfs_alignup(off+hint, lfs->cfg->read_size),
+ lfs->cfg->block_size)
+ - rcache->off,
+ lfs->cfg->cache_size);
+ int err = lfs->cfg->read(lfs->cfg, rcache->block,
+ rcache->off, rcache->buffer, rcache->size);
+ LFS_ASSERT(err <= 0);
+ if (err) {
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int lfs_bd_cmp(lfs_t *lfs,
+ const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
+ lfs_block_t block, lfs_off_t off,
+ const void *buffer, lfs_size_t size) {
+ const uint8_t *data = buffer;
+ lfs_size_t diff = 0;
+
+ for (lfs_off_t i = 0; i < size; i += diff) {
+ uint8_t dat[8];
+
+ diff = lfs_min(size-i, sizeof(dat));
+ int err = lfs_bd_read(lfs,
+ pcache, rcache, hint-i,
+ block, off+i, &dat, diff);
+ if (err) {
+ return err;
+ }
+
+ int res = memcmp(dat, data + i, diff);
+ if (res) {
+ return res < 0 ? LFS_CMP_LT : LFS_CMP_GT;
+ }
+ }
+
+ return LFS_CMP_EQ;
+}
+
+static int lfs_bd_crc(lfs_t *lfs,
+ const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
+ lfs_block_t block, lfs_off_t off, lfs_size_t size, uint32_t *crc) {
+ lfs_size_t diff = 0;
+
+ for (lfs_off_t i = 0; i < size; i += diff) {
+ uint8_t dat[8];
+ diff = lfs_min(size-i, sizeof(dat));
+ int err = lfs_bd_read(lfs,
+ pcache, rcache, hint-i,
+ block, off+i, &dat, diff);
+ if (err) {
+ return err;
+ }
+
+ *crc = lfs_crc(*crc, &dat, diff);
+ }
+
+ return 0;
+}
+
+#ifndef LFS_READONLY
+static int lfs_bd_flush(lfs_t *lfs,
+ lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate) {
+ if (pcache->block != LFS_BLOCK_NULL && pcache->block != LFS_BLOCK_INLINE) {
+ LFS_ASSERT(pcache->block < lfs->block_count);
+ lfs_size_t diff = lfs_alignup(pcache->size, lfs->cfg->prog_size);
+ int err = lfs->cfg->prog(lfs->cfg, pcache->block,
+ pcache->off, pcache->buffer, diff);
+ LFS_ASSERT(err <= 0);
+ if (err) {
+ return err;
+ }
+
+ if (validate) {
+ // check data on disk
+ lfs_cache_drop(lfs, rcache);
+ int res = lfs_bd_cmp(lfs,
+ NULL, rcache, diff,
+ pcache->block, pcache->off, pcache->buffer, diff);
+ if (res < 0) {
+ return res;
+ }
+
+ if (res != LFS_CMP_EQ) {
+ return LFS_ERR_CORRUPT;
+ }
+ }
+
+ lfs_cache_zero(lfs, pcache);
+ }
+
+ return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_bd_sync(lfs_t *lfs,
+ lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate) {
+ lfs_cache_drop(lfs, rcache);
+
+ int err = lfs_bd_flush(lfs, pcache, rcache, validate);
+ if (err) {
+ return err;
+ }
+
+ err = lfs->cfg->sync(lfs->cfg);
+ LFS_ASSERT(err <= 0);
+ return err;
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_bd_prog(lfs_t *lfs,
+ lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate,
+ lfs_block_t block, lfs_off_t off,
+ const void *buffer, lfs_size_t size) {
+ const uint8_t *data = buffer;
+ LFS_ASSERT(block == LFS_BLOCK_INLINE || block < lfs->block_count);
+ LFS_ASSERT(off + size <= lfs->cfg->block_size);
+
+ while (size > 0) {
+ if (block == pcache->block &&
+ off >= pcache->off &&
+ off < pcache->off + lfs->cfg->cache_size) {
+ // already fits in pcache?
+ lfs_size_t diff = lfs_min(size,
+ lfs->cfg->cache_size - (off-pcache->off));
+ memcpy(&pcache->buffer[off-pcache->off], data, diff);
+
+ data += diff;
+ off += diff;
+ size -= diff;
+
+ pcache->size = lfs_max(pcache->size, off - pcache->off);
+ if (pcache->size == lfs->cfg->cache_size) {
+ // eagerly flush out pcache if we fill up
+ int err = lfs_bd_flush(lfs, pcache, rcache, validate);
+ if (err) {
+ return err;
+ }
+ }
+
+ continue;
+ }
+
+ // pcache must have been flushed, either by programming an
+ // entire block or by manually flushing the pcache
+ LFS_ASSERT(pcache->block == LFS_BLOCK_NULL);
+
+ // prepare pcache, first condition can no longer fail
+ pcache->block = block;
+ pcache->off = lfs_aligndown(off, lfs->cfg->prog_size);
+ pcache->size = 0;
+ }
+
+ return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_bd_erase(lfs_t *lfs, lfs_block_t block) {
+ LFS_ASSERT(block < lfs->block_count);
+ int err = lfs->cfg->erase(lfs->cfg, block);
+ LFS_ASSERT(err <= 0);
+ return err;
+}
+#endif
+
+
+/// Small type-level utilities ///
+
+// some operations on paths
+static inline lfs_size_t lfs_path_namelen(const char *path) {
+ return strcspn(path, "/");
+}
+
+static inline bool lfs_path_islast(const char *path) {
+ lfs_size_t namelen = lfs_path_namelen(path);
+ return path[namelen + strspn(path + namelen, "/")] == '\0';
+}
+
+static inline bool lfs_path_isdir(const char *path) {
+ return path[lfs_path_namelen(path)] != '\0';
+}
+
+// operations on block pairs
+static inline void lfs_pair_swap(lfs_block_t pair[2]) {
+ lfs_block_t t = pair[0];
+ pair[0] = pair[1];
+ pair[1] = t;
+}
+
+static inline bool lfs_pair_isnull(const lfs_block_t pair[2]) {
+ return pair[0] == LFS_BLOCK_NULL || pair[1] == LFS_BLOCK_NULL;
+}
+
+static inline int lfs_pair_cmp(
+ const lfs_block_t paira[2],
+ const lfs_block_t pairb[2]) {
+ return !(paira[0] == pairb[0] || paira[1] == pairb[1] ||
+ paira[0] == pairb[1] || paira[1] == pairb[0]);
+}
+
+static inline bool lfs_pair_issync(
+ const lfs_block_t paira[2],
+ const lfs_block_t pairb[2]) {
+ return (paira[0] == pairb[0] && paira[1] == pairb[1]) ||
+ (paira[0] == pairb[1] && paira[1] == pairb[0]);
+}
+
+static inline void lfs_pair_fromle32(lfs_block_t pair[2]) {
+ pair[0] = lfs_fromle32(pair[0]);
+ pair[1] = lfs_fromle32(pair[1]);
+}
+
+#ifndef LFS_READONLY
+static inline void lfs_pair_tole32(lfs_block_t pair[2]) {
+ pair[0] = lfs_tole32(pair[0]);
+ pair[1] = lfs_tole32(pair[1]);
+}
+#endif
+
+// operations on 32-bit entry tags
+typedef uint32_t lfs_tag_t;
+typedef int32_t lfs_stag_t;
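+
+// as the masks below imply, a tag packs four fields into 32 bits:
+//
+//   [1|---  11  ---|--- 10 ---|--- 10 ---]
+//    ^       ^           ^          ^- size  (lfs_tag_size,  0x000003ff)
+//    |       |           '------------ id    (lfs_tag_id,    0x000ffc00)
+//    |       '------------------------ type  (lfs_tag_type3, 0x7ff00000)
+//    '-------------------------------- invalid bit (lfs_tag_isvalid)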
+
+#define LFS_MKTAG(type, id, size) \
+ (((lfs_tag_t)(type) << 20) | ((lfs_tag_t)(id) << 10) | (lfs_tag_t)(size))
+
+#define LFS_MKTAG_IF(cond, type, id, size) \
+ ((cond) ? LFS_MKTAG(type, id, size) : LFS_MKTAG(LFS_FROM_NOOP, 0, 0))
+
+#define LFS_MKTAG_IF_ELSE(cond, type1, id1, size1, type2, id2, size2) \
+ ((cond) ? LFS_MKTAG(type1, id1, size1) : LFS_MKTAG(type2, id2, size2))
+
+static inline bool lfs_tag_isvalid(lfs_tag_t tag) {
+ return !(tag & 0x80000000);
+}
+
+static inline bool lfs_tag_isdelete(lfs_tag_t tag) {
+ return ((int32_t)(tag << 22) >> 22) == -1;
+}
+
+static inline uint16_t lfs_tag_type1(lfs_tag_t tag) {
+ return (tag & 0x70000000) >> 20;
+}
+
+static inline uint16_t lfs_tag_type2(lfs_tag_t tag) {
+ return (tag & 0x78000000) >> 20;
+}
+
+static inline uint16_t lfs_tag_type3(lfs_tag_t tag) {
+ return (tag & 0x7ff00000) >> 20;
+}
+
+static inline uint8_t lfs_tag_chunk(lfs_tag_t tag) {
+ return (tag & 0x0ff00000) >> 20;
+}
+
+static inline int8_t lfs_tag_splice(lfs_tag_t tag) {
+ return (int8_t)lfs_tag_chunk(tag);
+}
+
+static inline uint16_t lfs_tag_id(lfs_tag_t tag) {
+ return (tag & 0x000ffc00) >> 10;
+}
+
+static inline lfs_size_t lfs_tag_size(lfs_tag_t tag) {
+ return tag & 0x000003ff;
+}
+
+static inline lfs_size_t lfs_tag_dsize(lfs_tag_t tag) {
+ return sizeof(tag) + lfs_tag_size(tag + lfs_tag_isdelete(tag));
+}
+
+// operations on attributes in attribute lists
+struct lfs_mattr {
+ lfs_tag_t tag;
+ const void *buffer;
+};
+
+struct lfs_diskoff {
+ lfs_block_t block;
+ lfs_off_t off;
+};
+
+#define LFS_MKATTRS(...) \
+ (struct lfs_mattr[]){__VA_ARGS__}, \
+ sizeof((struct lfs_mattr[]){__VA_ARGS__}) / sizeof(struct lfs_mattr)
+
+// operations on global state
+static inline void lfs_gstate_xor(lfs_gstate_t *a, const lfs_gstate_t *b) {
+ a->tag ^= b->tag;
+ a->pair[0] ^= b->pair[0];
+ a->pair[1] ^= b->pair[1];
+}
+
+static inline bool lfs_gstate_iszero(const lfs_gstate_t *a) {
+ return a->tag == 0
+ && a->pair[0] == 0
+ && a->pair[1] == 0;
+}
+
+#ifndef LFS_READONLY
+static inline bool lfs_gstate_hasorphans(const lfs_gstate_t *a) {
+ return lfs_tag_size(a->tag);
+}
+
+static inline uint8_t lfs_gstate_getorphans(const lfs_gstate_t *a) {
+ return lfs_tag_size(a->tag) & 0x1ff;
+}
+
+static inline bool lfs_gstate_hasmove(const lfs_gstate_t *a) {
+ return lfs_tag_type1(a->tag);
+}
+#endif
+
+static inline bool lfs_gstate_needssuperblock(const lfs_gstate_t *a) {
+ return lfs_tag_size(a->tag) >> 9;
+}
+
+static inline bool lfs_gstate_hasmovehere(const lfs_gstate_t *a,
+ const lfs_block_t *pair) {
+ return lfs_tag_type1(a->tag) && lfs_pair_cmp(a->pair, pair) == 0;
+}
+
+static inline void lfs_gstate_fromle32(lfs_gstate_t *a) {
+ a->tag = lfs_fromle32(a->tag);
+ a->pair[0] = lfs_fromle32(a->pair[0]);
+ a->pair[1] = lfs_fromle32(a->pair[1]);
+}
+
+#ifndef LFS_READONLY
+static inline void lfs_gstate_tole32(lfs_gstate_t *a) {
+ a->tag = lfs_tole32(a->tag);
+ a->pair[0] = lfs_tole32(a->pair[0]);
+ a->pair[1] = lfs_tole32(a->pair[1]);
+}
+#endif
+
+// operations on forward-CRCs used to track erased state
+struct lfs_fcrc {
+ lfs_size_t size;
+ uint32_t crc;
+};
+
+static void lfs_fcrc_fromle32(struct lfs_fcrc *fcrc) {
+ fcrc->size = lfs_fromle32(fcrc->size);
+ fcrc->crc = lfs_fromle32(fcrc->crc);
+}
+
+#ifndef LFS_READONLY
+static void lfs_fcrc_tole32(struct lfs_fcrc *fcrc) {
+ fcrc->size = lfs_tole32(fcrc->size);
+ fcrc->crc = lfs_tole32(fcrc->crc);
+}
+#endif
+
+// other endianness operations
+static void lfs_ctz_fromle32(struct lfs_ctz *ctz) {
+ ctz->head = lfs_fromle32(ctz->head);
+ ctz->size = lfs_fromle32(ctz->size);
+}
+
+#ifndef LFS_READONLY
+static void lfs_ctz_tole32(struct lfs_ctz *ctz) {
+ ctz->head = lfs_tole32(ctz->head);
+ ctz->size = lfs_tole32(ctz->size);
+}
+#endif
+
+static inline void lfs_superblock_fromle32(lfs_superblock_t *superblock) {
+ superblock->version = lfs_fromle32(superblock->version);
+ superblock->block_size = lfs_fromle32(superblock->block_size);
+ superblock->block_count = lfs_fromle32(superblock->block_count);
+ superblock->name_max = lfs_fromle32(superblock->name_max);
+ superblock->file_max = lfs_fromle32(superblock->file_max);
+ superblock->attr_max = lfs_fromle32(superblock->attr_max);
+}
+
+#ifndef LFS_READONLY
+static inline void lfs_superblock_tole32(lfs_superblock_t *superblock) {
+ superblock->version = lfs_tole32(superblock->version);
+ superblock->block_size = lfs_tole32(superblock->block_size);
+ superblock->block_count = lfs_tole32(superblock->block_count);
+ superblock->name_max = lfs_tole32(superblock->name_max);
+ superblock->file_max = lfs_tole32(superblock->file_max);
+ superblock->attr_max = lfs_tole32(superblock->attr_max);
+}
+#endif
+
+#ifndef LFS_NO_ASSERT
+static bool lfs_mlist_isopen(struct lfs_mlist *head,
+ struct lfs_mlist *node) {
+ for (struct lfs_mlist **p = &head; *p; p = &(*p)->next) {
+ if (*p == (struct lfs_mlist*)node) {
+ return true;
+ }
+ }
+
+ return false;
+}
+#endif
+
+static void lfs_mlist_remove(lfs_t *lfs, struct lfs_mlist *mlist) {
+ for (struct lfs_mlist **p = &lfs->mlist; *p; p = &(*p)->next) {
+ if (*p == mlist) {
+ *p = (*p)->next;
+ break;
+ }
+ }
+}
+
+static void lfs_mlist_append(lfs_t *lfs, struct lfs_mlist *mlist) {
+ mlist->next = lfs->mlist;
+ lfs->mlist = mlist;
+}
+
+// some other filesystem operations
+static uint32_t lfs_fs_disk_version(lfs_t *lfs) {
+ (void)lfs;
+#ifdef LFS_MULTIVERSION
+ if (lfs->cfg->disk_version) {
+ return lfs->cfg->disk_version;
+ } else
+#endif
+ {
+ return LFS_DISK_VERSION;
+ }
+}
+
+static uint16_t lfs_fs_disk_version_major(lfs_t *lfs) {
+ return 0xffff & (lfs_fs_disk_version(lfs) >> 16);
+}
+
+static uint16_t lfs_fs_disk_version_minor(lfs_t *lfs) {
+ return 0xffff & (lfs_fs_disk_version(lfs) >> 0);
+}
+
+
+/// Internal operations predeclared here ///
+#ifndef LFS_READONLY
+static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
+ const struct lfs_mattr *attrs, int attrcount);
+static int lfs_dir_compact(lfs_t *lfs,
+ lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
+ lfs_mdir_t *source, uint16_t begin, uint16_t end);
+static lfs_ssize_t lfs_file_flushedwrite(lfs_t *lfs, lfs_file_t *file,
+ const void *buffer, lfs_size_t size);
+static lfs_ssize_t lfs_file_write_(lfs_t *lfs, lfs_file_t *file,
+ const void *buffer, lfs_size_t size);
+static int lfs_file_sync_(lfs_t *lfs, lfs_file_t *file);
+static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file);
+static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file);
+
+static int lfs_fs_deorphan(lfs_t *lfs, bool powerloss);
+static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans);
+static void lfs_fs_prepmove(lfs_t *lfs,
+ uint16_t id, const lfs_block_t pair[2]);
+static int lfs_fs_pred(lfs_t *lfs, const lfs_block_t dir[2],
+ lfs_mdir_t *pdir);
+static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t dir[2],
+ lfs_mdir_t *parent);
+static int lfs_fs_forceconsistency(lfs_t *lfs);
+#endif
+
+static void lfs_fs_prepsuperblock(lfs_t *lfs, bool needssuperblock);
+
+#ifdef LFS_MIGRATE
+static int lfs1_traverse(lfs_t *lfs,
+ int (*cb)(void*, lfs_block_t), void *data);
+#endif
+
+static int lfs_dir_rewind_(lfs_t *lfs, lfs_dir_t *dir);
+
+static lfs_ssize_t lfs_file_flushedread(lfs_t *lfs, lfs_file_t *file,
+ void *buffer, lfs_size_t size);
+static lfs_ssize_t lfs_file_read_(lfs_t *lfs, lfs_file_t *file,
+ void *buffer, lfs_size_t size);
+static int lfs_file_close_(lfs_t *lfs, lfs_file_t *file);
+static lfs_soff_t lfs_file_size_(lfs_t *lfs, lfs_file_t *file);
+
+static lfs_ssize_t lfs_fs_size_(lfs_t *lfs);
+static int lfs_fs_traverse_(lfs_t *lfs,
+ int (*cb)(void *data, lfs_block_t block), void *data,
+ bool includeorphans);
+
+static int lfs_deinit(lfs_t *lfs);
+static int lfs_unmount_(lfs_t *lfs);
+
+
+/// Block allocator ///
+
+// allocations should call this when all allocated blocks are committed to
+// the filesystem
+//
+// after a checkpoint, the block allocator may realloc any untracked blocks
+static void lfs_alloc_ckpoint(lfs_t *lfs) {
+ lfs->lookahead.ckpoint = lfs->block_count;
+}
+
+// drop the lookahead buffer, this is done during mounting and failed
+// traversals in order to avoid invalid lookahead state
+static void lfs_alloc_drop(lfs_t *lfs) {
+ lfs->lookahead.size = 0;
+ lfs->lookahead.next = 0;
+ lfs_alloc_ckpoint(lfs);
+}
+
+#ifndef LFS_READONLY
+static int lfs_alloc_lookahead(void *p, lfs_block_t block) {
+ lfs_t *lfs = (lfs_t*)p;
+ lfs_block_t off = ((block - lfs->lookahead.start)
+ + lfs->block_count) % lfs->block_count;
+
+ if (off < lfs->lookahead.size) {
+ lfs->lookahead.buffer[off / 8] |= 1U << (off % 8);
+ }
+
+ return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_alloc_scan(lfs_t *lfs) {
+ // move lookahead buffer to the first unused block
+ //
+ // note we limit the lookahead buffer to at most the amount of blocks
+ // checkpointed, this prevents the math in lfs_alloc from underflowing
+ lfs->lookahead.start = (lfs->lookahead.start + lfs->lookahead.next)
+ % lfs->block_count;
+ lfs->lookahead.next = 0;
+ lfs->lookahead.size = lfs_min(
+ 8*lfs->cfg->lookahead_size,
+ lfs->lookahead.ckpoint);
+
+ // find mask of free blocks from tree
+ memset(lfs->lookahead.buffer, 0, lfs->cfg->lookahead_size);
+ int err = lfs_fs_traverse_(lfs, lfs_alloc_lookahead, lfs, true);
+ if (err) {
+ lfs_alloc_drop(lfs);
+ return err;
+ }
+
+ return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_alloc(lfs_t *lfs, lfs_block_t *block) {
+ while (true) {
+ // scan our lookahead buffer for free blocks
+ while (lfs->lookahead.next < lfs->lookahead.size) {
+ if (!(lfs->lookahead.buffer[lfs->lookahead.next / 8]
+ & (1U << (lfs->lookahead.next % 8)))) {
+ // found a free block
+ *block = (lfs->lookahead.start + lfs->lookahead.next)
+ % lfs->block_count;
+
+ // eagerly find next free block to maximize how many blocks
+ // lfs_alloc_ckpoint makes available for scanning
+ while (true) {
+ lfs->lookahead.next += 1;
+ lfs->lookahead.ckpoint -= 1;
+
+ if (lfs->lookahead.next >= lfs->lookahead.size
+ || !(lfs->lookahead.buffer[lfs->lookahead.next / 8]
+ & (1U << (lfs->lookahead.next % 8)))) {
+ return 0;
+ }
+ }
+ }
+
+ lfs->lookahead.next += 1;
+ lfs->lookahead.ckpoint -= 1;
+ }
+
+ // In order to keep our block allocator from spinning forever when our
+ // filesystem is full, we mark points where there are no in-flight
+ // allocations with a checkpoint before starting a set of allocations.
+ //
+ // If we've looked at all blocks since the last checkpoint, we report
+ // the filesystem as out of storage.
+ //
+ if (lfs->lookahead.ckpoint <= 0) {
+ LFS_ERROR("No more free space 0x%"PRIx32,
+ (lfs->lookahead.start + lfs->lookahead.next)
+ % lfs->block_count);
+ return LFS_ERR_NOSPC;
+ }
+
+ // No blocks in our lookahead buffer, we need to scan the filesystem for
+ // unused blocks in the next lookahead window.
+ int err = lfs_alloc_scan(lfs);
+ if (err) {
+ return err;
+ }
+ }
+}
+#endif
+
+/// Metadata pair and directory operations ///
+static lfs_stag_t lfs_dir_getslice(lfs_t *lfs, const lfs_mdir_t *dir,
+ lfs_tag_t gmask, lfs_tag_t gtag,
+ lfs_off_t goff, void *gbuffer, lfs_size_t gsize) {
+ lfs_off_t off = dir->off;
+ lfs_tag_t ntag = dir->etag;
+ lfs_stag_t gdiff = 0;
+
+ // synthetic moves
+ if (lfs_gstate_hasmovehere(&lfs->gdisk, dir->pair) &&
+ lfs_tag_id(gmask) != 0) {
+ if (lfs_tag_id(lfs->gdisk.tag) == lfs_tag_id(gtag)) {
+ return LFS_ERR_NOENT;
+ } else if (lfs_tag_id(lfs->gdisk.tag) < lfs_tag_id(gtag)) {
+ gdiff -= LFS_MKTAG(0, 1, 0);
+ }
+ }
+
+ // iterate over dir block backwards (for faster lookups)
+ while (off >= sizeof(lfs_tag_t) + lfs_tag_dsize(ntag)) {
+ off -= lfs_tag_dsize(ntag);
+ lfs_tag_t tag = ntag;
+ int err = lfs_bd_read(lfs,
+ NULL, &lfs->rcache, sizeof(ntag),
+ dir->pair[0], off, &ntag, sizeof(ntag));
+ LFS_ASSERT(err <= 0);
+ if (err) {
+ return err;
+ }
+
+ ntag = (lfs_frombe32(ntag) ^ tag) & 0x7fffffff;
+
+ if (lfs_tag_id(gmask) != 0 &&
+ lfs_tag_type1(tag) == LFS_TYPE_SPLICE &&
+ lfs_tag_id(tag) <= lfs_tag_id(gtag - gdiff)) {
+ if (tag == (LFS_MKTAG(LFS_TYPE_CREATE, 0, 0) |
+ (LFS_MKTAG(0, 0x3ff, 0) & (gtag - gdiff)))) {
+ // found where we were created
+ return LFS_ERR_NOENT;
+ }
+
+ // move around splices
+ gdiff += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
+ }
+
+ if ((gmask & tag) == (gmask & (gtag - gdiff))) {
+ if (lfs_tag_isdelete(tag)) {
+ return LFS_ERR_NOENT;
+ }
+
+ lfs_size_t diff = lfs_min(lfs_tag_size(tag), gsize);
+ err = lfs_bd_read(lfs,
+ NULL, &lfs->rcache, diff,
+ dir->pair[0], off+sizeof(tag)+goff, gbuffer, diff);
+ LFS_ASSERT(err <= 0);
+ if (err) {
+ return err;
+ }
+
+ memset((uint8_t*)gbuffer + diff, 0, gsize - diff);
+
+ return tag + gdiff;
+ }
+ }
+
+ return LFS_ERR_NOENT;
+}
+
+static lfs_stag_t lfs_dir_get(lfs_t *lfs, const lfs_mdir_t *dir,
+ lfs_tag_t gmask, lfs_tag_t gtag, void *buffer) {
+ return lfs_dir_getslice(lfs, dir,
+ gmask, gtag,
+ 0, buffer, lfs_tag_size(gtag));
+}
+
+static int lfs_dir_getread(lfs_t *lfs, const lfs_mdir_t *dir,
+ const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
+ lfs_tag_t gmask, lfs_tag_t gtag,
+ lfs_off_t off, void *buffer, lfs_size_t size) {
+ uint8_t *data = buffer;
+ if (off+size > lfs->cfg->block_size) {
+ return LFS_ERR_CORRUPT;
+ }
+
+ while (size > 0) {
+ lfs_size_t diff = size;
+
+ if (pcache && pcache->block == LFS_BLOCK_INLINE &&
+ off < pcache->off + pcache->size) {
+ if (off >= pcache->off) {
+ // is already in pcache?
+ diff = lfs_min(diff, pcache->size - (off-pcache->off));
+ memcpy(data, &pcache->buffer[off-pcache->off], diff);
+
+ data += diff;
+ off += diff;
+ size -= diff;
+ continue;
+ }
+
+ // pcache takes priority
+ diff = lfs_min(diff, pcache->off-off);
+ }
+
+ if (rcache->block == LFS_BLOCK_INLINE &&
+ off < rcache->off + rcache->size) {
+ if (off >= rcache->off) {
+ // is already in rcache?
+ diff = lfs_min(diff, rcache->size - (off-rcache->off));
+ memcpy(data, &rcache->buffer[off-rcache->off], diff);
+
+ data += diff;
+ off += diff;
+ size -= diff;
+ continue;
+ }
+ }
+
+ // load to cache, first condition can no longer fail
+ rcache->block = LFS_BLOCK_INLINE;
+ rcache->off = lfs_aligndown(off, lfs->cfg->read_size);
+ rcache->size = lfs_min(lfs_alignup(off+hint, lfs->cfg->read_size),
+ lfs->cfg->cache_size);
+ int err = lfs_dir_getslice(lfs, dir, gmask, gtag,
+ rcache->off, rcache->buffer, rcache->size);
+ if (err < 0) {
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+#ifndef LFS_READONLY
+static int lfs_dir_traverse_filter(void *p,
+ lfs_tag_t tag, const void *buffer) {
+ lfs_tag_t *filtertag = p;
+ (void)buffer;
+
+ // which mask depends on unique bit in tag structure
+ uint32_t mask = (tag & LFS_MKTAG(0x100, 0, 0))
+ ? LFS_MKTAG(0x7ff, 0x3ff, 0)
+ : LFS_MKTAG(0x700, 0x3ff, 0);
+
+ // check for redundancy
+ if ((mask & tag) == (mask & *filtertag) ||
+ lfs_tag_isdelete(*filtertag) ||
+ (LFS_MKTAG(0x7ff, 0x3ff, 0) & tag) == (
+ LFS_MKTAG(LFS_TYPE_DELETE, 0, 0) |
+ (LFS_MKTAG(0, 0x3ff, 0) & *filtertag))) {
+ *filtertag = LFS_MKTAG(LFS_FROM_NOOP, 0, 0);
+ return true;
+ }
+
+ // check if we need to adjust for created/deleted tags
+ if (lfs_tag_type1(tag) == LFS_TYPE_SPLICE &&
+ lfs_tag_id(tag) <= lfs_tag_id(*filtertag)) {
+ *filtertag += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
+ }
+
+ return false;
+}
+#endif
+
+#ifndef LFS_READONLY
+// maximum recursive depth of lfs_dir_traverse, the deepest call:
+//
+// traverse with commit
+// '-> traverse with move
+// '-> traverse with filter
+//
+#define LFS_DIR_TRAVERSE_DEPTH 3
+
+struct lfs_dir_traverse {
+ const lfs_mdir_t *dir;
+ lfs_off_t off;
+ lfs_tag_t ptag;
+ const struct lfs_mattr *attrs;
+ int attrcount;
+
+ lfs_tag_t tmask;
+ lfs_tag_t ttag;
+ uint16_t begin;
+ uint16_t end;
+ int16_t diff;
+
+ int (*cb)(void *data, lfs_tag_t tag, const void *buffer);
+ void *data;
+
+ lfs_tag_t tag;
+ const void *buffer;
+ struct lfs_diskoff disk;
+};
+
+static int lfs_dir_traverse(lfs_t *lfs,
+ const lfs_mdir_t *dir, lfs_off_t off, lfs_tag_t ptag,
+ const struct lfs_mattr *attrs, int attrcount,
+ lfs_tag_t tmask, lfs_tag_t ttag,
+ uint16_t begin, uint16_t end, int16_t diff,
+ int (*cb)(void *data, lfs_tag_t tag, const void *buffer), void *data) {
+ // This function is inherently recursive, but bounded. To allow tool-based
+ // analysis without unnecessary code-cost we use an explicit stack
+ struct lfs_dir_traverse stack[LFS_DIR_TRAVERSE_DEPTH-1];
+ unsigned sp = 0;
+ int res;
+
+ // iterate over directory and attrs
+ lfs_tag_t tag;
+ const void *buffer;
+ struct lfs_diskoff disk = {0};
+ while (true) {
+ {
+ if (off+lfs_tag_dsize(ptag) < dir->off) {
+ off += lfs_tag_dsize(ptag);
+ int err = lfs_bd_read(lfs,
+ NULL, &lfs->rcache, sizeof(tag),
+ dir->pair[0], off, &tag, sizeof(tag));
+ if (err) {
+ return err;
+ }
+
+ tag = (lfs_frombe32(tag) ^ ptag) | 0x80000000;
+ disk.block = dir->pair[0];
+ disk.off = off+sizeof(lfs_tag_t);
+ buffer = &disk;
+ ptag = tag;
+ } else if (attrcount > 0) {
+ tag = attrs[0].tag;
+ buffer = attrs[0].buffer;
+ attrs += 1;
+ attrcount -= 1;
+ } else {
+ // finished traversal, pop from stack?
+ res = 0;
+ break;
+ }
+
+ // do we need to filter?
+ lfs_tag_t mask = LFS_MKTAG(0x7ff, 0, 0);
+ if ((mask & tmask & tag) != (mask & tmask & ttag)) {
+ continue;
+ }
+
+ if (lfs_tag_id(tmask) != 0) {
+ LFS_ASSERT(sp < LFS_DIR_TRAVERSE_DEPTH);
+ // recurse, scan for duplicates, and update tag based on
+ // creates/deletes
+ stack[sp] = (struct lfs_dir_traverse){
+ .dir = dir,
+ .off = off,
+ .ptag = ptag,
+ .attrs = attrs,
+ .attrcount = attrcount,
+ .tmask = tmask,
+ .ttag = ttag,
+ .begin = begin,
+ .end = end,
+ .diff = diff,
+ .cb = cb,
+ .data = data,
+ .tag = tag,
+ .buffer = buffer,
+ .disk = disk,
+ };
+ sp += 1;
+
+ tmask = 0;
+ ttag = 0;
+ begin = 0;
+ end = 0;
+ diff = 0;
+ cb = lfs_dir_traverse_filter;
+ data = &stack[sp-1].tag;
+ continue;
+ }
+ }
+
+popped:
+ // in filter range?
+ if (lfs_tag_id(tmask) != 0 &&
+ !(lfs_tag_id(tag) >= begin && lfs_tag_id(tag) < end)) {
+ continue;
+ }
+
+ // handle special cases for mcu-side operations
+ if (lfs_tag_type3(tag) == LFS_FROM_NOOP) {
+ // do nothing
+ } else if (lfs_tag_type3(tag) == LFS_FROM_MOVE) {
+ // Without this condition, lfs_dir_traverse can exhibit an
+ // extremely expensive O(n^3) set of nested loops when renaming.
+ // This happens because lfs_dir_traverse tries to filter tags by
+ // the tags in the source directory, triggering a second
+ // lfs_dir_traverse with its own filter operation.
+ //
+ // traverse with commit
+ // '-> traverse with filter
+ // '-> traverse with move
+ // '-> traverse with filter
+ //
+ // However we don't actually care about filtering the second set of
+ // tags, since duplicate tags have no effect when filtering.
+ //
+ // This check skips this unnecessary recursive filtering explicitly,
+ // reducing this runtime from O(n^3) to O(n^2).
+ if (cb == lfs_dir_traverse_filter) {
+ continue;
+ }
+
+ // recurse into move
+ stack[sp] = (struct lfs_dir_traverse){
+ .dir = dir,
+ .off = off,
+ .ptag = ptag,
+ .attrs = attrs,
+ .attrcount = attrcount,
+ .tmask = tmask,
+ .ttag = ttag,
+ .begin = begin,
+ .end = end,
+ .diff = diff,
+ .cb = cb,
+ .data = data,
+ .tag = LFS_MKTAG(LFS_FROM_NOOP, 0, 0),
+ };
+ sp += 1;
+
+ uint16_t fromid = lfs_tag_size(tag);
+ uint16_t toid = lfs_tag_id(tag);
+ dir = buffer;
+ off = 0;
+ ptag = 0xffffffff;
+ attrs = NULL;
+ attrcount = 0;
+ tmask = LFS_MKTAG(0x600, 0x3ff, 0);
+ ttag = LFS_MKTAG(LFS_TYPE_STRUCT, 0, 0);
+ begin = fromid;
+ end = fromid+1;
+ diff = toid-fromid+diff;
+ } else if (lfs_tag_type3(tag) == LFS_FROM_USERATTRS) {
+ for (unsigned i = 0; i < lfs_tag_size(tag); i++) {
+ const struct lfs_attr *a = buffer;
+ res = cb(data, LFS_MKTAG(LFS_TYPE_USERATTR + a[i].type,
+ lfs_tag_id(tag) + diff, a[i].size), a[i].buffer);
+ if (res < 0) {
+ return res;
+ }
+
+ if (res) {
+ break;
+ }
+ }
+ } else {
+ res = cb(data, tag + LFS_MKTAG(0, diff, 0), buffer);
+ if (res < 0) {
+ return res;
+ }
+
+ if (res) {
+ break;
+ }
+ }
+ }
+
+ if (sp > 0) {
+ // pop from the stack and return, fortunately all pops share
+ // a destination
+ dir = stack[sp-1].dir;
+ off = stack[sp-1].off;
+ ptag = stack[sp-1].ptag;
+ attrs = stack[sp-1].attrs;
+ attrcount = stack[sp-1].attrcount;
+ tmask = stack[sp-1].tmask;
+ ttag = stack[sp-1].ttag;
+ begin = stack[sp-1].begin;
+ end = stack[sp-1].end;
+ diff = stack[sp-1].diff;
+ cb = stack[sp-1].cb;
+ data = stack[sp-1].data;
+ tag = stack[sp-1].tag;
+ buffer = stack[sp-1].buffer;
+ disk = stack[sp-1].disk;
+ sp -= 1;
+ goto popped;
+ } else {
+ return res;
+ }
+}
+#endif
+
+static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
+ lfs_mdir_t *dir, const lfs_block_t pair[2],
+ lfs_tag_t fmask, lfs_tag_t ftag, uint16_t *id,
+ int (*cb)(void *data, lfs_tag_t tag, const void *buffer), void *data) {
+ // we can find tag very efficiently during a fetch, since we're already
+ // scanning the entire directory
+ lfs_stag_t besttag = -1;
+
+ // if either block address is invalid we return LFS_ERR_CORRUPT here,
+ // otherwise later writes to the pair could fail
+ if (lfs->block_count
+ && (pair[0] >= lfs->block_count || pair[1] >= lfs->block_count)) {
+ return LFS_ERR_CORRUPT;
+ }
+
+ // find the block with the most recent revision
+ uint32_t revs[2] = {0, 0};
+ int r = 0;
+ for (int i = 0; i < 2; i++) {
+ int err = lfs_bd_read(lfs,
+ NULL, &lfs->rcache, sizeof(revs[i]),
+ pair[i], 0, &revs[i], sizeof(revs[i]));
+ revs[i] = lfs_fromle32(revs[i]);
+ if (err && err != LFS_ERR_CORRUPT) {
+ return err;
+ }
+
+ if (err != LFS_ERR_CORRUPT &&
+ lfs_scmp(revs[i], revs[(i+1)%2]) > 0) {
+ r = i;
+ }
+ }
+
+ dir->pair[0] = pair[(r+0)%2];
+ dir->pair[1] = pair[(r+1)%2];
+ dir->rev = revs[(r+0)%2];
+ dir->off = 0; // nonzero = found some commits
+
+ // now scan tags to fetch the actual dir and find possible match
+ for (int i = 0; i < 2; i++) {
+ lfs_off_t off = 0;
+ lfs_tag_t ptag = 0xffffffff;
+
+ uint16_t tempcount = 0;
+ lfs_block_t temptail[2] = {LFS_BLOCK_NULL, LFS_BLOCK_NULL};
+ bool tempsplit = false;
+ lfs_stag_t tempbesttag = besttag;
+
+ // assume not erased until proven otherwise
+ bool maybeerased = false;
+ bool hasfcrc = false;
+ struct lfs_fcrc fcrc;
+
+ dir->rev = lfs_tole32(dir->rev);
+ uint32_t crc = lfs_crc(0xffffffff, &dir->rev, sizeof(dir->rev));
+ dir->rev = lfs_fromle32(dir->rev);
+
+ while (true) {
+ // extract next tag
+ lfs_tag_t tag;
+ off += lfs_tag_dsize(ptag);
+ int err = lfs_bd_read(lfs,
+ NULL, &lfs->rcache, lfs->cfg->block_size,
+ dir->pair[0], off, &tag, sizeof(tag));
+ if (err) {
+ if (err == LFS_ERR_CORRUPT) {
+ // can't continue?
+ break;
+ }
+ return err;
+ }
+
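+ // tags are stored on disk xored with the preceding tag, so the
+ // valid bit lets us detect where a commit was cut off by power loss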
+ crc = lfs_crc(crc, &tag, sizeof(tag));
+ tag = lfs_frombe32(tag) ^ ptag;
+
+ // next commit not yet programmed?
+ if (!lfs_tag_isvalid(tag)) {
+ // we might only be erased if the last tag was a crc
+ maybeerased = (lfs_tag_type2(ptag) == LFS_TYPE_CCRC);
+ break;
+ // out of range?
+ } else if (off + lfs_tag_dsize(tag) > lfs->cfg->block_size) {
+ break;
+ }
+
+ ptag = tag;
+
+ if (lfs_tag_type2(tag) == LFS_TYPE_CCRC) {
+ // check the crc attr
+ uint32_t dcrc;
+ err = lfs_bd_read(lfs,
+ NULL, &lfs->rcache, lfs->cfg->block_size,
+ dir->pair[0], off+sizeof(tag), &dcrc, sizeof(dcrc));
+ if (err) {
+ if (err == LFS_ERR_CORRUPT) {
+ break;
+ }
+ return err;
+ }
+ dcrc = lfs_fromle32(dcrc);
+
+ if (crc != dcrc) {
+ break;
+ }
+
+ // reset the next bit if we need to
+ ptag ^= (lfs_tag_t)(lfs_tag_chunk(tag) & 1U) << 31;
+
+ // toss our crc into the filesystem seed for
+ // pseudorandom numbers, note we use another crc here
+ // as a collection function because it is sufficiently
+ // random and convenient
+ lfs->seed = lfs_crc(lfs->seed, &crc, sizeof(crc));
+
+ // update with what's found so far
+ besttag = tempbesttag;
+ dir->off = off + lfs_tag_dsize(tag);
+ dir->etag = ptag;
+ dir->count = tempcount;
+ dir->tail[0] = temptail[0];
+ dir->tail[1] = temptail[1];
+ dir->split = tempsplit;
+
+ // reset crc for the next commit
+ crc = 0xffffffff;
+ continue;
+ }
+
+ // crc the entry first, hopefully leaving it in the cache
+ err = lfs_bd_crc(lfs,
+ NULL, &lfs->rcache, lfs->cfg->block_size,
+ dir->pair[0], off+sizeof(tag),
+ lfs_tag_dsize(tag)-sizeof(tag), &crc);
+ if (err) {
+ if (err == LFS_ERR_CORRUPT) {
+ break;
+ }
+ return err;
+ }
+
+ // directory modification tags?
+ if (lfs_tag_type1(tag) == LFS_TYPE_NAME) {
+ // increase count of files if necessary
+ if (lfs_tag_id(tag) >= tempcount) {
+ tempcount = lfs_tag_id(tag) + 1;
+ }
+ } else if (lfs_tag_type1(tag) == LFS_TYPE_SPLICE) {
+ tempcount += lfs_tag_splice(tag);
+
+ if (tag == (LFS_MKTAG(LFS_TYPE_DELETE, 0, 0) |
+ (LFS_MKTAG(0, 0x3ff, 0) & tempbesttag))) {
+ tempbesttag |= 0x80000000;
+ } else if (tempbesttag != -1 &&
+ lfs_tag_id(tag) <= lfs_tag_id(tempbesttag)) {
+ tempbesttag += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
+ }
+ } else if (lfs_tag_type1(tag) == LFS_TYPE_TAIL) {
+ tempsplit = (lfs_tag_chunk(tag) & 1);
+
+ err = lfs_bd_read(lfs,
+ NULL, &lfs->rcache, lfs->cfg->block_size,
+ dir->pair[0], off+sizeof(tag), &temptail, 8);
+ if (err) {
+ if (err == LFS_ERR_CORRUPT) {
+ break;
+ }
+ return err;
+ }
+ lfs_pair_fromle32(temptail);
+ } else if (lfs_tag_type3(tag) == LFS_TYPE_FCRC) {
+ err = lfs_bd_read(lfs,
+ NULL, &lfs->rcache, lfs->cfg->block_size,
+ dir->pair[0], off+sizeof(tag),
+ &fcrc, sizeof(fcrc));
+ if (err) {
+ if (err == LFS_ERR_CORRUPT) {
+ break;
+ }
+ return err;
+ }
+
+ lfs_fcrc_fromle32(&fcrc);
+ hasfcrc = true;
+ }
+
+ // found a match for our fetcher?
+ if ((fmask & tag) == (fmask & ftag)) {
+ int res = cb(data, tag, &(struct lfs_diskoff){
+ dir->pair[0], off+sizeof(tag)});
+ if (res < 0) {
+ if (res == LFS_ERR_CORRUPT) {
+ break;
+ }
+ return res;
+ }
+
+ if (res == LFS_CMP_EQ) {
+ // found a match
+ tempbesttag = tag;
+ } else if ((LFS_MKTAG(0x7ff, 0x3ff, 0) & tag) ==
+ (LFS_MKTAG(0x7ff, 0x3ff, 0) & tempbesttag)) {
+ // found an identical tag, but contents didn't match
+ // this must mean that our besttag has been overwritten
+ tempbesttag = -1;
+ } else if (res == LFS_CMP_GT &&
+ lfs_tag_id(tag) <= lfs_tag_id(tempbesttag)) {
+ // found a greater match, keep track to keep things sorted
+ tempbesttag = tag | 0x80000000;
+ }
+ }
+ }
+
+ // found no valid commits?
+ if (dir->off == 0) {
+ // try the other block?
+ lfs_pair_swap(dir->pair);
+ dir->rev = revs[(r+1)%2];
+ continue;
+ }
+
+ // did we end on a valid commit? we may have an erased block
+ dir->erased = false;
+ if (maybeerased && dir->off % lfs->cfg->prog_size == 0) {
+ #ifdef LFS_MULTIVERSION
+ // note versions < lfs2.1 did not have fcrc tags, if
+ // we're < lfs2.1 treat missing fcrc as erased data
+ //
+ // we don't strictly need to do this, but otherwise writing
+ // to lfs2.0 disks becomes very inefficient
+ if (lfs_fs_disk_version(lfs) < 0x00020001) {
+ dir->erased = true;
+
+ } else
+ #endif
+ if (hasfcrc) {
+ // check for an fcrc matching the next prog's erased state, if
+ // this failed most likely a previous prog was interrupted, we
+ // need a new erase
+ uint32_t fcrc_ = 0xffffffff;
+ int err = lfs_bd_crc(lfs,
+ NULL, &lfs->rcache, lfs->cfg->block_size,
+ dir->pair[0], dir->off, fcrc.size, &fcrc_);
+ if (err && err != LFS_ERR_CORRUPT) {
+ return err;
+ }
+
+ // found beginning of erased part?
+ dir->erased = (fcrc_ == fcrc.crc);
+ }
+ }
+
+ // synthetic move
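+ //
+ // if the global state records a move pending into this pair, treat
+ // the moved id as already deleted so lookups stay consistent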
+ if (lfs_gstate_hasmovehere(&lfs->gdisk, dir->pair)) {
+ if (lfs_tag_id(lfs->gdisk.tag) == lfs_tag_id(besttag)) {
+ besttag |= 0x80000000;
+ } else if (besttag != -1 &&
+ lfs_tag_id(lfs->gdisk.tag) < lfs_tag_id(besttag)) {
+ besttag -= LFS_MKTAG(0, 1, 0);
+ }
+ }
+
+ // found tag? or found best id?
+ if (id) {
+ *id = lfs_min(lfs_tag_id(besttag), dir->count);
+ }
+
+ if (lfs_tag_isvalid(besttag)) {
+ return besttag;
+ } else if (lfs_tag_id(besttag) < dir->count) {
+ return LFS_ERR_NOENT;
+ } else {
+ return 0;
+ }
+ }
+
+ LFS_ERROR("Corrupted dir pair at {0x%"PRIx32", 0x%"PRIx32"}",
+ dir->pair[0], dir->pair[1]);
+ return LFS_ERR_CORRUPT;
+}
+
+static int lfs_dir_fetch(lfs_t *lfs,
+ lfs_mdir_t *dir, const lfs_block_t pair[2]) {
+ // note, mask=-1, tag=-1 can never match a tag since this
+ // pattern has the invalid bit set
+ return (int)lfs_dir_fetchmatch(lfs, dir, pair,
+ (lfs_tag_t)-1, (lfs_tag_t)-1, NULL, NULL, NULL);
+}
+
+static int lfs_dir_getgstate(lfs_t *lfs, const lfs_mdir_t *dir,
+ lfs_gstate_t *gstate) {
+ lfs_gstate_t temp;
+ lfs_stag_t res = lfs_dir_get(lfs, dir, LFS_MKTAG(0x7ff, 0, 0),
+ LFS_MKTAG(LFS_TYPE_MOVESTATE, 0, sizeof(temp)), &temp);
+ if (res < 0 && res != LFS_ERR_NOENT) {
+ return res;
+ }
+
+ if (res != LFS_ERR_NOENT) {
+ // xor together to find resulting gstate
+ lfs_gstate_fromle32(&temp);
+ lfs_gstate_xor(gstate, &temp);
+ }
+
+ return 0;
+}
+
+static int lfs_dir_getinfo(lfs_t *lfs, lfs_mdir_t *dir,
+ uint16_t id, struct lfs_info *info) {
+ if (id == 0x3ff) {
+ // special case for root
+ strcpy(info->name, "/");
+ info->type = LFS_TYPE_DIR;
+ return 0;
+ }
+
+ lfs_stag_t tag = lfs_dir_get(lfs, dir, LFS_MKTAG(0x780, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_NAME, id, lfs->name_max+1), info->name);
+ if (tag < 0) {
+ return (int)tag;
+ }
+
+ info->type = lfs_tag_type3(tag);
+
+ struct lfs_ctz ctz;
+ tag = lfs_dir_get(lfs, dir, LFS_MKTAG(0x700, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_STRUCT, id, sizeof(ctz)), &ctz);
+ if (tag < 0) {
+ return (int)tag;
+ }
+ lfs_ctz_fromle32(&ctz);
+
+ if (lfs_tag_type3(tag) == LFS_TYPE_CTZSTRUCT) {
+ info->size = ctz.size;
+ } else if (lfs_tag_type3(tag) == LFS_TYPE_INLINESTRUCT) {
+ info->size = lfs_tag_size(tag);
+ }
+
+ return 0;
+}
+
+struct lfs_dir_find_match {
+ lfs_t *lfs;
+ const void *name;
+ lfs_size_t size;
+};
+
+static int lfs_dir_find_match(void *data,
+ lfs_tag_t tag, const void *buffer) {
+ struct lfs_dir_find_match *name = data;
+ lfs_t *lfs = name->lfs;
+ const struct lfs_diskoff *disk = buffer;
+
+ // compare with disk
+ lfs_size_t diff = lfs_min(name->size, lfs_tag_size(tag));
+ int res = lfs_bd_cmp(lfs,
+ NULL, &lfs->rcache, diff,
+ disk->block, disk->off, name->name, diff);
+ if (res != LFS_CMP_EQ) {
+ return res;
+ }
+
+ // only equal if our size is still the same
+ if (name->size != lfs_tag_size(tag)) {
+ return (name->size < lfs_tag_size(tag)) ? LFS_CMP_LT : LFS_CMP_GT;
+ }
+
+ // found a match!
+ return LFS_CMP_EQ;
+}
+
+// lfs_dir_find tries to set path and id even if file is not found
+//
+// returns:
+// - 0 if file is found
+// - LFS_ERR_NOENT if file or parent is not found
+// - LFS_ERR_NOTDIR if parent is not a dir
+static lfs_stag_t lfs_dir_find(lfs_t *lfs, lfs_mdir_t *dir,
+ const char **path, uint16_t *id) {
+ // we reduce path to a single name if we can find it
+ const char *name = *path;
+
+ // default to root dir
+ lfs_stag_t tag = LFS_MKTAG(LFS_TYPE_DIR, 0x3ff, 0);
+ dir->tail[0] = lfs->root[0];
+ dir->tail[1] = lfs->root[1];
+
+ // empty paths are not allowed
+ if (*name == '\0') {
+ return LFS_ERR_INVAL;
+ }
+
+ while (true) {
+nextname:
+ // skip slashes if we're a directory
+ if (lfs_tag_type3(tag) == LFS_TYPE_DIR) {
+ name += strspn(name, "/");
+ }
+ lfs_size_t namelen = strcspn(name, "/");
+
+ // skip '.'
+ if (namelen == 1 && memcmp(name, ".", 1) == 0) {
+ name += namelen;
+ goto nextname;
+ }
+
+ // error on unmatched '..', trying to go above root?
+ if (namelen == 2 && memcmp(name, "..", 2) == 0) {
+ return LFS_ERR_INVAL;
+ }
+
+ // skip if matched by '..' in name
+ const char *suffix = name + namelen;
+ lfs_size_t sufflen;
+ int depth = 1;
+ while (true) {
+ suffix += strspn(suffix, "/");
+ sufflen = strcspn(suffix, "/");
+ if (sufflen == 0) {
+ break;
+ }
+
+ if (sufflen == 1 && memcmp(suffix, ".", 1) == 0) {
+ // noop
+ } else if (sufflen == 2 && memcmp(suffix, "..", 2) == 0) {
+ depth -= 1;
+ if (depth == 0) {
+ name = suffix + sufflen;
+ goto nextname;
+ }
+ } else {
+ depth += 1;
+ }
+
+ suffix += sufflen;
+ }
+
+ // found path
+ if (*name == '\0') {
+ return tag;
+ }
+
+ // update what we've found so far
+ *path = name;
+
+ // only continue if we're a directory
+ if (lfs_tag_type3(tag) != LFS_TYPE_DIR) {
+ return LFS_ERR_NOTDIR;
+ }
+
+ // grab the entry data
+ if (lfs_tag_id(tag) != 0x3ff) {
+ lfs_stag_t res = lfs_dir_get(lfs, dir, LFS_MKTAG(0x700, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), dir->tail);
+ if (res < 0) {
+ return res;
+ }
+ lfs_pair_fromle32(dir->tail);
+ }
+
+ // find entry matching name
+ while (true) {
+ tag = lfs_dir_fetchmatch(lfs, dir, dir->tail,
+ LFS_MKTAG(0x780, 0, 0),
+ LFS_MKTAG(LFS_TYPE_NAME, 0, namelen),
+ id,
+ lfs_dir_find_match, &(struct lfs_dir_find_match){
+ lfs, name, namelen});
+ if (tag < 0) {
+ return tag;
+ }
+
+ if (tag) {
+ break;
+ }
+
+ if (!dir->split) {
+ return LFS_ERR_NOENT;
+ }
+ }
+
+ // to next name
+ name += namelen;
+ }
+}
+
+// commit logic
+struct lfs_commit {
+ lfs_block_t block;
+ lfs_off_t off;
+ lfs_tag_t ptag;
+ uint32_t crc;
+
+ lfs_off_t begin;
+ lfs_off_t end;
+};
+
+#ifndef LFS_READONLY
+static int lfs_dir_commitprog(lfs_t *lfs, struct lfs_commit *commit,
+ const void *buffer, lfs_size_t size) {
+ int err = lfs_bd_prog(lfs,
+ &lfs->pcache, &lfs->rcache, false,
+ commit->block, commit->off,
+ (const uint8_t*)buffer, size);
+ if (err) {
+ return err;
+ }
+
+ commit->crc = lfs_crc(commit->crc, buffer, size);
+ commit->off += size;
+ return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_dir_commitattr(lfs_t *lfs, struct lfs_commit *commit,
+ lfs_tag_t tag, const void *buffer) {
+ // check if we fit
+ lfs_size_t dsize = lfs_tag_dsize(tag);
+ if (commit->off + dsize > commit->end) {
+ return LFS_ERR_NOSPC;
+ }
+
+ // write out tag
+ lfs_tag_t ntag = lfs_tobe32((tag & 0x7fffffff) ^ commit->ptag);
+ int err = lfs_dir_commitprog(lfs, commit, &ntag, sizeof(ntag));
+ if (err) {
+ return err;
+ }
+
+ if (!(tag & 0x80000000)) {
+ // from memory
+ err = lfs_dir_commitprog(lfs, commit, buffer, dsize-sizeof(tag));
+ if (err) {
+ return err;
+ }
+ } else {
+ // from disk
+ const struct lfs_diskoff *disk = buffer;
+ for (lfs_off_t i = 0; i < dsize-sizeof(tag); i++) {
+ // rely on caching to make this efficient
+ uint8_t dat;
+ err = lfs_bd_read(lfs,
+ NULL, &lfs->rcache, dsize-sizeof(tag)-i,
+ disk->block, disk->off+i, &dat, 1);
+ if (err) {
+ return err;
+ }
+
+ err = lfs_dir_commitprog(lfs, commit, &dat, 1);
+ if (err) {
+ return err;
+ }
+ }
+ }
+
+ commit->ptag = tag & 0x7fffffff;
+ return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+
+static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) {
+ // align to program units
+ //
+ // this gets a bit complex as we have two types of crcs:
+ // - 5-word crc with fcrc to check following prog (middle of block)
+ // - 2-word crc with no following prog (end of block)
+ const lfs_off_t end = lfs_alignup(
+ lfs_min(commit->off + 5*sizeof(uint32_t), lfs->cfg->block_size),
+ lfs->cfg->prog_size);
+
+ lfs_off_t off1 = 0;
+ uint32_t crc1 = 0;
+
+ // create crc tags to fill up remainder of commit, note that
+ // padding is not crced, which lets fetches skip padding but
+ // makes committing a bit more complicated
+ while (commit->off < end) {
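+ // note a crc tag's size field is only 10 bits (0x3ff marks a
+ // deleted tag), so one padding commit covers at most 0x3fe bytes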
+ lfs_off_t noff = (
+ lfs_min(end - (commit->off+sizeof(lfs_tag_t)), 0x3fe)
+ + (commit->off+sizeof(lfs_tag_t)));
+ // too large for crc tag? need padding commits
+ if (noff < end) {
+ noff = lfs_min(noff, end - 5*sizeof(uint32_t));
+ }
+
+ // space for fcrc?
+ uint8_t eperturb = (uint8_t)-1;
+ if (noff >= end && noff <= lfs->cfg->block_size - lfs->cfg->prog_size) {
+ // first read the leading byte, this always contains a bit
+ // we can perturb to avoid writes that don't change the fcrc
+ int err = lfs_bd_read(lfs,
+ NULL, &lfs->rcache, lfs->cfg->prog_size,
+ commit->block, noff, &eperturb, 1);
+ if (err && err != LFS_ERR_CORRUPT) {
+ return err;
+ }
+
+ #ifdef LFS_MULTIVERSION
+ // unfortunately fcrcs break mdir fetching < lfs2.1, so only write
+ // these if we're a >= lfs2.1 filesystem
+ if (lfs_fs_disk_version(lfs) <= 0x00020000) {
+ // don't write fcrc
+ } else
+ #endif
+ {
+ // find the expected fcrc, don't bother avoiding a reread
+ // of the eperturb, it should still be in our cache
+ struct lfs_fcrc fcrc = {
+ .size = lfs->cfg->prog_size,
+ .crc = 0xffffffff
+ };
+ err = lfs_bd_crc(lfs,
+ NULL, &lfs->rcache, lfs->cfg->prog_size,
+ commit->block, noff, fcrc.size, &fcrc.crc);
+ if (err && err != LFS_ERR_CORRUPT) {
+ return err;
+ }
+
+ lfs_fcrc_tole32(&fcrc);
+ err = lfs_dir_commitattr(lfs, commit,
+ LFS_MKTAG(LFS_TYPE_FCRC, 0x3ff, sizeof(struct lfs_fcrc)),
+ &fcrc);
+ if (err) {
+ return err;
+ }
+ }
+ }
+
+ // build commit crc
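+ //
+ // the low chunk bit of the ccrc tag records the expected state of
+ // the next tag's valid bit (sampled from eperturb above), which is
+ // how fetch tells erased space from a half-programmed commit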
+ struct {
+ lfs_tag_t tag;
+ uint32_t crc;
+ } ccrc;
+ lfs_tag_t ntag = LFS_MKTAG(
+ LFS_TYPE_CCRC + (((uint8_t)~eperturb) >> 7), 0x3ff,
+ noff - (commit->off+sizeof(lfs_tag_t)));
+ ccrc.tag = lfs_tobe32(ntag ^ commit->ptag);
+ commit->crc = lfs_crc(commit->crc, &ccrc.tag, sizeof(lfs_tag_t));
+ ccrc.crc = lfs_tole32(commit->crc);
+
+ int err = lfs_bd_prog(lfs,
+ &lfs->pcache, &lfs->rcache, false,
+ commit->block, commit->off, &ccrc, sizeof(ccrc));
+ if (err) {
+ return err;
+ }
+
+ // keep track of non-padding checksum to verify
+ if (off1 == 0) {
+ off1 = commit->off + sizeof(lfs_tag_t);
+ crc1 = commit->crc;
+ }
+
+ commit->off = noff;
+ // perturb valid bit?
+ commit->ptag = ntag ^ ((0x80UL & ~eperturb) << 24);
+ // reset crc for next commit
+ commit->crc = 0xffffffff;
+
+ // manually flush here since we don't prog the padding, which
+ // otherwise confuses the caching layer
+ if (noff >= end || noff >= lfs->pcache.off + lfs->cfg->cache_size) {
+ // flush buffers
+ int err = lfs_bd_sync(lfs, &lfs->pcache, &lfs->rcache, false);
+ if (err) {
+ return err;
+ }
+ }
+ }
+
+ // successful commit, check checksums to make sure
+ //
+ // note that we don't need to check padding commits, worst
+ // case if they are corrupted we would have had to compact anyways
+ lfs_off_t off = commit->begin;
+ uint32_t crc = 0xffffffff;
+ int err = lfs_bd_crc(lfs,
+ NULL, &lfs->rcache, off1+sizeof(uint32_t),
+ commit->block, off, off1-off, &crc);
+ if (err) {
+ return err;
+ }
+
+ // check non-padding commits against known crc
+ if (crc != crc1) {
+ return LFS_ERR_CORRUPT;
+ }
+
+ // make sure to check crc in case we happen to pick
+ // up an unrelated crc (frozen block?)
+ err = lfs_bd_crc(lfs,
+ NULL, &lfs->rcache, sizeof(uint32_t),
+ commit->block, off1, sizeof(uint32_t), &crc);
+ if (err) {
+ return err;
+ }
+
+ if (crc != 0) {
+ return LFS_ERR_CORRUPT;
+ }
+
+ return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_dir_alloc(lfs_t *lfs, lfs_mdir_t *dir) {
+ // allocate pair of dir blocks (backwards, so we write block 1 first)
+ for (int i = 0; i < 2; i++) {
+ int err = lfs_alloc(lfs, &dir->pair[(i+1)%2]);
+ if (err) {
+ return err;
+ }
+ }
+
+ // zero for reproducibility in case initial block is unreadable
+ dir->rev = 0;
+
+ // rather than clobbering one of the blocks we just pretend
+ // the revision may be valid
+ int err = lfs_bd_read(lfs,
+ NULL, &lfs->rcache, sizeof(dir->rev),
+ dir->pair[0], 0, &dir->rev, sizeof(dir->rev));
+ dir->rev = lfs_fromle32(dir->rev);
+ if (err && err != LFS_ERR_CORRUPT) {
+ return err;
+ }
+
+ // to make sure we don't immediately evict, align the new revision count
+ // to our block_cycles modulus, see lfs_dir_compact for why our modulus
+ // is tweaked this way
+ if (lfs->cfg->block_cycles > 0) {
+ dir->rev = lfs_alignup(dir->rev, ((lfs->cfg->block_cycles+1)|1));
+ }
+
+ // set defaults
+ dir->off = sizeof(dir->rev);
+ dir->etag = 0xffffffff;
+ dir->count = 0;
+ dir->tail[0] = LFS_BLOCK_NULL;
+ dir->tail[1] = LFS_BLOCK_NULL;
+ dir->erased = false;
+ dir->split = false;
+
+ // don't write out yet, let caller take care of that
+ return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_dir_drop(lfs_t *lfs, lfs_mdir_t *dir, lfs_mdir_t *tail) {
+ // steal state
+ int err = lfs_dir_getgstate(lfs, tail, &lfs->gdelta);
+ if (err) {
+ return err;
+ }
+
+ // steal tail
+ lfs_pair_tole32(tail->tail);
+ err = lfs_dir_commit(lfs, dir, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_TAIL + tail->split, 0x3ff, 8), tail->tail}));
+ lfs_pair_fromle32(tail->tail);
+ if (err) {
+ return err;
+ }
+
+ return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_dir_split(lfs_t *lfs,
+ lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
+ lfs_mdir_t *source, uint16_t split, uint16_t end) {
+ // create tail metadata pair
+ lfs_mdir_t tail;
+ int err = lfs_dir_alloc(lfs, &tail);
+ if (err) {
+ return err;
+ }
+
+ tail.split = dir->split;
+ tail.tail[0] = dir->tail[0];
+ tail.tail[1] = dir->tail[1];
+
+ // note we don't care about LFS_OK_RELOCATED
+ int res = lfs_dir_compact(lfs, &tail, attrs, attrcount, source, split, end);
+ if (res < 0) {
+ return res;
+ }
+
+ dir->tail[0] = tail.pair[0];
+ dir->tail[1] = tail.pair[1];
+ dir->split = true;
+
+ // update root if needed
+ if (lfs_pair_cmp(dir->pair, lfs->root) == 0 && split == 0) {
+ lfs->root[0] = tail.pair[0];
+ lfs->root[1] = tail.pair[1];
+ }
+
+ return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_dir_commit_size(void *p, lfs_tag_t tag, const void *buffer) {
+ lfs_size_t *size = p;
+ (void)buffer;
+
+ *size += lfs_tag_dsize(tag);
+ return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+struct lfs_dir_commit_commit {
+ lfs_t *lfs;
+ struct lfs_commit *commit;
+};
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_dir_commit_commit(void *p, lfs_tag_t tag, const void *buffer) {
+ struct lfs_dir_commit_commit *commit = p;
+ return lfs_dir_commitattr(commit->lfs, commit->commit, tag, buffer);
+}
+#endif
+
+#ifndef LFS_READONLY
+static bool lfs_dir_needsrelocation(lfs_t *lfs, lfs_mdir_t *dir) {
+ // If our revision count == n * block_cycles, we should force a relocation,
+ // this is how littlefs wear-levels at the metadata-pair level. Note that we
+ // actually use (block_cycles+1)|1, this is to avoid two corner cases:
+ // 1. block_cycles = 1, which would prevent relocations from terminating
+ // 2. block_cycles = 2n, which, due to aliasing, would only ever relocate
+ // one metadata block in the pair, effectively making this useless
+ return (lfs->cfg->block_cycles > 0
+ && ((dir->rev + 1) % ((lfs->cfg->block_cycles+1)|1) == 0));
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_dir_compact(lfs_t *lfs,
+ lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
+ lfs_mdir_t *source, uint16_t begin, uint16_t end) {
+ // save some state in case block is bad
+ bool relocated = false;
+ bool tired = lfs_dir_needsrelocation(lfs, dir);
+
+ // increment revision count
+ dir->rev += 1;
+
+ // do not proactively relocate blocks during migrations, this
+ // can cause a number of failure states, such as: clobbering the
+ // v1 superblock if we relocate root, and invalidating directory
+ // pointers if we relocate the head of a directory. On top of
+ // this, relocations increase the overall complexity of
+ // lfs_migration, which is already a delicate operation.
+#ifdef LFS_MIGRATE
+ if (lfs->lfs1) {
+ tired = false;
+ }
+#endif
+
+ if (tired && lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) != 0) {
+ // we're writing too much, time to relocate
+ goto relocate;
+ }
+
+ // begin loop to commit compaction to blocks until a compact sticks
+ while (true) {
+ {
+ // setup commit state
+ struct lfs_commit commit = {
+ .block = dir->pair[1],
+ .off = 0,
+ .ptag = 0xffffffff,
+ .crc = 0xffffffff,
+
+ .begin = 0,
+ .end = (lfs->cfg->metadata_max ?
+ lfs->cfg->metadata_max : lfs->cfg->block_size) - 8,
+ };
+
+ // erase block to write to
+ int err = lfs_bd_erase(lfs, dir->pair[1]);
+ if (err) {
+ if (err == LFS_ERR_CORRUPT) {
+ goto relocate;
+ }
+ return err;
+ }
+
+ // write out header
+ dir->rev = lfs_tole32(dir->rev);
+ err = lfs_dir_commitprog(lfs, &commit,
+ &dir->rev, sizeof(dir->rev));
+ dir->rev = lfs_fromle32(dir->rev);
+ if (err) {
+ if (err == LFS_ERR_CORRUPT) {
+ goto relocate;
+ }
+ return err;
+ }
+
+ // traverse the directory, this time writing out all unique tags
+ err = lfs_dir_traverse(lfs,
+ source, 0, 0xffffffff, attrs, attrcount,
+ LFS_MKTAG(0x400, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_NAME, 0, 0),
+ begin, end, -begin,
+ lfs_dir_commit_commit, &(struct lfs_dir_commit_commit){
+ lfs, &commit});
+ if (err) {
+ if (err == LFS_ERR_CORRUPT) {
+ goto relocate;
+ }
+ return err;
+ }
+
+ // commit tail, which may be new after last size check
+ if (!lfs_pair_isnull(dir->tail)) {
+ lfs_pair_tole32(dir->tail);
+ err = lfs_dir_commitattr(lfs, &commit,
+ LFS_MKTAG(LFS_TYPE_TAIL + dir->split, 0x3ff, 8),
+ dir->tail);
+ lfs_pair_fromle32(dir->tail);
+ if (err) {
+ if (err == LFS_ERR_CORRUPT) {
+ goto relocate;
+ }
+ return err;
+ }
+ }
+
+ // bring over gstate?
+ lfs_gstate_t delta = {0};
+ if (!relocated) {
+ lfs_gstate_xor(&delta, &lfs->gdisk);
+ lfs_gstate_xor(&delta, &lfs->gstate);
+ }
+ lfs_gstate_xor(&delta, &lfs->gdelta);
+ delta.tag &= ~LFS_MKTAG(0, 0, 0x3ff);
+
+ err = lfs_dir_getgstate(lfs, dir, &delta);
+ if (err) {
+ return err;
+ }
+
+ if (!lfs_gstate_iszero(&delta)) {
+ lfs_gstate_tole32(&delta);
+ err = lfs_dir_commitattr(lfs, &commit,
+ LFS_MKTAG(LFS_TYPE_MOVESTATE, 0x3ff,
+ sizeof(delta)), &delta);
+ if (err) {
+ if (err == LFS_ERR_CORRUPT) {
+ goto relocate;
+ }
+ return err;
+ }
+ }
+
+ // complete commit with crc
+ err = lfs_dir_commitcrc(lfs, &commit);
+ if (err) {
+ if (err == LFS_ERR_CORRUPT) {
+ goto relocate;
+ }
+ return err;
+ }
+
+ // successful compaction, swap dir pair to indicate most recent
+ LFS_ASSERT(commit.off % lfs->cfg->prog_size == 0);
+ lfs_pair_swap(dir->pair);
+ dir->count = end - begin;
+ dir->off = commit.off;
+ dir->etag = commit.ptag;
+ // update gstate
+ lfs->gdelta = (lfs_gstate_t){0};
+ if (!relocated) {
+ lfs->gdisk = lfs->gstate;
+ }
+ }
+ break;
+
+relocate:
+ // commit was corrupted, drop caches and prepare to relocate block
+ relocated = true;
+ lfs_cache_drop(lfs, &lfs->pcache);
+ if (!tired) {
+ LFS_DEBUG("Bad block at 0x%"PRIx32, dir->pair[1]);
+ }
+
+ // can't relocate superblock, filesystem is now frozen
+ if (lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) == 0) {
+ LFS_WARN("Superblock 0x%"PRIx32" has become unwritable",
+ dir->pair[1]);
+ return LFS_ERR_NOSPC;
+ }
+
+ // relocate half of pair
+ int err = lfs_alloc(lfs, &dir->pair[1]);
+ if (err && (err != LFS_ERR_NOSPC || !tired)) {
+ return err;
+ }
+
+ tired = false;
+ continue;
+ }
+
+ return relocated ? LFS_OK_RELOCATED : 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_dir_splittingcompact(lfs_t *lfs, lfs_mdir_t *dir,
+ const struct lfs_mattr *attrs, int attrcount,
+ lfs_mdir_t *source, uint16_t begin, uint16_t end) {
+ while (true) {
+ // find size of first split, we do this by halving the split until
+ // the metadata is guaranteed to fit
+ //
+ // Note that this isn't a true binary search; we never increase the
+ // split size. This may result in poorly distributed metadata but isn't
+ // worth the extra code size or performance hit to fix.
+ lfs_size_t split = begin;
+ while (end - split > 1) {
+ lfs_size_t size = 0;
+ int err = lfs_dir_traverse(lfs,
+ source, 0, 0xffffffff, attrs, attrcount,
+ LFS_MKTAG(0x400, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_NAME, 0, 0),
+ split, end, -split,
+ lfs_dir_commit_size, &size);
+ if (err) {
+ return err;
+ }
+
+ // space is complicated, we need room for:
+ //
+ // - tail: 4+2*4 = 12 bytes
+ // - gstate: 4+3*4 = 16 bytes
+ // - move delete: 4 = 4 bytes
+ // - crc: 4+4 = 8 bytes
+ // total = 40 bytes
+ //
+ // And we cap at half a block to avoid degenerate cases with
+ // nearly-full metadata blocks.
+ //
+ lfs_size_t metadata_max = (lfs->cfg->metadata_max)
+ ? lfs->cfg->metadata_max
+ : lfs->cfg->block_size;
+ if (end - split < 0xff
+ && size <= lfs_min(
+ metadata_max - 40,
+ lfs_alignup(
+ metadata_max/2,
+ lfs->cfg->prog_size))) {
+ break;
+ }
+
+ split = split + ((end - split) / 2);
+ }
+
+ if (split == begin) {
+ // no split needed
+ break;
+ }
+
+ // split into two metadata pairs and continue
+ int err = lfs_dir_split(lfs, dir, attrs, attrcount,
+ source, split, end);
+ if (err && err != LFS_ERR_NOSPC) {
+ return err;
+ }
+
+ if (err) {
+ // we can't allocate a new block, try to compact with degraded
+ // performance
+ LFS_WARN("Unable to split {0x%"PRIx32", 0x%"PRIx32"}",
+ dir->pair[0], dir->pair[1]);
+ break;
+ } else {
+ end = split;
+ }
+ }
+
+ if (lfs_dir_needsrelocation(lfs, dir)
+ && lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) == 0) {
+ // oh no! we're writing too much to the superblock,
+ // should we expand?
+ lfs_ssize_t size = lfs_fs_size_(lfs);
+ if (size < 0) {
+ return size;
+ }
+
+ // littlefs cannot reclaim expanded superblocks, so expand cautiously
+ //
+ // if our filesystem is more than ~88% full, don't expand, this is
+ // somewhat arbitrary
+ if (lfs->block_count - size > lfs->block_count/8) {
+ LFS_DEBUG("Expanding superblock at rev %"PRIu32, dir->rev);
+ int err = lfs_dir_split(lfs, dir, attrs, attrcount,
+ source, begin, end);
+ if (err && err != LFS_ERR_NOSPC) {
+ return err;
+ }
+
+ if (err) {
+ // welp, we tried, if we ran out of space there's not much
+ // we can do, we'll error later if we've become frozen
+ LFS_WARN("Unable to expand superblock");
+ } else {
+ // duplicate the superblock entry into the new superblock
+ end = 1;
+ }
+ }
+ }
+
+ return lfs_dir_compact(lfs, dir, attrs, attrcount, source, begin, end);
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_dir_relocatingcommit(lfs_t *lfs, lfs_mdir_t *dir,
+ const lfs_block_t pair[2],
+ const struct lfs_mattr *attrs, int attrcount,
+ lfs_mdir_t *pdir) {
+ int state = 0;
+
+ // calculate changes to the directory
+ bool hasdelete = false;
+ for (int i = 0; i < attrcount; i++) {
+ if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_CREATE) {
+ dir->count += 1;
+ } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE) {
+ LFS_ASSERT(dir->count > 0);
+ dir->count -= 1;
+ hasdelete = true;
+ } else if (lfs_tag_type1(attrs[i].tag) == LFS_TYPE_TAIL) {
+ dir->tail[0] = ((lfs_block_t*)attrs[i].buffer)[0];
+ dir->tail[1] = ((lfs_block_t*)attrs[i].buffer)[1];
+ dir->split = (lfs_tag_chunk(attrs[i].tag) & 1);
+ lfs_pair_fromle32(dir->tail);
+ }
+ }
+
+ // should we actually drop the directory block?
+ if (hasdelete && dir->count == 0) {
+ LFS_ASSERT(pdir);
+ int err = lfs_fs_pred(lfs, dir->pair, pdir);
+ if (err && err != LFS_ERR_NOENT) {
+ return err;
+ }
+
+ if (err != LFS_ERR_NOENT && pdir->split) {
+ state = LFS_OK_DROPPED;
+ goto fixmlist;
+ }
+ }
+
+ if (dir->erased && dir->count < 0xff) {
+ // try to commit
+ struct lfs_commit commit = {
+ .block = dir->pair[0],
+ .off = dir->off,
+ .ptag = dir->etag,
+ .crc = 0xffffffff,
+
+ .begin = dir->off,
+ .end = (lfs->cfg->metadata_max ?
+ lfs->cfg->metadata_max : lfs->cfg->block_size) - 8,
+ };
+
+ // traverse attrs that need to be written out
+ lfs_pair_tole32(dir->tail);
+ int err = lfs_dir_traverse(lfs,
+ dir, dir->off, dir->etag, attrs, attrcount,
+ 0, 0, 0, 0, 0,
+ lfs_dir_commit_commit, &(struct lfs_dir_commit_commit){
+ lfs, &commit});
+ lfs_pair_fromle32(dir->tail);
+ if (err) {
+ if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
+ goto compact;
+ }
+ return err;
+ }
+
+ // commit any global diffs if we have any
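+ //
+ // gstate is tracked as xors: gstate ^ gdisk is what changed in RAM,
+ // and gdelta is what previous commits have already staged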
+ lfs_gstate_t delta = {0};
+ lfs_gstate_xor(&delta, &lfs->gstate);
+ lfs_gstate_xor(&delta, &lfs->gdisk);
+ lfs_gstate_xor(&delta, &lfs->gdelta);
+ delta.tag &= ~LFS_MKTAG(0, 0, 0x3ff);
+ if (!lfs_gstate_iszero(&delta)) {
+ err = lfs_dir_getgstate(lfs, dir, &delta);
+ if (err) {
+ return err;
+ }
+
+ lfs_gstate_tole32(&delta);
+ err = lfs_dir_commitattr(lfs, &commit,
+ LFS_MKTAG(LFS_TYPE_MOVESTATE, 0x3ff,
+ sizeof(delta)), &delta);
+ if (err) {
+ if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
+ goto compact;
+ }
+ return err;
+ }
+ }
+
+ // finalize commit with the crc
+ err = lfs_dir_commitcrc(lfs, &commit);
+ if (err) {
+ if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
+ goto compact;
+ }
+ return err;
+ }
+
+ // successful commit, update dir
+ LFS_ASSERT(commit.off % lfs->cfg->prog_size == 0);
+ dir->off = commit.off;
+ dir->etag = commit.ptag;
+ // and update gstate
+ lfs->gdisk = lfs->gstate;
+ lfs->gdelta = (lfs_gstate_t){0};
+
+ goto fixmlist;
+ }
+
+compact:
+ // fall back to compaction
+ lfs_cache_drop(lfs, &lfs->pcache);
+
+ state = lfs_dir_splittingcompact(lfs, dir, attrs, attrcount,
+ dir, 0, dir->count);
+ if (state < 0) {
+ return state;
+ }
+
+ goto fixmlist;
+
+fixmlist:;
+ // this complicated bit of logic is for fixing up any active
+ // metadata-pairs that we may have affected
+ //
+ // note we have to make two passes since the mdir passed to
+ // lfs_dir_commit could also be in this list, and even then
+ // we need to copy the pair so they don't get clobbered if we refetch
+ // our mdir.
+ lfs_block_t oldpair[2] = {pair[0], pair[1]};
+ for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
+ if (lfs_pair_cmp(d->m.pair, oldpair) == 0) {
+ d->m = *dir;
+ if (d->m.pair != pair) {
+ for (int i = 0; i < attrcount; i++) {
+ if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE &&
+ d->id == lfs_tag_id(attrs[i].tag) &&
+ d->type != LFS_TYPE_DIR) {
+ d->m.pair[0] = LFS_BLOCK_NULL;
+ d->m.pair[1] = LFS_BLOCK_NULL;
+ } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE &&
+ d->id > lfs_tag_id(attrs[i].tag)) {
+ d->id -= 1;
+ if (d->type == LFS_TYPE_DIR) {
+ ((lfs_dir_t*)d)->pos -= 1;
+ }
+ } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_CREATE &&
+ d->id >= lfs_tag_id(attrs[i].tag)) {
+ d->id += 1;
+ if (d->type == LFS_TYPE_DIR) {
+ ((lfs_dir_t*)d)->pos += 1;
+ }
+ }
+ }
+ }
+
+ while (d->id >= d->m.count && d->m.split) {
+ // we split, and the id is now on the tail
+ if (lfs_pair_cmp(d->m.tail, lfs->root) != 0) {
+ d->id -= d->m.count;
+ }
+ int err = lfs_dir_fetch(lfs, &d->m, d->m.tail);
+ if (err) {
+ return err;
+ }
+ }
+ }
+ }
+
+ return state;
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_dir_orphaningcommit(lfs_t *lfs, lfs_mdir_t *dir,
+ const struct lfs_mattr *attrs, int attrcount) {
+ // check for any inline files that aren't RAM backed and
+ // forcefully evict them, needed for filesystem consistency
+ for (lfs_file_t *f = (lfs_file_t*)lfs->mlist; f; f = f->next) {
+ if (dir != &f->m && lfs_pair_cmp(f->m.pair, dir->pair) == 0 &&
+ f->type == LFS_TYPE_REG && (f->flags & LFS_F_INLINE) &&
+ f->ctz.size > lfs->cfg->cache_size) {
+ int err = lfs_file_outline(lfs, f);
+ if (err) {
+ return err;
+ }
+
+ err = lfs_file_flush(lfs, f);
+ if (err) {
+ return err;
+ }
+ }
+ }
+
+ lfs_block_t lpair[2] = {dir->pair[0], dir->pair[1]};
+ lfs_mdir_t ldir = *dir;
+ lfs_mdir_t pdir;
+ int state = lfs_dir_relocatingcommit(lfs, &ldir, dir->pair,
+ attrs, attrcount, &pdir);
+ if (state < 0) {
+ return state;
+ }
+
+ // update if we're not in mlist, note we may have already been
+ // updated if we are in mlist
+ if (lfs_pair_cmp(dir->pair, lpair) == 0) {
+ *dir = ldir;
+ }
+
+ // commit was successful, but may require other changes in the
+ // filesystem, these would normally be tail recursive, but we have
+ // flattened them here to avoid unbounded stack usage
+
+ // need to drop?
+ if (state == LFS_OK_DROPPED) {
+ // steal state
+ int err = lfs_dir_getgstate(lfs, dir, &lfs->gdelta);
+ if (err) {
+ return err;
+ }
+
+ // steal tail, note that this can't create a recursive drop
+ lpair[0] = pdir.pair[0];
+ lpair[1] = pdir.pair[1];
+ lfs_pair_tole32(dir->tail);
+ state = lfs_dir_relocatingcommit(lfs, &pdir, lpair, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_TAIL + dir->split, 0x3ff, 8),
+ dir->tail}),
+ NULL);
+ lfs_pair_fromle32(dir->tail);
+ if (state < 0) {
+ return state;
+ }
+
+ ldir = pdir;
+ }
+
+ // need to relocate?
+ bool orphans = false;
+ while (state == LFS_OK_RELOCATED) {
+ LFS_DEBUG("Relocating {0x%"PRIx32", 0x%"PRIx32"} "
+ "-> {0x%"PRIx32", 0x%"PRIx32"}",
+ lpair[0], lpair[1], ldir.pair[0], ldir.pair[1]);
+ state = 0;
+
+ // update internal root
+ if (lfs_pair_cmp(lpair, lfs->root) == 0) {
+ lfs->root[0] = ldir.pair[0];
+ lfs->root[1] = ldir.pair[1];
+ }
+
+ // update internally tracked dirs
+ for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
+ if (lfs_pair_cmp(lpair, d->m.pair) == 0) {
+ d->m.pair[0] = ldir.pair[0];
+ d->m.pair[1] = ldir.pair[1];
+ }
+
+ if (d->type == LFS_TYPE_DIR &&
+ lfs_pair_cmp(lpair, ((lfs_dir_t*)d)->head) == 0) {
+ ((lfs_dir_t*)d)->head[0] = ldir.pair[0];
+ ((lfs_dir_t*)d)->head[1] = ldir.pair[1];
+ }
+ }
+
+ // find parent
+ lfs_stag_t tag = lfs_fs_parent(lfs, lpair, &pdir);
+ if (tag < 0 && tag != LFS_ERR_NOENT) {
+ return tag;
+ }
+
+ bool hasparent = (tag != LFS_ERR_NOENT);
+ if (tag != LFS_ERR_NOENT) {
+ // note that if we have a parent, we must have a pred, so this will
+ // always create an orphan
+ int err = lfs_fs_preporphans(lfs, +1);
+ if (err) {
+ return err;
+ }
+
+ // fix pending move in this pair? this looks like an optimization but
+ // is in fact _required_ since relocating may outdate the move.
+ uint16_t moveid = 0x3ff;
+ if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
+ moveid = lfs_tag_id(lfs->gstate.tag);
+ LFS_DEBUG("Fixing move while relocating "
+ "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
+ pdir.pair[0], pdir.pair[1], moveid);
+ lfs_fs_prepmove(lfs, 0x3ff, NULL);
+ if (moveid < lfs_tag_id(tag)) {
+ tag -= LFS_MKTAG(0, 1, 0);
+ }
+ }
+
+ lfs_block_t ppair[2] = {pdir.pair[0], pdir.pair[1]};
+ lfs_pair_tole32(ldir.pair);
+ state = lfs_dir_relocatingcommit(lfs, &pdir, ppair, LFS_MKATTRS(
+ {LFS_MKTAG_IF(moveid != 0x3ff,
+ LFS_TYPE_DELETE, moveid, 0), NULL},
+ {tag, ldir.pair}),
+ NULL);
+ lfs_pair_fromle32(ldir.pair);
+ if (state < 0) {
+ return state;
+ }
+
+ if (state == LFS_OK_RELOCATED) {
+ lpair[0] = ppair[0];
+ lpair[1] = ppair[1];
+ ldir = pdir;
+ orphans = true;
+ continue;
+ }
+ }
+
+ // find pred
+ int err = lfs_fs_pred(lfs, lpair, &pdir);
+ if (err && err != LFS_ERR_NOENT) {
+ return err;
+ }
+ LFS_ASSERT(!(hasparent && err == LFS_ERR_NOENT));
+
+ // if we can't find dir, it must be new
+ if (err != LFS_ERR_NOENT) {
+ if (lfs_gstate_hasorphans(&lfs->gstate)) {
+ // next step, clean up orphans
+ err = lfs_fs_preporphans(lfs, -(int8_t)hasparent);
+ if (err) {
+ return err;
+ }
+ }
+
+ // fix pending move in this pair? this looks like an optimization
+ // but is in fact _required_ since relocating may outdate the move.
+ uint16_t moveid = 0x3ff;
+ if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
+ moveid = lfs_tag_id(lfs->gstate.tag);
+ LFS_DEBUG("Fixing move while relocating "
+ "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
+ pdir.pair[0], pdir.pair[1], moveid);
+ lfs_fs_prepmove(lfs, 0x3ff, NULL);
+ }
+
+ // replace bad pair, either we clean up desync, or no desync occurred
+ lpair[0] = pdir.pair[0];
+ lpair[1] = pdir.pair[1];
+ lfs_pair_tole32(ldir.pair);
+ state = lfs_dir_relocatingcommit(lfs, &pdir, lpair, LFS_MKATTRS(
+ {LFS_MKTAG_IF(moveid != 0x3ff,
+ LFS_TYPE_DELETE, moveid, 0), NULL},
+ {LFS_MKTAG(LFS_TYPE_TAIL + pdir.split, 0x3ff, 8),
+ ldir.pair}),
+ NULL);
+ lfs_pair_fromle32(ldir.pair);
+ if (state < 0) {
+ return state;
+ }
+
+ ldir = pdir;
+ }
+ }
+
+ return orphans ? LFS_OK_ORPHANED : 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
+ const struct lfs_mattr *attrs, int attrcount) {
+ int orphans = lfs_dir_orphaningcommit(lfs, dir, attrs, attrcount);
+ if (orphans < 0) {
+ return orphans;
+ }
+
+ if (orphans) {
+ // make sure we've removed all orphans, this is a noop if there
+ // are none, but if we had nested blocks failures we may have
+ // created some
+ int err = lfs_fs_deorphan(lfs, false);
+ if (err) {
+ return err;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+
+/// Top level directory operations ///
+#ifndef LFS_READONLY
+static int lfs_mkdir_(lfs_t *lfs, const char *path) {
+ // deorphan if we haven't yet, needed at most once after poweron
+ int err = lfs_fs_forceconsistency(lfs);
+ if (err) {
+ return err;
+ }
+
+ struct lfs_mlist cwd;
+ cwd.next = lfs->mlist;
+ uint16_t id;
+ err = lfs_dir_find(lfs, &cwd.m, &path, &id);
+ if (!(err == LFS_ERR_NOENT && lfs_path_islast(path))) {
+ return (err < 0) ? err : LFS_ERR_EXIST;
+ }
+
+ // check that name fits
+ lfs_size_t nlen = lfs_path_namelen(path);
+ if (nlen > lfs->name_max) {
+ return LFS_ERR_NAMETOOLONG;
+ }
+
+ // build up new directory
+ lfs_alloc_ckpoint(lfs);
+ lfs_mdir_t dir;
+ err = lfs_dir_alloc(lfs, &dir);
+ if (err) {
+ return err;
+ }
+
+ // find end of list
+ lfs_mdir_t pred = cwd.m;
+ while (pred.split) {
+ err = lfs_dir_fetch(lfs, &pred, pred.tail);
+ if (err) {
+ return err;
+ }
+ }
+
+ // setup dir
+ lfs_pair_tole32(pred.tail);
+ err = lfs_dir_commit(lfs, &dir, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), pred.tail}));
+ lfs_pair_fromle32(pred.tail);
+ if (err) {
+ return err;
+ }
+
+ // current block not end of list?
+ if (cwd.m.split) {
+ // update tails, this creates a desync
+ err = lfs_fs_preporphans(lfs, +1);
+ if (err) {
+ return err;
+ }
+
+ // it's possible our predecessor has to be relocated, and if
+ // our parent is our predecessor's predecessor, this could have
+ // caused our parent to go out of date; fortunately we can hook
+ // ourselves into littlefs to catch this
+ cwd.type = 0;
+ cwd.id = 0;
+ lfs->mlist = &cwd;
+
+ lfs_pair_tole32(dir.pair);
+ err = lfs_dir_commit(lfs, &pred, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir.pair}));
+ lfs_pair_fromle32(dir.pair);
+ if (err) {
+ lfs->mlist = cwd.next;
+ return err;
+ }
+
+ lfs->mlist = cwd.next;
+ err = lfs_fs_preporphans(lfs, -1);
+ if (err) {
+ return err;
+ }
+ }
+
+ // now insert into our parent block
+ lfs_pair_tole32(dir.pair);
+ err = lfs_dir_commit(lfs, &cwd.m, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_CREATE, id, 0), NULL},
+ {LFS_MKTAG(LFS_TYPE_DIR, id, nlen), path},
+ {LFS_MKTAG(LFS_TYPE_DIRSTRUCT, id, 8), dir.pair},
+ {LFS_MKTAG_IF(!cwd.m.split,
+ LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir.pair}));
+ lfs_pair_fromle32(dir.pair);
+ if (err) {
+ return err;
+ }
+
+ return 0;
+}
+#endif
+
+static int lfs_dir_open_(lfs_t *lfs, lfs_dir_t *dir, const char *path) {
+ lfs_stag_t tag = lfs_dir_find(lfs, &dir->m, &path, NULL);
+ if (tag < 0) {
+ return tag;
+ }
+
+ if (lfs_tag_type3(tag) != LFS_TYPE_DIR) {
+ return LFS_ERR_NOTDIR;
+ }
+
+ lfs_block_t pair[2];
+ if (lfs_tag_id(tag) == 0x3ff) {
+ // handle root dir separately
+ pair[0] = lfs->root[0];
+ pair[1] = lfs->root[1];
+ } else {
+ // get dir pair from parent
+ lfs_stag_t res = lfs_dir_get(lfs, &dir->m, LFS_MKTAG(0x700, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), pair);
+ if (res < 0) {
+ return res;
+ }
+ lfs_pair_fromle32(pair);
+ }
+
+ // fetch first pair
+ int err = lfs_dir_fetch(lfs, &dir->m, pair);
+ if (err) {
+ return err;
+ }
+
+ // setup entry
+ dir->head[0] = dir->m.pair[0];
+ dir->head[1] = dir->m.pair[1];
+ dir->id = 0;
+ dir->pos = 0;
+
+ // add to list of mdirs
+ dir->type = LFS_TYPE_DIR;
+ lfs_mlist_append(lfs, (struct lfs_mlist *)dir);
+
+ return 0;
+}
+
+static int lfs_dir_close_(lfs_t *lfs, lfs_dir_t *dir) {
+ // remove from list of mdirs
+ lfs_mlist_remove(lfs, (struct lfs_mlist *)dir);
+
+ return 0;
+}
+
+static int lfs_dir_read_(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info) {
+ memset(info, 0, sizeof(*info));
+
+ // special offset for '.' and '..'
+ if (dir->pos == 0) {
+ info->type = LFS_TYPE_DIR;
+ strcpy(info->name, ".");
+ dir->pos += 1;
+ return true;
+ } else if (dir->pos == 1) {
+ info->type = LFS_TYPE_DIR;
+ strcpy(info->name, "..");
+ dir->pos += 1;
+ return true;
+ }
+
+ while (true) {
+ if (dir->id == dir->m.count) {
+ if (!dir->m.split) {
+ return false;
+ }
+
+ int err = lfs_dir_fetch(lfs, &dir->m, dir->m.tail);
+ if (err) {
+ return err;
+ }
+
+ dir->id = 0;
+ }
+
+ int err = lfs_dir_getinfo(lfs, &dir->m, dir->id, info);
+ if (err && err != LFS_ERR_NOENT) {
+ return err;
+ }
+
+ dir->id += 1;
+ if (err != LFS_ERR_NOENT) {
+ break;
+ }
+ }
+
+ dir->pos += 1;
+ return true;
+}
+
+static int lfs_dir_seek_(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off) {
+ // simply walk from head dir
+ int err = lfs_dir_rewind_(lfs, dir);
+ if (err) {
+ return err;
+ }
+
+ // first two for ./..
+ dir->pos = lfs_min(2, off);
+ off -= dir->pos;
+
+ // skip superblock entry
+ dir->id = (off > 0 && lfs_pair_cmp(dir->head, lfs->root) == 0);
+
+ while (off > 0) {
+ if (dir->id == dir->m.count) {
+ if (!dir->m.split) {
+ return LFS_ERR_INVAL;
+ }
+
+ err = lfs_dir_fetch(lfs, &dir->m, dir->m.tail);
+ if (err) {
+ return err;
+ }
+
+ dir->id = 0;
+ }
+
+ int diff = lfs_min(dir->m.count - dir->id, off);
+ dir->id += diff;
+ dir->pos += diff;
+ off -= diff;
+ }
+
+ return 0;
+}
+
+static lfs_soff_t lfs_dir_tell_(lfs_t *lfs, lfs_dir_t *dir) {
+ (void)lfs;
+ return dir->pos;
+}
+
+static int lfs_dir_rewind_(lfs_t *lfs, lfs_dir_t *dir) {
+ // reload the head dir
+ int err = lfs_dir_fetch(lfs, &dir->m, dir->head);
+ if (err) {
+ return err;
+ }
+
+ dir->id = 0;
+ dir->pos = 0;
+ return 0;
+}
+
+
+/// File index list operations ///
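+// ctz skip-list: block 0 holds only data, and every block i >= 1 stores
+// ctz(i)+1 little-endian 4-byte pointers at its start, with pointer k
+// pointing 2^k blocks back, giving O(log n) seeks (see DESIGN.md)
+//
+// lfs_ctz_index maps a file offset to a block index and in-block offset
+// by inverting off = pos - (block_size-8)*i - 4*popcount(i)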
+static int lfs_ctz_index(lfs_t *lfs, lfs_off_t *off) {
+ lfs_off_t size = *off;
+ lfs_off_t b = lfs->cfg->block_size - 2*4;
+ lfs_off_t i = size / b;
+ if (i == 0) {
+ return 0;
+ }
+
+ i = (size - 4*(lfs_popc(i-1)+2)) / b;
+ *off = size - b*i - 4*lfs_popc(i);
+ return i;
+}
+
+static int lfs_ctz_find(lfs_t *lfs,
+ const lfs_cache_t *pcache, lfs_cache_t *rcache,
+ lfs_block_t head, lfs_size_t size,
+ lfs_size_t pos, lfs_block_t *block, lfs_off_t *off) {
+ if (size == 0) {
+ *block = LFS_BLOCK_NULL;
+ *off = 0;
+ return 0;
+ }
+
+ lfs_off_t current = lfs_ctz_index(lfs, &(lfs_off_t){size-1});
+ lfs_off_t target = lfs_ctz_index(lfs, &pos);
+
+ while (current > target) {
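+ // jump as far back as we can: bounded both by the distance to the
+ // target (npw2) and by how many pointers this block stores (ctz)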
+ lfs_size_t skip = lfs_min(
+ lfs_npw2(current-target+1) - 1,
+ lfs_ctz(current));
+
+ int err = lfs_bd_read(lfs,
+ pcache, rcache, sizeof(head),
+ head, 4*skip, &head, sizeof(head));
+ head = lfs_fromle32(head);
+ if (err) {
+ return err;
+ }
+
+ current -= 1 << skip;
+ }
+
+ *block = head;
+ *off = pos;
+ return 0;
+}
+
+#ifndef LFS_READONLY
+static int lfs_ctz_extend(lfs_t *lfs,
+ lfs_cache_t *pcache, lfs_cache_t *rcache,
+ lfs_block_t head, lfs_size_t size,
+ lfs_block_t *block, lfs_off_t *off) {
+ while (true) {
+ // go ahead and grab a block
+ lfs_block_t nblock;
+ int err = lfs_alloc(lfs, &nblock);
+ if (err) {
+ return err;
+ }
+
+ {
+ err = lfs_bd_erase(lfs, nblock);
+ if (err) {
+ if (err == LFS_ERR_CORRUPT) {
+ goto relocate;
+ }
+ return err;
+ }
+
+ if (size == 0) {
+ *block = nblock;
+ *off = 0;
+ return 0;
+ }
+
+ lfs_size_t noff = size - 1;
+ lfs_off_t index = lfs_ctz_index(lfs, &noff);
+ noff = noff + 1;
+
+ // just copy out the last block if it is incomplete
+ if (noff != lfs->cfg->block_size) {
+ for (lfs_off_t i = 0; i < noff; i++) {
+ uint8_t data;
+ err = lfs_bd_read(lfs,
+ NULL, rcache, noff-i,
+ head, i, &data, 1);
+ if (err) {
+ return err;
+ }
+
+ err = lfs_bd_prog(lfs,
+ pcache, rcache, true,
+ nblock, i, &data, 1);
+ if (err) {
+ if (err == LFS_ERR_CORRUPT) {
+ goto relocate;
+ }
+ return err;
+ }
+ }
+
+ *block = nblock;
+ *off = noff;
+ return 0;
+ }
+
+ // append block
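+ // the block at this index stores ctz(index)+1 pointers, where
+ // pointer k points 2^k blocks back in the list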
+ index += 1;
+ lfs_size_t skips = lfs_ctz(index) + 1;
+ lfs_block_t nhead = head;
+ for (lfs_off_t i = 0; i < skips; i++) {
+ nhead = lfs_tole32(nhead);
+ err = lfs_bd_prog(lfs, pcache, rcache, true,
+ nblock, 4*i, &nhead, 4);
+ nhead = lfs_fromle32(nhead);
+ if (err) {
+ if (err == LFS_ERR_CORRUPT) {
+ goto relocate;
+ }
+ return err;
+ }
+
+ if (i != skips-1) {
+ err = lfs_bd_read(lfs,
+ NULL, rcache, sizeof(nhead),
+ nhead, 4*i, &nhead, sizeof(nhead));
+ nhead = lfs_fromle32(nhead);
+ if (err) {
+ return err;
+ }
+ }
+ }
+
+ *block = nblock;
+ *off = 4*skips;
+ return 0;
+ }
+
+relocate:
+ LFS_DEBUG("Bad block at 0x%"PRIx32, nblock);
+
+ // just clear cache and try a new block
+ lfs_cache_drop(lfs, pcache);
+ }
+}
+#endif
+
+static int lfs_ctz_traverse(lfs_t *lfs,
+ const lfs_cache_t *pcache, lfs_cache_t *rcache,
+ lfs_block_t head, lfs_size_t size,
+ int (*cb)(void*, lfs_block_t), void *data) {
+ if (size == 0) {
+ return 0;
+ }
+
+ lfs_off_t index = lfs_ctz_index(lfs, &(lfs_off_t){size-1});
+
+ while (true) {
+ int err = cb(data, head);
+ if (err) {
+ return err;
+ }
+
+ if (index == 0) {
+ return 0;
+ }
+
+ lfs_block_t heads[2];
+ int count = 2 - (index & 1);
+ err = lfs_bd_read(lfs,
+ pcache, rcache, count*sizeof(head),
+ head, 0, &heads, count*sizeof(head));
+ heads[0] = lfs_fromle32(heads[0]);
+ heads[1] = lfs_fromle32(heads[1]);
+ if (err) {
+ return err;
+ }
+
+ for (int i = 0; i < count-1; i++) {
+ err = cb(data, heads[i]);
+ if (err) {
+ return err;
+ }
+ }
+
+ head = heads[count-1];
+ index -= count;
+ }
+}
+
+
+/// Top level file operations ///
+static int lfs_file_opencfg_(lfs_t *lfs, lfs_file_t *file,
+ const char *path, int flags,
+ const struct lfs_file_config *cfg) {
+#ifndef LFS_READONLY
+ // deorphan if we haven't yet, needed at most once after poweron
+ if ((flags & LFS_O_WRONLY) == LFS_O_WRONLY) {
+ int err = lfs_fs_forceconsistency(lfs);
+ if (err) {
+ return err;
+ }
+ }
+#else
+ LFS_ASSERT((flags & LFS_O_RDONLY) == LFS_O_RDONLY);
+#endif
+
+ // setup simple file details
+ int err;
+ file->cfg = cfg;
+ file->flags = flags;
+ file->pos = 0;
+ file->off = 0;
+ file->cache.buffer = NULL;
+
+ // allocate entry for file if it doesn't exist
+ lfs_stag_t tag = lfs_dir_find(lfs, &file->m, &path, &file->id);
+ if (tag < 0 && !(tag == LFS_ERR_NOENT && lfs_path_islast(path))) {
+ err = tag;
+ goto cleanup;
+ }
+
+ // get id, add to list of mdirs to catch update changes
+ file->type = LFS_TYPE_REG;
+ lfs_mlist_append(lfs, (struct lfs_mlist *)file);
+
+#ifdef LFS_READONLY
+ if (tag == LFS_ERR_NOENT) {
+ err = LFS_ERR_NOENT;
+ goto cleanup;
+#else
+ if (tag == LFS_ERR_NOENT) {
+ if (!(flags & LFS_O_CREAT)) {
+ err = LFS_ERR_NOENT;
+ goto cleanup;
+ }
+
+ // don't allow trailing slashes
+ if (lfs_path_isdir(path)) {
+ err = LFS_ERR_NOTDIR;
+ goto cleanup;
+ }
+
+ // check that name fits
+ lfs_size_t nlen = lfs_path_namelen(path);
+ if (nlen > lfs->name_max) {
+ err = LFS_ERR_NAMETOOLONG;
+ goto cleanup;
+ }
+
+ // get next slot and create entry to remember name
+ err = lfs_dir_commit(lfs, &file->m, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_CREATE, file->id, 0), NULL},
+ {LFS_MKTAG(LFS_TYPE_REG, file->id, nlen), path},
+ {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0), NULL}));
+
+ // it may happen that the file name doesn't fit in the metadata
+ // blocks, e.g., a 256 byte file name will not fit in a 128 byte
+ // block.
+ err = (err == LFS_ERR_NOSPC) ? LFS_ERR_NAMETOOLONG : err;
+ if (err) {
+ goto cleanup;
+ }
+
+ tag = LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, 0);
+ } else if (flags & LFS_O_EXCL) {
+ err = LFS_ERR_EXIST;
+ goto cleanup;
+#endif
+ } else if (lfs_tag_type3(tag) != LFS_TYPE_REG) {
+ err = LFS_ERR_ISDIR;
+ goto cleanup;
+#ifndef LFS_READONLY
+ } else if (flags & LFS_O_TRUNC) {
+ // truncate if requested
+ tag = LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0);
+ file->flags |= LFS_F_DIRTY;
+#endif
+ } else {
+ // try to load what's on disk, if it's inlined we'll fix it later
+ tag = lfs_dir_get(lfs, &file->m, LFS_MKTAG(0x700, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_STRUCT, file->id, 8), &file->ctz);
+ if (tag < 0) {
+ err = tag;
+ goto cleanup;
+ }
+ lfs_ctz_fromle32(&file->ctz);
+ }
+
+ // fetch attrs
+ for (unsigned i = 0; i < file->cfg->attr_count; i++) {
+ // if opened for read / read-write operations
+ if ((file->flags & LFS_O_RDONLY) == LFS_O_RDONLY) {
+ lfs_stag_t res = lfs_dir_get(lfs, &file->m,
+ LFS_MKTAG(0x7ff, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_USERATTR + file->cfg->attrs[i].type,
+ file->id, file->cfg->attrs[i].size),
+ file->cfg->attrs[i].buffer);
+ if (res < 0 && res != LFS_ERR_NOENT) {
+ err = res;
+ goto cleanup;
+ }
+ }
+
+#ifndef LFS_READONLY
+ // if opened for write / read-write operations
+ if ((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY) {
+ if (file->cfg->attrs[i].size > lfs->attr_max) {
+ err = LFS_ERR_NOSPC;
+ goto cleanup;
+ }
+
+ file->flags |= LFS_F_DIRTY;
+ }
+#endif
+ }
+
+ // allocate buffer if needed
+ if (file->cfg->buffer) {
+ file->cache.buffer = file->cfg->buffer;
+ } else {
+ file->cache.buffer = lfs_malloc(lfs->cfg->cache_size);
+ if (!file->cache.buffer) {
+ err = LFS_ERR_NOMEM;
+ goto cleanup;
+ }
+ }
+
+ // zero to avoid information leak
+ lfs_cache_zero(lfs, &file->cache);
+
+ if (lfs_tag_type3(tag) == LFS_TYPE_INLINESTRUCT) {
+ // load inline files
+ file->ctz.head = LFS_BLOCK_INLINE;
+ file->ctz.size = lfs_tag_size(tag);
+ file->flags |= LFS_F_INLINE;
+ file->cache.block = file->ctz.head;
+ file->cache.off = 0;
+ file->cache.size = lfs->cfg->cache_size;
+
+ // don't always read (may be new/trunc file)
+ if (file->ctz.size > 0) {
+ lfs_stag_t res = lfs_dir_get(lfs, &file->m,
+ LFS_MKTAG(0x700, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_STRUCT, file->id,
+ lfs_min(file->cache.size, 0x3fe)),
+ file->cache.buffer);
+ if (res < 0) {
+ err = res;
+ goto cleanup;
+ }
+ }
+ }
+
+ return 0;
+
+cleanup:
+ // clean up lingering resources
+#ifndef LFS_READONLY
+ file->flags |= LFS_F_ERRED;
+#endif
+ lfs_file_close_(lfs, file);
+ return err;
+}
+
+#ifndef LFS_NO_MALLOC
+static int lfs_file_open_(lfs_t *lfs, lfs_file_t *file,
+ const char *path, int flags) {
+ static const struct lfs_file_config defaults = {0};
+ int err = lfs_file_opencfg_(lfs, file, path, flags, &defaults);
+ return err;
+}
+#endif
+
+static int lfs_file_close_(lfs_t *lfs, lfs_file_t *file) {
+#ifndef LFS_READONLY
+ int err = lfs_file_sync_(lfs, file);
+#else
+ int err = 0;
+#endif
+
+ // remove from list of mdirs
+ lfs_mlist_remove(lfs, (struct lfs_mlist*)file);
+
+ // clean up memory
+ if (!file->cfg->buffer) {
+ lfs_free(file->cache.buffer);
+ }
+
+ return err;
+}
+
+
+#ifndef LFS_READONLY
+static int lfs_file_relocate(lfs_t *lfs, lfs_file_t *file) {
+ while (true) {
+ // just relocate what exists into new block
+ lfs_block_t nblock;
+ int err = lfs_alloc(lfs, &nblock);
+ if (err) {
+ return err;
+ }
+
+ err = lfs_bd_erase(lfs, nblock);
+ if (err) {
+ if (err == LFS_ERR_CORRUPT) {
+ goto relocate;
+ }
+ return err;
+ }
+
+ // either read from dirty cache or disk
+ for (lfs_off_t i = 0; i < file->off; i++) {
+ uint8_t data;
+ if (file->flags & LFS_F_INLINE) {
+ err = lfs_dir_getread(lfs, &file->m,
+ // note we evict inline files before they can be dirty
+ NULL, &file->cache, file->off-i,
+ LFS_MKTAG(0xfff, 0x1ff, 0),
+ LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0),
+ i, &data, 1);
+ if (err) {
+ return err;
+ }
+ } else {
+ err = lfs_bd_read(lfs,
+ &file->cache, &lfs->rcache, file->off-i,
+ file->block, i, &data, 1);
+ if (err) {
+ return err;
+ }
+ }
+
+ err = lfs_bd_prog(lfs,
+ &lfs->pcache, &lfs->rcache, true,
+ nblock, i, &data, 1);
+ if (err) {
+ if (err == LFS_ERR_CORRUPT) {
+ goto relocate;
+ }
+ return err;
+ }
+ }
+
+ // copy over new state of file
+ memcpy(file->cache.buffer, lfs->pcache.buffer, lfs->cfg->cache_size);
+ file->cache.block = lfs->pcache.block;
+ file->cache.off = lfs->pcache.off;
+ file->cache.size = lfs->pcache.size;
+ lfs_cache_zero(lfs, &lfs->pcache);
+
+ file->block = nblock;
+ file->flags |= LFS_F_WRITING;
+ return 0;
+
+relocate:
+ LFS_DEBUG("Bad block at 0x%"PRIx32, nblock);
+
+ // just clear cache and try a new block
+ lfs_cache_drop(lfs, &lfs->pcache);
+ }
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file) {
+ file->off = file->pos;
+ lfs_alloc_ckpoint(lfs);
+ int err = lfs_file_relocate(lfs, file);
+ if (err) {
+ return err;
+ }
+
+ file->flags &= ~LFS_F_INLINE;
+ return 0;
+}
+#endif
+
+static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file) {
+ if (file->flags & LFS_F_READING) {
+ if (!(file->flags & LFS_F_INLINE)) {
+ lfs_cache_drop(lfs, &file->cache);
+ }
+ file->flags &= ~LFS_F_READING;
+ }
+
+#ifndef LFS_READONLY
+ if (file->flags & LFS_F_WRITING) {
+ lfs_off_t pos = file->pos;
+
+ if (!(file->flags & LFS_F_INLINE)) {
+ // copy over anything after current branch
+ lfs_file_t orig = {
+ .ctz.head = file->ctz.head,
+ .ctz.size = file->ctz.size,
+ .flags = LFS_O_RDONLY,
+ .pos = file->pos,
+ .cache = lfs->rcache,
+ };
+ lfs_cache_drop(lfs, &lfs->rcache);
+
+ while (file->pos < file->ctz.size) {
+ // copy over a byte at a time, leave it up to caching
+ // to make this efficient
+ uint8_t data;
+ lfs_ssize_t res = lfs_file_flushedread(lfs, &orig, &data, 1);
+ if (res < 0) {
+ return res;
+ }
+
+ res = lfs_file_flushedwrite(lfs, file, &data, 1);
+ if (res < 0) {
+ return res;
+ }
+
+ // keep our reference to the rcache in sync
+ if (lfs->rcache.block != LFS_BLOCK_NULL) {
+ lfs_cache_drop(lfs, &orig.cache);
+ lfs_cache_drop(lfs, &lfs->rcache);
+ }
+ }
+
+ // write out what we have
+ while (true) {
+ int err = lfs_bd_flush(lfs, &file->cache, &lfs->rcache, true);
+ if (err) {
+ if (err == LFS_ERR_CORRUPT) {
+ goto relocate;
+ }
+ return err;
+ }
+
+ break;
+
+relocate:
+ LFS_DEBUG("Bad block at 0x%"PRIx32, file->block);
+ err = lfs_file_relocate(lfs, file);
+ if (err) {
+ return err;
+ }
+ }
+ } else {
+ file->pos = lfs_max(file->pos, file->ctz.size);
+ }
+
+ // actual file updates
+ file->ctz.head = file->block;
+ file->ctz.size = file->pos;
+ file->flags &= ~LFS_F_WRITING;
+ file->flags |= LFS_F_DIRTY;
+
+ file->pos = pos;
+ }
+#endif
+
+ return 0;
+}
+
+#ifndef LFS_READONLY
+static int lfs_file_sync_(lfs_t *lfs, lfs_file_t *file) {
+ if (file->flags & LFS_F_ERRED) {
+ // it's not safe to do anything if our file errored
+ return 0;
+ }
+
+ int err = lfs_file_flush(lfs, file);
+ if (err) {
+ file->flags |= LFS_F_ERRED;
+ return err;
+ }
+
+
+ if ((file->flags & LFS_F_DIRTY) &&
+ !lfs_pair_isnull(file->m.pair)) {
+ // before we commit metadata, we need to sync the disk to make sure
+ // data writes don't complete after metadata writes
+ if (!(file->flags & LFS_F_INLINE)) {
+ err = lfs_bd_sync(lfs, &lfs->pcache, &lfs->rcache, false);
+ if (err) {
+ return err;
+ }
+ }
+
+ // update dir entry
+ uint16_t type;
+ const void *buffer;
+ lfs_size_t size;
+ struct lfs_ctz ctz;
+ if (file->flags & LFS_F_INLINE) {
+ // inline the whole file
+ type = LFS_TYPE_INLINESTRUCT;
+ buffer = file->cache.buffer;
+ size = file->ctz.size;
+ } else {
+ // update the ctz reference
+ type = LFS_TYPE_CTZSTRUCT;
+ // copy ctz so alloc will work during a relocate
+ ctz = file->ctz;
+ lfs_ctz_tole32(&ctz);
+ buffer = &ctz;
+ size = sizeof(ctz);
+ }
+
+ // commit file data and attributes
+ err = lfs_dir_commit(lfs, &file->m, LFS_MKATTRS(
+ {LFS_MKTAG(type, file->id, size), buffer},
+ {LFS_MKTAG(LFS_FROM_USERATTRS, file->id,
+ file->cfg->attr_count), file->cfg->attrs}));
+ if (err) {
+ file->flags |= LFS_F_ERRED;
+ return err;
+ }
+
+ file->flags &= ~LFS_F_DIRTY;
+ }
+
+ return 0;
+}
+#endif
+
+static lfs_ssize_t lfs_file_flushedread(lfs_t *lfs, lfs_file_t *file,
+ void *buffer, lfs_size_t size) {
+ uint8_t *data = buffer;
+ lfs_size_t nsize = size;
+
+ if (file->pos >= file->ctz.size) {
+ // eof if past end
+ return 0;
+ }
+
+ size = lfs_min(size, file->ctz.size - file->pos);
+ nsize = size;
+
+ while (nsize > 0) {
+ // check if we need a new block
+ if (!(file->flags & LFS_F_READING) ||
+ file->off == lfs->cfg->block_size) {
+ if (!(file->flags & LFS_F_INLINE)) {
+ int err = lfs_ctz_find(lfs, NULL, &file->cache,
+ file->ctz.head, file->ctz.size,
+ file->pos, &file->block, &file->off);
+ if (err) {
+ return err;
+ }
+ } else {
+ file->block = LFS_BLOCK_INLINE;
+ file->off = file->pos;
+ }
+
+ file->flags |= LFS_F_READING;
+ }
+
+ // read as much as we can in current block
+ lfs_size_t diff = lfs_min(nsize, lfs->cfg->block_size - file->off);
+ if (file->flags & LFS_F_INLINE) {
+ int err = lfs_dir_getread(lfs, &file->m,
+ NULL, &file->cache, lfs->cfg->block_size,
+ LFS_MKTAG(0xfff, 0x1ff, 0),
+ LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0),
+ file->off, data, diff);
+ if (err) {
+ return err;
+ }
+ } else {
+ int err = lfs_bd_read(lfs,
+ NULL, &file->cache, lfs->cfg->block_size,
+ file->block, file->off, data, diff);
+ if (err) {
+ return err;
+ }
+ }
+
+ file->pos += diff;
+ file->off += diff;
+ data += diff;
+ nsize -= diff;
+ }
+
+ return size;
+}
+
+static lfs_ssize_t lfs_file_read_(lfs_t *lfs, lfs_file_t *file,
+ void *buffer, lfs_size_t size) {
+ LFS_ASSERT((file->flags & LFS_O_RDONLY) == LFS_O_RDONLY);
+
+#ifndef LFS_READONLY
+ if (file->flags & LFS_F_WRITING) {
+ // flush out any writes
+ int err = lfs_file_flush(lfs, file);
+ if (err) {
+ return err;
+ }
+ }
+#endif
+
+ return lfs_file_flushedread(lfs, file, buffer, size);
+}
+
+
+#ifndef LFS_READONLY
+static lfs_ssize_t lfs_file_flushedwrite(lfs_t *lfs, lfs_file_t *file,
+ const void *buffer, lfs_size_t size) {
+ const uint8_t *data = buffer;
+ lfs_size_t nsize = size;
+
+ if ((file->flags & LFS_F_INLINE) &&
+ lfs_max(file->pos+nsize, file->ctz.size) > lfs->inline_max) {
+ // inline file doesn't fit anymore
+ int err = lfs_file_outline(lfs, file);
+ if (err) {
+ file->flags |= LFS_F_ERRED;
+ return err;
+ }
+ }
+
+ while (nsize > 0) {
+ // check if we need a new block
+ if (!(file->flags & LFS_F_WRITING) ||
+ file->off == lfs->cfg->block_size) {
+ if (!(file->flags & LFS_F_INLINE)) {
+ if (!(file->flags & LFS_F_WRITING) && file->pos > 0) {
+ // find out which block we're extending from
+ int err = lfs_ctz_find(lfs, NULL, &file->cache,
+ file->ctz.head, file->ctz.size,
+ file->pos-1, &file->block, &(lfs_off_t){0});
+ if (err) {
+ file->flags |= LFS_F_ERRED;
+ return err;
+ }
+
+ // mark cache as dirty since we may have read data into it
+ lfs_cache_zero(lfs, &file->cache);
+ }
+
+ // extend file with new blocks
+ lfs_alloc_ckpoint(lfs);
+ int err = lfs_ctz_extend(lfs, &file->cache, &lfs->rcache,
+ file->block, file->pos,
+ &file->block, &file->off);
+ if (err) {
+ file->flags |= LFS_F_ERRED;
+ return err;
+ }
+ } else {
+ file->block = LFS_BLOCK_INLINE;
+ file->off = file->pos;
+ }
+
+ file->flags |= LFS_F_WRITING;
+ }
+
+ // program as much as we can in current block
+ lfs_size_t diff = lfs_min(nsize, lfs->cfg->block_size - file->off);
+ while (true) {
+ int err = lfs_bd_prog(lfs, &file->cache, &lfs->rcache, true,
+ file->block, file->off, data, diff);
+ if (err) {
+ if (err == LFS_ERR_CORRUPT) {
+ goto relocate;
+ }
+ file->flags |= LFS_F_ERRED;
+ return err;
+ }
+
+ break;
+relocate:
+ err = lfs_file_relocate(lfs, file);
+ if (err) {
+ file->flags |= LFS_F_ERRED;
+ return err;
+ }
+ }
+
+ file->pos += diff;
+ file->off += diff;
+ data += diff;
+ nsize -= diff;
+
+ lfs_alloc_ckpoint(lfs);
+ }
+
+ return size;
+}
+
+static lfs_ssize_t lfs_file_write_(lfs_t *lfs, lfs_file_t *file,
+ const void *buffer, lfs_size_t size) {
+ LFS_ASSERT((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY);
+
+ if (file->flags & LFS_F_READING) {
+ // drop any reads
+ int err = lfs_file_flush(lfs, file);
+ if (err) {
+ return err;
+ }
+ }
+
+ if ((file->flags & LFS_O_APPEND) && file->pos < file->ctz.size) {
+ file->pos = file->ctz.size;
+ }
+
+ if (file->pos + size > lfs->file_max) {
+ // Larger than file limit?
+ return LFS_ERR_FBIG;
+ }
+
+ if (!(file->flags & LFS_F_WRITING) && file->pos > file->ctz.size) {
+ // fill with zeros
+ lfs_off_t pos = file->pos;
+ file->pos = file->ctz.size;
+
+ while (file->pos < pos) {
+ lfs_ssize_t res = lfs_file_flushedwrite(lfs, file, &(uint8_t){0}, 1);
+ if (res < 0) {
+ return res;
+ }
+ }
+ }
+
+ lfs_ssize_t nsize = lfs_file_flushedwrite(lfs, file, buffer, size);
+ if (nsize < 0) {
+ return nsize;
+ }
+
+ file->flags &= ~LFS_F_ERRED;
+ return nsize;
+}
+#endif
+
+static lfs_soff_t lfs_file_seek_(lfs_t *lfs, lfs_file_t *file,
+ lfs_soff_t off, int whence) {
+ // find new pos
+ //
+ // fortunately for us, littlefs is limited to 31-bit file sizes, so we
+ // don't have to worry too much about integer overflow
+ lfs_off_t npos = file->pos;
+ if (whence == LFS_SEEK_SET) {
+ npos = off;
+ } else if (whence == LFS_SEEK_CUR) {
+ npos = file->pos + (lfs_off_t)off;
+ } else if (whence == LFS_SEEK_END) {
+ npos = (lfs_off_t)lfs_file_size_(lfs, file) + (lfs_off_t)off;
+ }
+
+ if (npos > lfs->file_max) {
+ // file position out of range
+ return LFS_ERR_INVAL;
+ }
+
+ if (file->pos == npos) {
+ // noop - position has not changed
+ return npos;
+ }
+
+ // if we're only reading and our new offset is still in the file's cache
+ // we can avoid flushing and needing to reread the data
+ if ((file->flags & LFS_F_READING)
+ && file->off != lfs->cfg->block_size) {
+ int oindex = lfs_ctz_index(lfs, &(lfs_off_t){file->pos});
+ lfs_off_t noff = npos;
+ int nindex = lfs_ctz_index(lfs, &noff);
+ if (oindex == nindex
+ && noff >= file->cache.off
+ && noff < file->cache.off + file->cache.size) {
+ file->pos = npos;
+ file->off = noff;
+ return npos;
+ }
+ }
+
+ // write out everything beforehand, may be noop if rdonly
+ int err = lfs_file_flush(lfs, file);
+ if (err) {
+ return err;
+ }
+
+ // update pos
+ file->pos = npos;
+ return npos;
+}
+
+#ifndef LFS_READONLY
+static int lfs_file_truncate_(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) {
+ LFS_ASSERT((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY);
+
+ if (size > LFS_FILE_MAX) {
+ return LFS_ERR_INVAL;
+ }
+
+ lfs_off_t pos = file->pos;
+ lfs_off_t oldsize = lfs_file_size_(lfs, file);
+ if (size < oldsize) {
+ // revert to inline file?
+ if (size <= lfs->inline_max) {
+ // flush+seek to head
+ lfs_soff_t res = lfs_file_seek_(lfs, file, 0, LFS_SEEK_SET);
+ if (res < 0) {
+ return (int)res;
+ }
+
+ // read our data into rcache temporarily
+ lfs_cache_drop(lfs, &lfs->rcache);
+ res = lfs_file_flushedread(lfs, file,
+ lfs->rcache.buffer, size);
+ if (res < 0) {
+ return (int)res;
+ }
+
+ file->ctz.head = LFS_BLOCK_INLINE;
+ file->ctz.size = size;
+ file->flags |= LFS_F_DIRTY | LFS_F_READING | LFS_F_INLINE;
+ file->cache.block = file->ctz.head;
+ file->cache.off = 0;
+ file->cache.size = lfs->cfg->cache_size;
+ memcpy(file->cache.buffer, lfs->rcache.buffer, size);
+
+ } else {
+ // need to flush since directly changing metadata
+ int err = lfs_file_flush(lfs, file);
+ if (err) {
+ return err;
+ }
+
+ // lookup new head in ctz skip list
+ err = lfs_ctz_find(lfs, NULL, &file->cache,
+ file->ctz.head, file->ctz.size,
+ size-1, &file->block, &(lfs_off_t){0});
+ if (err) {
+ return err;
+ }
+
+ // need to set pos/block/off consistently so seeking back to
+ // the old position does not get confused
+ file->pos = size;
+ file->ctz.head = file->block;
+ file->ctz.size = size;
+ file->flags |= LFS_F_DIRTY | LFS_F_READING;
+ }
+ } else if (size > oldsize) {
+ // flush+seek if not already at end
+ lfs_soff_t res = lfs_file_seek_(lfs, file, 0, LFS_SEEK_END);
+ if (res < 0) {
+ return (int)res;
+ }
+
+ // fill with zeros
+ while (file->pos < size) {
+ res = lfs_file_write_(lfs, file, &(uint8_t){0}, 1);
+ if (res < 0) {
+ return (int)res;
+ }
+ }
+ }
+
+ // restore pos
+ lfs_soff_t res = lfs_file_seek_(lfs, file, pos, LFS_SEEK_SET);
+ if (res < 0) {
+ return (int)res;
+ }
+
+ return 0;
+}
+#endif
+
+static lfs_soff_t lfs_file_tell_(lfs_t *lfs, lfs_file_t *file) {
+ (void)lfs;
+ return file->pos;
+}
+
+static int lfs_file_rewind_(lfs_t *lfs, lfs_file_t *file) {
+ lfs_soff_t res = lfs_file_seek_(lfs, file, 0, LFS_SEEK_SET);
+ if (res < 0) {
+ return (int)res;
+ }
+
+ return 0;
+}
+
+static lfs_soff_t lfs_file_size_(lfs_t *lfs, lfs_file_t *file) {
+ (void)lfs;
+
+#ifndef LFS_READONLY
+ if (file->flags & LFS_F_WRITING) {
+ return lfs_max(file->pos, file->ctz.size);
+ }
+#endif
+
+ return file->ctz.size;
+}
+
+
+/// General fs operations ///
+static int lfs_stat_(lfs_t *lfs, const char *path, struct lfs_info *info) {
+ lfs_mdir_t cwd;
+ lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
+ if (tag < 0) {
+ return (int)tag;
+ }
+
+ // only allow trailing slashes on dirs
+ if (strchr(path, '/') != NULL
+ && lfs_tag_type3(tag) != LFS_TYPE_DIR) {
+ return LFS_ERR_NOTDIR;
+ }
+
+ return lfs_dir_getinfo(lfs, &cwd, lfs_tag_id(tag), info);
+}
+
+#ifndef LFS_READONLY
+static int lfs_remove_(lfs_t *lfs, const char *path) {
+ // deorphan if we haven't yet, needed at most once after poweron
+ int err = lfs_fs_forceconsistency(lfs);
+ if (err) {
+ return err;
+ }
+
+ lfs_mdir_t cwd;
+ lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
+ if (tag < 0 || lfs_tag_id(tag) == 0x3ff) {
+ return (tag < 0) ? (int)tag : LFS_ERR_INVAL;
+ }
+
+ struct lfs_mlist dir;
+ dir.next = lfs->mlist;
+ if (lfs_tag_type3(tag) == LFS_TYPE_DIR) {
+ // must be empty before removal
+ lfs_block_t pair[2];
+ lfs_stag_t res = lfs_dir_get(lfs, &cwd, LFS_MKTAG(0x700, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), pair);
+ if (res < 0) {
+ return (int)res;
+ }
+ lfs_pair_fromle32(pair);
+
+ err = lfs_dir_fetch(lfs, &dir.m, pair);
+ if (err) {
+ return err;
+ }
+
+ if (dir.m.count > 0 || dir.m.split) {
+ return LFS_ERR_NOTEMPTY;
+ }
+
+ // mark fs as orphaned
+ err = lfs_fs_preporphans(lfs, +1);
+ if (err) {
+ return err;
+ }
+
+ // I know it's crazy but yes, dir can be changed by our parent's
+ // commit (if predecessor is child)
+ dir.type = 0;
+ dir.id = 0;
+ lfs->mlist = &dir;
+ }
+
+ // delete the entry
+ err = lfs_dir_commit(lfs, &cwd, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_DELETE, lfs_tag_id(tag), 0), NULL}));
+ if (err) {
+ lfs->mlist = dir.next;
+ return err;
+ }
+
+ lfs->mlist = dir.next;
+ if (lfs_gstate_hasorphans(&lfs->gstate)) {
+ LFS_ASSERT(lfs_tag_type3(tag) == LFS_TYPE_DIR);
+
+ // fix orphan
+ err = lfs_fs_preporphans(lfs, -1);
+ if (err) {
+ return err;
+ }
+
+ err = lfs_fs_pred(lfs, dir.m.pair, &cwd);
+ if (err) {
+ return err;
+ }
+
+ err = lfs_dir_drop(lfs, &cwd, &dir.m);
+ if (err) {
+ return err;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_rename_(lfs_t *lfs, const char *oldpath, const char *newpath) {
+ // deorphan if we haven't yet, needed at most once after poweron
+ int err = lfs_fs_forceconsistency(lfs);
+ if (err) {
+ return err;
+ }
+
+ // find old entry
+ lfs_mdir_t oldcwd;
+ lfs_stag_t oldtag = lfs_dir_find(lfs, &oldcwd, &oldpath, NULL);
+ if (oldtag < 0 || lfs_tag_id(oldtag) == 0x3ff) {
+ return (oldtag < 0) ? (int)oldtag : LFS_ERR_INVAL;
+ }
+
+ // find new entry
+ lfs_mdir_t newcwd;
+ uint16_t newid;
+ lfs_stag_t prevtag = lfs_dir_find(lfs, &newcwd, &newpath, &newid);
+ if ((prevtag < 0 || lfs_tag_id(prevtag) == 0x3ff) &&
+ !(prevtag == LFS_ERR_NOENT && lfs_path_islast(newpath))) {
+ return (prevtag < 0) ? (int)prevtag : LFS_ERR_INVAL;
+ }
+
+ // if we're in the same pair there are a few special cases...
+ bool samepair = (lfs_pair_cmp(oldcwd.pair, newcwd.pair) == 0);
+ uint16_t newoldid = lfs_tag_id(oldtag);
+
+ struct lfs_mlist prevdir;
+ prevdir.next = lfs->mlist;
+ if (prevtag == LFS_ERR_NOENT) {
+ // if we're a file, don't allow trailing slashes
+ if (lfs_path_isdir(newpath)
+ && lfs_tag_type3(oldtag) != LFS_TYPE_DIR) {
+ return LFS_ERR_NOTDIR;
+ }
+
+ // check that name fits
+ lfs_size_t nlen = lfs_path_namelen(newpath);
+ if (nlen > lfs->name_max) {
+ return LFS_ERR_NAMETOOLONG;
+ }
+
+ // there is a small chance we are being renamed in the same
+ // directory to an id less than our old id; the global update
+ // to handle this is a bit messy
+ if (samepair && newid <= newoldid) {
+ newoldid += 1;
+ }
+ } else if (lfs_tag_type3(prevtag) != lfs_tag_type3(oldtag)) {
+ return (lfs_tag_type3(prevtag) == LFS_TYPE_DIR)
+ ? LFS_ERR_ISDIR
+ : LFS_ERR_NOTDIR;
+ } else if (samepair && newid == newoldid) {
+ // we're renaming to ourselves??
+ return 0;
+ } else if (lfs_tag_type3(prevtag) == LFS_TYPE_DIR) {
+ // must be empty before removal
+ lfs_block_t prevpair[2];
+ lfs_stag_t res = lfs_dir_get(lfs, &newcwd, LFS_MKTAG(0x700, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_STRUCT, newid, 8), prevpair);
+ if (res < 0) {
+ return (int)res;
+ }
+ lfs_pair_fromle32(prevpair);
+
+ // fetch the dir so we can check that it is empty
+ err = lfs_dir_fetch(lfs, &prevdir.m, prevpair);
+ if (err) {
+ return err;
+ }
+
+ if (prevdir.m.count > 0 || prevdir.m.split) {
+ return LFS_ERR_NOTEMPTY;
+ }
+
+ // mark fs as orphaned
+ err = lfs_fs_preporphans(lfs, +1);
+ if (err) {
+ return err;
+ }
+
+ // I know it's crazy but yes, dir can be changed by our parent's
+ // commit (if predecessor is child)
+ prevdir.type = 0;
+ prevdir.id = 0;
+ lfs->mlist = &prevdir;
+ }
+
+ if (!samepair) {
+ lfs_fs_prepmove(lfs, newoldid, oldcwd.pair);
+ }
+
+ // move over all attributes
+ err = lfs_dir_commit(lfs, &newcwd, LFS_MKATTRS(
+ {LFS_MKTAG_IF(prevtag != LFS_ERR_NOENT,
+ LFS_TYPE_DELETE, newid, 0), NULL},
+ {LFS_MKTAG(LFS_TYPE_CREATE, newid, 0), NULL},
+ {LFS_MKTAG(lfs_tag_type3(oldtag),
+ newid, lfs_path_namelen(newpath)), newpath},
+ {LFS_MKTAG(LFS_FROM_MOVE, newid, lfs_tag_id(oldtag)), &oldcwd},
+ {LFS_MKTAG_IF(samepair,
+ LFS_TYPE_DELETE, newoldid, 0), NULL}));
+ if (err) {
+ lfs->mlist = prevdir.next;
+ return err;
+ }
+
+ // let commit clean up after move (if we're different! otherwise move
+ // logic already fixed it for us)
+ if (!samepair && lfs_gstate_hasmove(&lfs->gstate)) {
+ // prep gstate and delete move id
+ lfs_fs_prepmove(lfs, 0x3ff, NULL);
+ err = lfs_dir_commit(lfs, &oldcwd, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_DELETE, lfs_tag_id(oldtag), 0), NULL}));
+ if (err) {
+ lfs->mlist = prevdir.next;
+ return err;
+ }
+ }
+
+ lfs->mlist = prevdir.next;
+ if (lfs_gstate_hasorphans(&lfs->gstate)) {
+ LFS_ASSERT(prevtag != LFS_ERR_NOENT
+ && lfs_tag_type3(prevtag) == LFS_TYPE_DIR);
+
+ // fix orphan
+ err = lfs_fs_preporphans(lfs, -1);
+ if (err) {
+ return err;
+ }
+
+ err = lfs_fs_pred(lfs, prevdir.m.pair, &newcwd);
+ if (err) {
+ return err;
+ }
+
+ err = lfs_dir_drop(lfs, &newcwd, &prevdir.m);
+ if (err) {
+ return err;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+static lfs_ssize_t lfs_getattr_(lfs_t *lfs, const char *path,
+ uint8_t type, void *buffer, lfs_size_t size) {
+ lfs_mdir_t cwd;
+ lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
+ if (tag < 0) {
+ return tag;
+ }
+
+ uint16_t id = lfs_tag_id(tag);
+ if (id == 0x3ff) {
+ // special case for root
+ id = 0;
+ int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
+ if (err) {
+ return err;
+ }
+ }
+
+ tag = lfs_dir_get(lfs, &cwd, LFS_MKTAG(0x7ff, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_USERATTR + type,
+ id, lfs_min(size, lfs->attr_max)),
+ buffer);
+ if (tag < 0) {
+ if (tag == LFS_ERR_NOENT) {
+ return LFS_ERR_NOATTR;
+ }
+
+ return tag;
+ }
+
+ return lfs_tag_size(tag);
+}
+
+#ifndef LFS_READONLY
+static int lfs_commitattr(lfs_t *lfs, const char *path,
+ uint8_t type, const void *buffer, lfs_size_t size) {
+ lfs_mdir_t cwd;
+ lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
+ if (tag < 0) {
+ return tag;
+ }
+
+ uint16_t id = lfs_tag_id(tag);
+ if (id == 0x3ff) {
+ // special case for root
+ id = 0;
+ int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
+ if (err) {
+ return err;
+ }
+ }
+
+ return lfs_dir_commit(lfs, &cwd, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_USERATTR + type, id, size), buffer}));
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_setattr_(lfs_t *lfs, const char *path,
+ uint8_t type, const void *buffer, lfs_size_t size) {
+ if (size > lfs->attr_max) {
+ return LFS_ERR_NOSPC;
+ }
+
+ return lfs_commitattr(lfs, path, type, buffer, size);
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_removeattr_(lfs_t *lfs, const char *path, uint8_t type) {
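+ // a tag size of 0x3ff is the "deleted" marker, so committing it
+ // removes the attribute instead of writing an empty one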
+ return lfs_commitattr(lfs, path, type, NULL, 0x3ff);
+}
+#endif
+
+
+/// Filesystem operations ///
+
+// compile time checks, see lfs.h for why these limits exist
+#if LFS_NAME_MAX > 1022
+#error "Invalid LFS_NAME_MAX, must be <= 1022"
+#endif
+
+#if LFS_FILE_MAX > 2147483647
+#error "Invalid LFS_FILE_MAX, must be <= 2147483647"
+#endif
+
+#if LFS_ATTR_MAX > 1022
+#error "Invalid LFS_ATTR_MAX, must be <= 1022"
+#endif
+
+// common filesystem initialization
+static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
+ lfs->cfg = cfg;
+ lfs->block_count = cfg->block_count; // May be 0
+ int err = 0;
+
+#ifdef LFS_MULTIVERSION
+ // this driver only supports minor version < current minor version
+ LFS_ASSERT(!lfs->cfg->disk_version || (
+ (0xffff & (lfs->cfg->disk_version >> 16))
+ == LFS_DISK_VERSION_MAJOR
+ && (0xffff & (lfs->cfg->disk_version >> 0))
+ <= LFS_DISK_VERSION_MINOR));
+#endif
+
+ // check that bool is a truthy-preserving type
+ //
+ // note the most common reason for this failure is a pre-C99 compiler,
+ // which littlefs currently does not support
+ LFS_ASSERT((bool)0x80000000);
+
+ // check that the required io functions are provided
+ LFS_ASSERT(lfs->cfg->read != NULL);
+#ifndef LFS_READONLY
+ LFS_ASSERT(lfs->cfg->prog != NULL);
+ LFS_ASSERT(lfs->cfg->erase != NULL);
+ LFS_ASSERT(lfs->cfg->sync != NULL);
+#endif
+
+ // validate that the lfs-cfg sizes were initialized properly before
+ // performing any arithmetic with them
+ LFS_ASSERT(lfs->cfg->read_size != 0);
+ LFS_ASSERT(lfs->cfg->prog_size != 0);
+ LFS_ASSERT(lfs->cfg->cache_size != 0);
+
+ // check that the cache size is a multiple of the prog and read sizes,
+ // and that the block size is a multiple of the cache size
+ LFS_ASSERT(lfs->cfg->cache_size % lfs->cfg->read_size == 0);
+ LFS_ASSERT(lfs->cfg->cache_size % lfs->cfg->prog_size == 0);
+ LFS_ASSERT(lfs->cfg->block_size % lfs->cfg->cache_size == 0);
+
+ // check that the block size is large enough to fit all ctz pointers
+ LFS_ASSERT(lfs->cfg->block_size >= 128);
+ // this is the exact calculation for all ctz pointers, if this fails
+ // and the simpler assert above does not, math must be broken
+ LFS_ASSERT(4*lfs_npw2(0xffffffff / (lfs->cfg->block_size-2*4))
+ <= lfs->cfg->block_size);
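+ // (worked example: with block_size=4096, 0xffffffff/(4096-2*4) is
+ // just over 2^20, so lfs_npw2 rounds up to 21 and the worst case is
+ // 4*21 = 84 bytes of ctz pointers, comfortably under one block)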
+
+ // block_cycles = 0 is no longer supported.
+ //
+ // block_cycles is the number of erase cycles before littlefs evicts
+ // metadata logs as a part of wear leveling. Suggested values are in the
+ // range of 100-1000, or set block_cycles to -1 to disable block-level
+ // wear-leveling.
+ LFS_ASSERT(lfs->cfg->block_cycles != 0);
+
+ // check that compact_thresh makes sense
+ //
+ // metadata can't be compacted below block_size/2, and metadata can't
+ // exceed a block_size
+ LFS_ASSERT(lfs->cfg->compact_thresh == 0
+ || lfs->cfg->compact_thresh >= lfs->cfg->block_size/2);
+ LFS_ASSERT(lfs->cfg->compact_thresh == (lfs_size_t)-1
+ || lfs->cfg->compact_thresh <= lfs->cfg->block_size);
+
+ // check that metadata_max is a multiple of read_size and prog_size,
+ // and a factor of the block_size
+ LFS_ASSERT(!lfs->cfg->metadata_max
+ || lfs->cfg->metadata_max % lfs->cfg->read_size == 0);
+ LFS_ASSERT(!lfs->cfg->metadata_max
+ || lfs->cfg->metadata_max % lfs->cfg->prog_size == 0);
+ LFS_ASSERT(!lfs->cfg->metadata_max
+ || lfs->cfg->block_size % lfs->cfg->metadata_max == 0);
+
+ // setup read cache
+ if (lfs->cfg->read_buffer) {
+ lfs->rcache.buffer = lfs->cfg->read_buffer;
+ } else {
+ lfs->rcache.buffer = lfs_malloc(lfs->cfg->cache_size);
+ if (!lfs->rcache.buffer) {
+ err = LFS_ERR_NOMEM;
+ goto cleanup;
+ }
+ }
+
+ // setup program cache
+ if (lfs->cfg->prog_buffer) {
+ lfs->pcache.buffer = lfs->cfg->prog_buffer;
+ } else {
+ lfs->pcache.buffer = lfs_malloc(lfs->cfg->cache_size);
+ if (!lfs->pcache.buffer) {
+ err = LFS_ERR_NOMEM;
+ goto cleanup;
+ }
+ }
+
+ // zero to avoid information leaks
+ lfs_cache_zero(lfs, &lfs->rcache);
+ lfs_cache_zero(lfs, &lfs->pcache);
+
+ // setup lookahead buffer, note mount finishes initializing this after
+ // we establish a decent pseudo-random seed
+ LFS_ASSERT(lfs->cfg->lookahead_size > 0);
+ if (lfs->cfg->lookahead_buffer) {
+ lfs->lookahead.buffer = lfs->cfg->lookahead_buffer;
+ } else {
+ lfs->lookahead.buffer = lfs_malloc(lfs->cfg->lookahead_size);
+ if (!lfs->lookahead.buffer) {
+ err = LFS_ERR_NOMEM;
+ goto cleanup;
+ }
+ }
+
+ // check that the size limits are sane
+ LFS_ASSERT(lfs->cfg->name_max <= LFS_NAME_MAX);
+ lfs->name_max = lfs->cfg->name_max;
+ if (!lfs->name_max) {
+ lfs->name_max = LFS_NAME_MAX;
+ }
+
+ LFS_ASSERT(lfs->cfg->file_max <= LFS_FILE_MAX);
+ lfs->file_max = lfs->cfg->file_max;
+ if (!lfs->file_max) {
+ lfs->file_max = LFS_FILE_MAX;
+ }
+
+ LFS_ASSERT(lfs->cfg->attr_max <= LFS_ATTR_MAX);
+ lfs->attr_max = lfs->cfg->attr_max;
+ if (!lfs->attr_max) {
+ lfs->attr_max = LFS_ATTR_MAX;
+ }
+
+ LFS_ASSERT(lfs->cfg->metadata_max <= lfs->cfg->block_size);
+
+ LFS_ASSERT(lfs->cfg->inline_max == (lfs_size_t)-1
+ || lfs->cfg->inline_max <= lfs->cfg->cache_size);
+ LFS_ASSERT(lfs->cfg->inline_max == (lfs_size_t)-1
+ || lfs->cfg->inline_max <= lfs->attr_max);
+ LFS_ASSERT(lfs->cfg->inline_max == (lfs_size_t)-1
+ || lfs->cfg->inline_max <= ((lfs->cfg->metadata_max)
+ ? lfs->cfg->metadata_max
+ : lfs->cfg->block_size)/8);
+ lfs->inline_max = lfs->cfg->inline_max;
+ if (lfs->inline_max == (lfs_size_t)-1) {
+ lfs->inline_max = 0;
+ } else if (lfs->inline_max == 0) {
+ lfs->inline_max = lfs_min(
+ lfs->cfg->cache_size,
+ lfs_min(
+ lfs->attr_max,
+ ((lfs->cfg->metadata_max)
+ ? lfs->cfg->metadata_max
+ : lfs->cfg->block_size)/8));
+ }
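+ // (worked example: with this port's cache_size=256, block_size=4096,
+ // and the default attr_max=1022, inline_max defaults to
+ // lfs_min(256, lfs_min(1022, 4096/8)) = 256 bytes)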
+
+ // setup default state
+ lfs->root[0] = LFS_BLOCK_NULL;
+ lfs->root[1] = LFS_BLOCK_NULL;
+ lfs->mlist = NULL;
+ lfs->seed = 0;
+ lfs->gdisk = (lfs_gstate_t){0};
+ lfs->gstate = (lfs_gstate_t){0};
+ lfs->gdelta = (lfs_gstate_t){0};
+#ifdef LFS_MIGRATE
+ lfs->lfs1 = NULL;
+#endif
+
+ return 0;
+
+cleanup:
+ lfs_deinit(lfs);
+ return err;
+}
+
+static int lfs_deinit(lfs_t *lfs) {
+ // free allocated memory
+ if (!lfs->cfg->read_buffer) {
+ lfs_free(lfs->rcache.buffer);
+ }
+
+ if (!lfs->cfg->prog_buffer) {
+ lfs_free(lfs->pcache.buffer);
+ }
+
+ if (!lfs->cfg->lookahead_buffer) {
+ lfs_free(lfs->lookahead.buffer);
+ }
+
+ return 0;
+}
+
+
+
+#ifndef LFS_READONLY
+static int lfs_format_(lfs_t *lfs, const struct lfs_config *cfg) {
+ int err = 0;
+ {
+ err = lfs_init(lfs, cfg);
+ if (err) {
+ return err;
+ }
+
+ LFS_ASSERT(cfg->block_count != 0);
+
+ // create free lookahead
+ memset(lfs->lookahead.buffer, 0, lfs->cfg->lookahead_size);
+ lfs->lookahead.start = 0;
+ lfs->lookahead.size = lfs_min(8*lfs->cfg->lookahead_size,
+ lfs->block_count);
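+ // (the lookahead buffer is a bitmap, one bit per block, hence the
+ // factor of 8)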
+ lfs->lookahead.next = 0;
+ lfs_alloc_ckpoint(lfs);
+
+ // create root dir
+ lfs_mdir_t root;
+ err = lfs_dir_alloc(lfs, &root);
+ if (err) {
+ goto cleanup;
+ }
+
+ // write one superblock
+ lfs_superblock_t superblock = {
+ .version = lfs_fs_disk_version(lfs),
+ .block_size = lfs->cfg->block_size,
+ .block_count = lfs->block_count,
+ .name_max = lfs->name_max,
+ .file_max = lfs->file_max,
+ .attr_max = lfs->attr_max,
+ };
+
+ lfs_superblock_tole32(&superblock);
+ err = lfs_dir_commit(lfs, &root, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0), NULL},
+ {LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), "littlefs"},
+ {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
+ &superblock}));
+ if (err) {
+ goto cleanup;
+ }
+
+ // force compaction to prevent accidentally mounting any
+ // older version of littlefs that may live on disk
+ root.erased = false;
+ err = lfs_dir_commit(lfs, &root, NULL, 0);
+ if (err) {
+ goto cleanup;
+ }
+
+ // sanity check that fetch works
+ err = lfs_dir_fetch(lfs, &root, (const lfs_block_t[2]){0, 1});
+ if (err) {
+ goto cleanup;
+ }
+ }
+
+cleanup:
+ lfs_deinit(lfs);
+ return err;
+}
+#endif
+
+struct lfs_tortoise_t {
+ lfs_block_t pair[2];
+ lfs_size_t i;
+ lfs_size_t period;
+};
+
+static int lfs_tortoise_detectcycles(
+ const lfs_mdir_t *dir, struct lfs_tortoise_t *tortoise) {
+ // detect cycles with Brent's algorithm
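+ //
+ // the tortoise pair holds a snapshot of the tail list; every
+ // power-of-two steps the snapshot jumps forward to the current tail,
+ // so a cycle of any length eventually walks back onto the snapshot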
+ if (lfs_pair_issync(dir->tail, tortoise->pair)) {
+ LFS_WARN("Cycle detected in tail list");
+ return LFS_ERR_CORRUPT;
+ }
+ if (tortoise->i == tortoise->period) {
+ tortoise->pair[0] = dir->tail[0];
+ tortoise->pair[1] = dir->tail[1];
+ tortoise->i = 0;
+ tortoise->period *= 2;
+ }
+ tortoise->i += 1;
+
+ return LFS_ERR_OK;
+}
+
+static int lfs_mount_(lfs_t *lfs, const struct lfs_config *cfg) {
+ int err = lfs_init(lfs, cfg);
+ if (err) {
+ return err;
+ }
+
+ // scan directory blocks for superblock and any global updates
+ lfs_mdir_t dir = {.tail = {0, 1}};
+ struct lfs_tortoise_t tortoise = {
+ .pair = {LFS_BLOCK_NULL, LFS_BLOCK_NULL},
+ .i = 1,
+ .period = 1,
+ };
+ while (!lfs_pair_isnull(dir.tail)) {
+ err = lfs_tortoise_detectcycles(&dir, &tortoise);
+ if (err < 0) {
+ goto cleanup;
+ }
+
+ // fetch next block in tail list
+ lfs_stag_t tag = lfs_dir_fetchmatch(lfs, &dir, dir.tail,
+ LFS_MKTAG(0x7ff, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8),
+ NULL,
+ lfs_dir_find_match, &(struct lfs_dir_find_match){
+ lfs, "littlefs", 8});
+ if (tag < 0) {
+ err = tag;
+ goto cleanup;
+ }
+
+ // has superblock?
+ if (tag && !lfs_tag_isdelete(tag)) {
+ // update root
+ lfs->root[0] = dir.pair[0];
+ lfs->root[1] = dir.pair[1];
+
+ // grab superblock
+ lfs_superblock_t superblock;
+ tag = lfs_dir_get(lfs, &dir, LFS_MKTAG(0x7ff, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
+ &superblock);
+ if (tag < 0) {
+ err = tag;
+ goto cleanup;
+ }
+ lfs_superblock_fromle32(&superblock);
+
+ // check version
+ uint16_t major_version = (0xffff & (superblock.version >> 16));
+ uint16_t minor_version = (0xffff & (superblock.version >> 0));
+ if (major_version != lfs_fs_disk_version_major(lfs)
+ || minor_version > lfs_fs_disk_version_minor(lfs)) {
+ LFS_ERROR("Invalid version "
+ "v%"PRIu16".%"PRIu16" != v%"PRIu16".%"PRIu16,
+ major_version,
+ minor_version,
+ lfs_fs_disk_version_major(lfs),
+ lfs_fs_disk_version_minor(lfs));
+ err = LFS_ERR_INVAL;
+ goto cleanup;
+ }
+
+ // found older minor version? set an in-device only bit in the
+ // gstate so we know we need to rewrite the superblock before
+ // the first write
+ bool needssuperblock = false;
+ if (minor_version < lfs_fs_disk_version_minor(lfs)) {
+ LFS_DEBUG("Found older minor version "
+ "v%"PRIu16".%"PRIu16" < v%"PRIu16".%"PRIu16,
+ major_version,
+ minor_version,
+ lfs_fs_disk_version_major(lfs),
+ lfs_fs_disk_version_minor(lfs));
+ needssuperblock = true;
+ }
+ // note this bit is reserved on disk, so fetching more gstate
+ // will not interfere here
+ lfs_fs_prepsuperblock(lfs, needssuperblock);
+
+ // check superblock configuration
+ if (superblock.name_max) {
+ if (superblock.name_max > lfs->name_max) {
+ LFS_ERROR("Unsupported name_max (%"PRIu32" > %"PRIu32")",
+ superblock.name_max, lfs->name_max);
+ err = LFS_ERR_INVAL;
+ goto cleanup;
+ }
+
+ lfs->name_max = superblock.name_max;
+ }
+
+ if (superblock.file_max) {
+ if (superblock.file_max > lfs->file_max) {
+ LFS_ERROR("Unsupported file_max (%"PRIu32" > %"PRIu32")",
+ superblock.file_max, lfs->file_max);
+ err = LFS_ERR_INVAL;
+ goto cleanup;
+ }
+
+ lfs->file_max = superblock.file_max;
+ }
+
+ if (superblock.attr_max) {
+ if (superblock.attr_max > lfs->attr_max) {
+ LFS_ERROR("Unsupported attr_max (%"PRIu32" > %"PRIu32")",
+ superblock.attr_max, lfs->attr_max);
+ err = LFS_ERR_INVAL;
+ goto cleanup;
+ }
+
+ lfs->attr_max = superblock.attr_max;
+
+ // we also need to update inline_max in case attr_max changed
+ lfs->inline_max = lfs_min(lfs->inline_max, lfs->attr_max);
+ }
+
+ // this is where we get the block_count from disk if block_count=0
+ if (lfs->cfg->block_count
+ && superblock.block_count != lfs->cfg->block_count) {
+ LFS_ERROR("Invalid block count (%"PRIu32" != %"PRIu32")",
+ superblock.block_count, lfs->cfg->block_count);
+ err = LFS_ERR_INVAL;
+ goto cleanup;
+ }
+
+ lfs->block_count = superblock.block_count;
+
+ if (superblock.block_size != lfs->cfg->block_size) {
+ LFS_ERROR("Invalid block size (%"PRIu32" != %"PRIu32")",
+ superblock.block_size, lfs->cfg->block_size);
+ err = LFS_ERR_INVAL;
+ goto cleanup;
+ }
+ }
+
+ // has gstate?
+ err = lfs_dir_getgstate(lfs, &dir, &lfs->gstate);
+ if (err) {
+ goto cleanup;
+ }
+ }
+
+ // update littlefs with gstate
+ if (!lfs_gstate_iszero(&lfs->gstate)) {
+ LFS_DEBUG("Found pending gstate 0x%08"PRIx32"%08"PRIx32"%08"PRIx32,
+ lfs->gstate.tag,
+ lfs->gstate.pair[0],
+ lfs->gstate.pair[1]);
+ }
+ lfs->gstate.tag += !lfs_tag_isvalid(lfs->gstate.tag);
+ lfs->gdisk = lfs->gstate;
+
+ // setup free lookahead; to distribute allocations uniformly across
+ // boots, we start the allocator at a random location
+ lfs->lookahead.start = lfs->seed % lfs->block_count;
+ lfs_alloc_drop(lfs);
+
+ return 0;
+
+cleanup:
+ lfs_unmount_(lfs);
+ return err;
+}
+
+static int lfs_unmount_(lfs_t *lfs) {
+ return lfs_deinit(lfs);
+}
+
+
+/// Filesystem-level operations ///
+static int lfs_fs_stat_(lfs_t *lfs, struct lfs_fsinfo *fsinfo) {
+ // if the superblock is up-to-date, we must be on the most recent
+ // minor version of littlefs
+ if (!lfs_gstate_needssuperblock(&lfs->gstate)) {
+ fsinfo->disk_version = lfs_fs_disk_version(lfs);
+
+ // otherwise we need to read the minor version on disk
+ } else {
+ // fetch the superblock
+ lfs_mdir_t dir;
+ int err = lfs_dir_fetch(lfs, &dir, lfs->root);
+ if (err) {
+ return err;
+ }
+
+ lfs_superblock_t superblock;
+ lfs_stag_t tag = lfs_dir_get(lfs, &dir, LFS_MKTAG(0x7ff, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
+ &superblock);
+ if (tag < 0) {
+ return tag;
+ }
+ lfs_superblock_fromle32(&superblock);
+
+ // read the on-disk version
+ fsinfo->disk_version = superblock.version;
+ }
+
+ // filesystem geometry
+ fsinfo->block_size = lfs->cfg->block_size;
+ fsinfo->block_count = lfs->block_count;
+
+ // other on-disk configuration; we cache all of these for internal use
+ fsinfo->name_max = lfs->name_max;
+ fsinfo->file_max = lfs->file_max;
+ fsinfo->attr_max = lfs->attr_max;
+
+ return 0;
+}
+
+int lfs_fs_traverse_(lfs_t *lfs,
+ int (*cb)(void *data, lfs_block_t block), void *data,
+ bool includeorphans) {
+ // iterate over metadata pairs
+ lfs_mdir_t dir = {.tail = {0, 1}};
+
+#ifdef LFS_MIGRATE
+ // also consider v1 blocks during migration
+ if (lfs->lfs1) {
+ int err = lfs1_traverse(lfs, cb, data);
+ if (err) {
+ return err;
+ }
+
+ dir.tail[0] = lfs->root[0];
+ dir.tail[1] = lfs->root[1];
+ }
+#endif
+
+ struct lfs_tortoise_t tortoise = {
+ .pair = {LFS_BLOCK_NULL, LFS_BLOCK_NULL},
+ .i = 1,
+ .period = 1,
+ };
+ int err = LFS_ERR_OK;
+ while (!lfs_pair_isnull(dir.tail)) {
+ err = lfs_tortoise_detectcycles(&dir, &tortoise);
+ if (err < 0) {
+ return LFS_ERR_CORRUPT;
+ }
+
+ for (int i = 0; i < 2; i++) {
+ int err = cb(data, dir.tail[i]);
+ if (err) {
+ return err;
+ }
+ }
+
+ // iterate through ids in directory
+ int err = lfs_dir_fetch(lfs, &dir, dir.tail);
+ if (err) {
+ return err;
+ }
+
+ for (uint16_t id = 0; id < dir.count; id++) {
+ struct lfs_ctz ctz;
+ lfs_stag_t tag = lfs_dir_get(lfs, &dir, LFS_MKTAG(0x700, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_STRUCT, id, sizeof(ctz)), &ctz);
+ if (tag < 0) {
+ if (tag == LFS_ERR_NOENT) {
+ continue;
+ }
+ return tag;
+ }
+ lfs_ctz_fromle32(&ctz);
+
+ if (lfs_tag_type3(tag) == LFS_TYPE_CTZSTRUCT) {
+ err = lfs_ctz_traverse(lfs, NULL, &lfs->rcache,
+ ctz.head, ctz.size, cb, data);
+ if (err) {
+ return err;
+ }
+ } else if (includeorphans &&
+ lfs_tag_type3(tag) == LFS_TYPE_DIRSTRUCT) {
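+ // a dirstruct stores a block pair in the same bytes where a
+ // ctzstruct stores {head, size}, hence this reinterpretation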
+ for (int i = 0; i < 2; i++) {
+ err = cb(data, (&ctz.head)[i]);
+ if (err) {
+ return err;
+ }
+ }
+ }
+ }
+ }
+
+#ifndef LFS_READONLY
+ // iterate over any open files
+ for (lfs_file_t *f = (lfs_file_t*)lfs->mlist; f; f = f->next) {
+ if (f->type != LFS_TYPE_REG) {
+ continue;
+ }
+
+ if ((f->flags & LFS_F_DIRTY) && !(f->flags & LFS_F_INLINE)) {
+ int err = lfs_ctz_traverse(lfs, &f->cache, &lfs->rcache,
+ f->ctz.head, f->ctz.size, cb, data);
+ if (err) {
+ return err;
+ }
+ }
+
+ if ((f->flags & LFS_F_WRITING) && !(f->flags & LFS_F_INLINE)) {
+ int err = lfs_ctz_traverse(lfs, &f->cache, &lfs->rcache,
+ f->block, f->pos, cb, data);
+ if (err) {
+ return err;
+ }
+ }
+ }
+#endif
+
+ return 0;
+}
+
+#ifndef LFS_READONLY
+static int lfs_fs_pred(lfs_t *lfs,
+ const lfs_block_t pair[2], lfs_mdir_t *pdir) {
+ // iterate over all directory entries
+ pdir->tail[0] = 0;
+ pdir->tail[1] = 1;
+ struct lfs_tortoise_t tortoise = {
+ .pair = {LFS_BLOCK_NULL, LFS_BLOCK_NULL},
+ .i = 1,
+ .period = 1,
+ };
+ int err = LFS_ERR_OK;
+ while (!lfs_pair_isnull(pdir->tail)) {
+ err = lfs_tortoise_detectcycles(pdir, &tortoise);
+ if (err < 0) {
+ return LFS_ERR_CORRUPT;
+ }
+
+ if (lfs_pair_cmp(pdir->tail, pair) == 0) {
+ return 0;
+ }
+
+ int err = lfs_dir_fetch(lfs, pdir, pdir->tail);
+ if (err) {
+ return err;
+ }
+ }
+
+ return LFS_ERR_NOENT;
+}
+#endif
+
+#ifndef LFS_READONLY
+struct lfs_fs_parent_match {
+ lfs_t *lfs;
+ const lfs_block_t pair[2];
+};
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_fs_parent_match(void *data,
+ lfs_tag_t tag, const void *buffer) {
+ struct lfs_fs_parent_match *find = data;
+ lfs_t *lfs = find->lfs;
+ const struct lfs_diskoff *disk = buffer;
+ (void)tag;
+
+ lfs_block_t child[2];
+ int err = lfs_bd_read(lfs,
+ &lfs->pcache, &lfs->rcache, lfs->cfg->block_size,
+ disk->block, disk->off, &child, sizeof(child));
+ if (err) {
+ return err;
+ }
+
+ lfs_pair_fromle32(child);
+ return (lfs_pair_cmp(child, find->pair) == 0) ? LFS_CMP_EQ : LFS_CMP_LT;
+}
+#endif
+
+#ifndef LFS_READONLY
+static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t pair[2],
+ lfs_mdir_t *parent) {
+ // use fetchmatch with callback to find pairs
+ parent->tail[0] = 0;
+ parent->tail[1] = 1;
+ struct lfs_tortoise_t tortoise = {
+ .pair = {LFS_BLOCK_NULL, LFS_BLOCK_NULL},
+ .i = 1,
+ .period = 1,
+ };
+ int err = LFS_ERR_OK;
+ while (!lfs_pair_isnull(parent->tail)) {
+ err = lfs_tortoise_detectcycles(parent, &tortoise);
+ if (err < 0) {
+ return err;
+ }
+
+ lfs_stag_t tag = lfs_dir_fetchmatch(lfs, parent, parent->tail,
+ LFS_MKTAG(0x7ff, 0, 0x3ff),
+ LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 0, 8),
+ NULL,
+ lfs_fs_parent_match, &(struct lfs_fs_parent_match){
+ lfs, {pair[0], pair[1]}});
+ if (tag && tag != LFS_ERR_NOENT) {
+ return tag;
+ }
+ }
+
+ return LFS_ERR_NOENT;
+}
+#endif
+
+static void lfs_fs_prepsuperblock(lfs_t *lfs, bool needssuperblock) {
+ lfs->gstate.tag = (lfs->gstate.tag & ~LFS_MKTAG(0, 0, 0x200))
+ | (uint32_t)needssuperblock << 9;
+}
+
+#ifndef LFS_READONLY
+static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans) {
+ LFS_ASSERT(lfs_tag_size(lfs->gstate.tag) > 0x000 || orphans >= 0);
+ LFS_ASSERT(lfs_tag_size(lfs->gstate.tag) < 0x1ff || orphans <= 0);
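+ // the orphan count is tracked in the size field of the gstate tag,
+ // with bit 31 kept in sync as a cheap "has orphans" flag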
+ lfs->gstate.tag += orphans;
+ lfs->gstate.tag = ((lfs->gstate.tag & ~LFS_MKTAG(0x800, 0, 0)) |
+ ((uint32_t)lfs_gstate_hasorphans(&lfs->gstate) << 31));
+
+ return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+static void lfs_fs_prepmove(lfs_t *lfs,
+ uint16_t id, const lfs_block_t pair[2]) {
+ lfs->gstate.tag = ((lfs->gstate.tag & ~LFS_MKTAG(0x7ff, 0x3ff, 0)) |
+ ((id != 0x3ff) ? LFS_MKTAG(LFS_TYPE_DELETE, id, 0) : 0));
+ lfs->gstate.pair[0] = (id != 0x3ff) ? pair[0] : 0;
+ lfs->gstate.pair[1] = (id != 0x3ff) ? pair[1] : 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_fs_desuperblock(lfs_t *lfs) {
+ if (!lfs_gstate_needssuperblock(&lfs->gstate)) {
+ return 0;
+ }
+
+ LFS_DEBUG("Rewriting superblock {0x%"PRIx32", 0x%"PRIx32"}",
+ lfs->root[0],
+ lfs->root[1]);
+
+ lfs_mdir_t root;
+ int err = lfs_dir_fetch(lfs, &root, lfs->root);
+ if (err) {
+ return err;
+ }
+
+ // write a new superblock
+ lfs_superblock_t superblock = {
+ .version = lfs_fs_disk_version(lfs),
+ .block_size = lfs->cfg->block_size,
+ .block_count = lfs->block_count,
+ .name_max = lfs->name_max,
+ .file_max = lfs->file_max,
+ .attr_max = lfs->attr_max,
+ };
+
+ lfs_superblock_tole32(&superblock);
+ err = lfs_dir_commit(lfs, &root, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
+ &superblock}));
+ if (err) {
+ return err;
+ }
+
+ lfs_fs_prepsuperblock(lfs, false);
+ return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_fs_demove(lfs_t *lfs) {
+ if (!lfs_gstate_hasmove(&lfs->gdisk)) {
+ return 0;
+ }
+
+ // Fix bad moves
+ LFS_DEBUG("Fixing move {0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16,
+ lfs->gdisk.pair[0],
+ lfs->gdisk.pair[1],
+ lfs_tag_id(lfs->gdisk.tag));
+
+ // no other gstate is supported at this time, so if we found something else
+ // something most likely went wrong in gstate calculation
+ LFS_ASSERT(lfs_tag_type3(lfs->gdisk.tag) == LFS_TYPE_DELETE);
+
+ // fetch and delete the moved entry
+ lfs_mdir_t movedir;
+ int err = lfs_dir_fetch(lfs, &movedir, lfs->gdisk.pair);
+ if (err) {
+ return err;
+ }
+
+ // prep gstate and delete move id
+ uint16_t moveid = lfs_tag_id(lfs->gdisk.tag);
+ lfs_fs_prepmove(lfs, 0x3ff, NULL);
+ err = lfs_dir_commit(lfs, &movedir, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_DELETE, moveid, 0), NULL}));
+ if (err) {
+ return err;
+ }
+
+ return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_fs_deorphan(lfs_t *lfs, bool powerloss) {
+ if (!lfs_gstate_hasorphans(&lfs->gstate)) {
+ return 0;
+ }
+
+ // Check for orphans in two separate passes:
+ // - 1 for half-orphans (relocations)
+ // - 2 for full-orphans (removes/renames)
+ //
+ // Two separate passes are needed as half-orphans can contain outdated
+ // references to full-orphans, effectively hiding them from the deorphan
+ // search.
+ //
+ int pass = 0;
+ while (pass < 2) {
+ // Fix any orphans
+ lfs_mdir_t pdir = {.split = true, .tail = {0, 1}};
+ lfs_mdir_t dir;
+ bool moreorphans = false;
+
+ // iterate over all directory entries
+ while (!lfs_pair_isnull(pdir.tail)) {
+ int err = lfs_dir_fetch(lfs, &dir, pdir.tail);
+ if (err) {
+ return err;
+ }
+
+ // check head blocks for orphans
+ if (!pdir.split) {
+ // check if we have a parent
+ lfs_mdir_t parent;
+ lfs_stag_t tag = lfs_fs_parent(lfs, pdir.tail, &parent);
+ if (tag < 0 && tag != LFS_ERR_NOENT) {
+ return tag;
+ }
+
+ if (pass == 0 && tag != LFS_ERR_NOENT) {
+ lfs_block_t pair[2];
+ lfs_stag_t state = lfs_dir_get(lfs, &parent,
+ LFS_MKTAG(0x7ff, 0x3ff, 0), tag, pair);
+ if (state < 0) {
+ return state;
+ }
+ lfs_pair_fromle32(pair);
+
+ if (!lfs_pair_issync(pair, pdir.tail)) {
+ // we have desynced
+ LFS_DEBUG("Fixing half-orphan "
+ "{0x%"PRIx32", 0x%"PRIx32"} "
+ "-> {0x%"PRIx32", 0x%"PRIx32"}",
+ pdir.tail[0], pdir.tail[1], pair[0], pair[1]);
+
+ // fix pending move in this pair? this looks like an
+ // optimization but is in fact _required_ since
+ // relocating may outdate the move.
+ uint16_t moveid = 0x3ff;
+ if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
+ moveid = lfs_tag_id(lfs->gstate.tag);
+ LFS_DEBUG("Fixing move while fixing orphans "
+ "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
+ pdir.pair[0], pdir.pair[1], moveid);
+ lfs_fs_prepmove(lfs, 0x3ff, NULL);
+ }
+
+ lfs_pair_tole32(pair);
+ state = lfs_dir_orphaningcommit(lfs, &pdir, LFS_MKATTRS(
+ {LFS_MKTAG_IF(moveid != 0x3ff,
+ LFS_TYPE_DELETE, moveid, 0), NULL},
+ {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8),
+ pair}));
+ lfs_pair_fromle32(pair);
+ if (state < 0) {
+ return state;
+ }
+
+ // did our commit create more orphans?
+ if (state == LFS_OK_ORPHANED) {
+ moreorphans = true;
+ }
+
+ // refetch tail
+ continue;
+ }
+ }
+
+ // note we only check for full orphans if we may have had a
+ // power-loss, otherwise orphans are created intentionally
+ // during operations such as lfs_mkdir
+ if (pass == 1 && tag == LFS_ERR_NOENT && powerloss) {
+ // we are an orphan
+ LFS_DEBUG("Fixing orphan {0x%"PRIx32", 0x%"PRIx32"}",
+ pdir.tail[0], pdir.tail[1]);
+
+ // steal state
+ err = lfs_dir_getgstate(lfs, &dir, &lfs->gdelta);
+ if (err) {
+ return err;
+ }
+
+ // steal tail
+ lfs_pair_tole32(dir.tail);
+ int state = lfs_dir_orphaningcommit(lfs, &pdir, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_TAIL + dir.split, 0x3ff, 8),
+ dir.tail}));
+ lfs_pair_fromle32(dir.tail);
+ if (state < 0) {
+ return state;
+ }
+
+ // did our commit create more orphans?
+ if (state == LFS_OK_ORPHANED) {
+ moreorphans = true;
+ }
+
+ // refetch tail
+ continue;
+ }
+ }
+
+ pdir = dir;
+ }
+
+ pass = moreorphans ? 0 : pass+1;
+ }
+
+ // mark orphans as fixed
+ return lfs_fs_preporphans(lfs, -lfs_gstate_getorphans(&lfs->gstate));
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_fs_forceconsistency(lfs_t *lfs) {
+ int err = lfs_fs_desuperblock(lfs);
+ if (err) {
+ return err;
+ }
+
+ err = lfs_fs_demove(lfs);
+ if (err) {
+ return err;
+ }
+
+ err = lfs_fs_deorphan(lfs, true);
+ if (err) {
+ return err;
+ }
+
+ return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_fs_mkconsistent_(lfs_t *lfs) {
+ // lfs_fs_forceconsistency does most of the work here
+ int err = lfs_fs_forceconsistency(lfs);
+ if (err) {
+ return err;
+ }
+
+ // do we have any pending gstate?
+ lfs_gstate_t delta = {0};
+ lfs_gstate_xor(&delta, &lfs->gdisk);
+ lfs_gstate_xor(&delta, &lfs->gstate);
+ if (!lfs_gstate_iszero(&delta)) {
+ // lfs_dir_commit will implicitly write out any pending gstate
+ lfs_mdir_t root;
+ err = lfs_dir_fetch(lfs, &root, lfs->root);
+ if (err) {
+ return err;
+ }
+
+ err = lfs_dir_commit(lfs, &root, NULL, 0);
+ if (err) {
+ return err;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+static int lfs_fs_size_count(void *p, lfs_block_t block) {
+ (void)block;
+ lfs_size_t *size = p;
+ *size += 1;
+ return 0;
+}
+
+static lfs_ssize_t lfs_fs_size_(lfs_t *lfs) {
+ lfs_size_t size = 0;
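+ // count every block the traversal reports; blocks reachable through
+ // multiple references are counted once per reference, so the result
+ // is an upper-bound estimate, in blocks rather than bytes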
+ int err = lfs_fs_traverse_(lfs, lfs_fs_size_count, &size, false);
+ if (err) {
+ return err;
+ }
+
+ return size;
+}
+
+// explicit garbage collection
+#ifndef LFS_READONLY
+static int lfs_fs_gc_(lfs_t *lfs) {
+ // force consistency, even if we're not necessarily going to write;
+ // this function is supposed to take care of janitorial work
+ int err = lfs_fs_forceconsistency(lfs);
+ if (err) {
+ return err;
+ }
+
+ // try to compact metadata pairs, note we can't really accomplish
+ // anything if compact_thresh doesn't at least leave a prog_size
+ // available
+ if (lfs->cfg->compact_thresh
+ < lfs->cfg->block_size - lfs->cfg->prog_size) {
+ // iterate over all mdirs
+ lfs_mdir_t mdir = {.tail = {0, 1}};
+ while (!lfs_pair_isnull(mdir.tail)) {
+ err = lfs_dir_fetch(lfs, &mdir, mdir.tail);
+ if (err) {
+ return err;
+ }
+
+ // not erased? exceeds our compaction threshold?
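+ // (with compact_thresh=0 the default applies: compact once the mdir
+ // is more than ~7/8 of a block full)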
+ if (!mdir.erased || ((lfs->cfg->compact_thresh == 0)
+ ? mdir.off > lfs->cfg->block_size - lfs->cfg->block_size/8
+ : mdir.off > lfs->cfg->compact_thresh)) {
+ // the easiest way to trigger a compaction is to mark
+ // the mdir as unerased and add an empty commit
+ mdir.erased = false;
+ err = lfs_dir_commit(lfs, &mdir, NULL, 0);
+ if (err) {
+ return err;
+ }
+ }
+ }
+ }
+
+ // try to populate the lookahead buffer, unless it's already full
+ if (lfs->lookahead.size < lfs_min(
+ 8 * lfs->cfg->lookahead_size,
+ lfs->block_count)) {
+ err = lfs_alloc_scan(lfs);
+ if (err) {
+ return err;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+#ifdef LFS_SHRINKNONRELOCATING
+static int lfs_shrink_checkblock(void *data, lfs_block_t block) {
+ lfs_size_t threshold = *((lfs_size_t*)data);
+ if (block >= threshold) {
+ return LFS_ERR_NOTEMPTY;
+ }
+ return 0;
+}
+#endif
+
+static int lfs_fs_grow_(lfs_t *lfs, lfs_size_t block_count) {
+ int err;
+
+ if (block_count == lfs->block_count) {
+ return 0;
+ }
+
+#ifndef LFS_SHRINKNONRELOCATING
+ // shrinking is not supported
+ LFS_ASSERT(block_count >= lfs->block_count);
+#endif
+#ifdef LFS_SHRINKNONRELOCATING
+ if (block_count < lfs->block_count) {
+ err = lfs_fs_traverse_(lfs, lfs_shrink_checkblock, &block_count, true);
+ if (err) {
+ return err;
+ }
+ }
+#endif
+
+ lfs->block_count = block_count;
+
+ // fetch the root
+ lfs_mdir_t root;
+ err = lfs_dir_fetch(lfs, &root, lfs->root);
+ if (err) {
+ return err;
+ }
+
+ // update the superblock
+ lfs_superblock_t superblock;
+ lfs_stag_t tag = lfs_dir_get(lfs, &root, LFS_MKTAG(0x7ff, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
+ &superblock);
+ if (tag < 0) {
+ return tag;
+ }
+ lfs_superblock_fromle32(&superblock);
+
+ superblock.block_count = lfs->block_count;
+
+ lfs_superblock_tole32(&superblock);
+ err = lfs_dir_commit(lfs, &root, LFS_MKATTRS(
+ {tag, &superblock}));
+ if (err) {
+ return err;
+ }
+ return 0;
+}
+#endif
+
+#ifdef LFS_MIGRATE
+////// Migration from littlefs v1 below this //////
+
+/// Version info ///
+
+// Software library version
+// Major (top 16 bits), incremented on backwards incompatible changes
+// Minor (bottom 16 bits), incremented on feature additions
+#define LFS1_VERSION 0x00010007
+#define LFS1_VERSION_MAJOR (0xffff & (LFS1_VERSION >> 16))
+#define LFS1_VERSION_MINOR (0xffff & (LFS1_VERSION >> 0))
+
+// Version of On-disk data structures
+// Major (top 16 bits), incremented on backwards incompatible changes
+// Minor (bottom 16 bits), incremented on feature additions
+#define LFS1_DISK_VERSION 0x00010001
+#define LFS1_DISK_VERSION_MAJOR (0xffff & (LFS1_DISK_VERSION >> 16))
+#define LFS1_DISK_VERSION_MINOR (0xffff & (LFS1_DISK_VERSION >> 0))
+
+
+/// v1 Definitions ///
+
+// File types
+enum lfs1_type {
+ LFS1_TYPE_REG = 0x11,
+ LFS1_TYPE_DIR = 0x22,
+ LFS1_TYPE_SUPERBLOCK = 0x2e,
+};
+
+typedef struct lfs1 {
+ lfs_block_t root[2];
+} lfs1_t;
+
+typedef struct lfs1_entry {
+ lfs_off_t off;
+
+ struct lfs1_disk_entry {
+ uint8_t type;
+ uint8_t elen;
+ uint8_t alen;
+ uint8_t nlen;
+ union {
+ struct {
+ lfs_block_t head;
+ lfs_size_t size;
+ } file;
+ lfs_block_t dir[2];
+ } u;
+ } d;
+} lfs1_entry_t;
+
+typedef struct lfs1_dir {
+ struct lfs1_dir *next;
+ lfs_block_t pair[2];
+ lfs_off_t off;
+
+ lfs_block_t head[2];
+ lfs_off_t pos;
+
+ struct lfs1_disk_dir {
+ uint32_t rev;
+ lfs_size_t size;
+ lfs_block_t tail[2];
+ } d;
+} lfs1_dir_t;
+
+typedef struct lfs1_superblock {
+ lfs_off_t off;
+
+ struct lfs1_disk_superblock {
+ uint8_t type;
+ uint8_t elen;
+ uint8_t alen;
+ uint8_t nlen;
+ lfs_block_t root[2];
+ uint32_t block_size;
+ uint32_t block_count;
+ uint32_t version;
+ char magic[8];
+ } d;
+} lfs1_superblock_t;
+
+
+/// Low-level wrappers v1->v2 ///
+static void lfs1_crc(uint32_t *crc, const void *buffer, size_t size) {
+ *crc = lfs_crc(*crc, buffer, size);
+}
+
+static int lfs1_bd_read(lfs_t *lfs, lfs_block_t block,
+ lfs_off_t off, void *buffer, lfs_size_t size) {
+ // if we ever do more than writes to alternating pairs,
+ // this may need to consider pcache
+ return lfs_bd_read(lfs, &lfs->pcache, &lfs->rcache, size,
+ block, off, buffer, size);
+}
+
+static int lfs1_bd_crc(lfs_t *lfs, lfs_block_t block,
+ lfs_off_t off, lfs_size_t size, uint32_t *crc) {
+ for (lfs_off_t i = 0; i < size; i++) {
+ uint8_t c;
+ int err = lfs1_bd_read(lfs, block, off+i, &c, 1);
+ if (err) {
+ return err;
+ }
+
+ lfs1_crc(crc, &c, 1);
+ }
+
+ return 0;
+}
+
+
+/// Endian swapping functions ///
+static void lfs1_dir_fromle32(struct lfs1_disk_dir *d) {
+ d->rev = lfs_fromle32(d->rev);
+ d->size = lfs_fromle32(d->size);
+ d->tail[0] = lfs_fromle32(d->tail[0]);
+ d->tail[1] = lfs_fromle32(d->tail[1]);
+}
+
+static void lfs1_dir_tole32(struct lfs1_disk_dir *d) {
+ d->rev = lfs_tole32(d->rev);
+ d->size = lfs_tole32(d->size);
+ d->tail[0] = lfs_tole32(d->tail[0]);
+ d->tail[1] = lfs_tole32(d->tail[1]);
+}
+
+static void lfs1_entry_fromle32(struct lfs1_disk_entry *d) {
+ d->u.dir[0] = lfs_fromle32(d->u.dir[0]);
+ d->u.dir[1] = lfs_fromle32(d->u.dir[1]);
+}
+
+static void lfs1_entry_tole32(struct lfs1_disk_entry *d) {
+ d->u.dir[0] = lfs_tole32(d->u.dir[0]);
+ d->u.dir[1] = lfs_tole32(d->u.dir[1]);
+}
+
+static void lfs1_superblock_fromle32(struct lfs1_disk_superblock *d) {
+ d->root[0] = lfs_fromle32(d->root[0]);
+ d->root[1] = lfs_fromle32(d->root[1]);
+ d->block_size = lfs_fromle32(d->block_size);
+ d->block_count = lfs_fromle32(d->block_count);
+ d->version = lfs_fromle32(d->version);
+}
+
+
+/// Metadata pair and directory operations ///
+static inline lfs_size_t lfs1_entry_size(const lfs1_entry_t *entry) {
+ return 4 + entry->d.elen + entry->d.alen + entry->d.nlen;
+}
+
+static int lfs1_dir_fetch(lfs_t *lfs,
+ lfs1_dir_t *dir, const lfs_block_t pair[2]) {
+ // copy out pair, otherwise may be aliasing dir
+ const lfs_block_t tpair[2] = {pair[0], pair[1]};
+ bool valid = false;
+
+ // check both blocks for the most recent revision
+ for (int i = 0; i < 2; i++) {
+ struct lfs1_disk_dir test;
+ int err = lfs1_bd_read(lfs, tpair[i], 0, &test, sizeof(test));
+ lfs1_dir_fromle32(&test);
+ if (err) {
+ if (err == LFS_ERR_CORRUPT) {
+ continue;
+ }
+ return err;
+ }
+
+ if (valid && lfs_scmp(test.rev, dir->d.rev) < 0) {
+ continue;
+ }
+
+ if ((0x7fffffff & test.size) < sizeof(test)+4 ||
+ (0x7fffffff & test.size) > lfs->cfg->block_size) {
+ continue;
+ }
+
+ uint32_t crc = 0xffffffff;
+ lfs1_dir_tole32(&test);
+ lfs1_crc(&crc, &test, sizeof(test));
+ lfs1_dir_fromle32(&test);
+ err = lfs1_bd_crc(lfs, tpair[i], sizeof(test),
+ (0x7fffffff & test.size) - sizeof(test), &crc);
+ if (err) {
+ if (err == LFS_ERR_CORRUPT) {
+ continue;
+ }
+ return err;
+ }
+
+ if (crc != 0) {
+ continue;
+ }
+
+ valid = true;
+
+ // setup dir in case it's valid
+ dir->pair[0] = tpair[(i+0) % 2];
+ dir->pair[1] = tpair[(i+1) % 2];
+ dir->off = sizeof(dir->d);
+ dir->d = test;
+ }
+
+ if (!valid) {
+ LFS_ERROR("Corrupted dir pair at {0x%"PRIx32", 0x%"PRIx32"}",
+ tpair[0], tpair[1]);
+ return LFS_ERR_CORRUPT;
+ }
+
+ return 0;
+}
+
+static int lfs1_dir_next(lfs_t *lfs, lfs1_dir_t *dir, lfs1_entry_t *entry) {
+ while (dir->off + sizeof(entry->d) > (0x7fffffff & dir->d.size)-4) {
+ if (!(0x80000000 & dir->d.size)) {
+ entry->off = dir->off;
+ return LFS_ERR_NOENT;
+ }
+
+ int err = lfs1_dir_fetch(lfs, dir, dir->d.tail);
+ if (err) {
+ return err;
+ }
+
+ dir->off = sizeof(dir->d);
+ dir->pos += sizeof(dir->d) + 4;
+ }
+
+ int err = lfs1_bd_read(lfs, dir->pair[0], dir->off,
+ &entry->d, sizeof(entry->d));
+ lfs1_entry_fromle32(&entry->d);
+ if (err) {
+ return err;
+ }
+
+ entry->off = dir->off;
+ dir->off += lfs1_entry_size(entry);
+ dir->pos += lfs1_entry_size(entry);
+ return 0;
+}
+
+/// littlefs v1 specific operations ///
+int lfs1_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data) {
+ if (lfs_pair_isnull(lfs->lfs1->root)) {
+ return 0;
+ }
+
+ // iterate over metadata pairs
+ lfs1_dir_t dir;
+ lfs1_entry_t entry;
+ lfs_block_t cwd[2] = {0, 1};
+
+ while (true) {
+ for (int i = 0; i < 2; i++) {
+ int err = cb(data, cwd[i]);
+ if (err) {
+ return err;
+ }
+ }
+
+ int err = lfs1_dir_fetch(lfs, &dir, cwd);
+ if (err) {
+ return err;
+ }
+
+ // iterate over contents
+ while (dir.off + sizeof(entry.d) <= (0x7fffffff & dir.d.size)-4) {
+ err = lfs1_bd_read(lfs, dir.pair[0], dir.off,
+ &entry.d, sizeof(entry.d));
+ lfs1_entry_fromle32(&entry.d);
+ if (err) {
+ return err;
+ }
+
+ dir.off += lfs1_entry_size(&entry);
+ if ((0x70 & entry.d.type) == (0x70 & LFS1_TYPE_REG)) {
+ err = lfs_ctz_traverse(lfs, NULL, &lfs->rcache,
+ entry.d.u.file.head, entry.d.u.file.size, cb, data);
+ if (err) {
+ return err;
+ }
+ }
+ }
+
+ // we also need to check if we contain a threaded v2 directory
+ lfs_mdir_t dir2 = {.split=true, .tail={cwd[0], cwd[1]}};
+ while (dir2.split) {
+ err = lfs_dir_fetch(lfs, &dir2, dir2.tail);
+ if (err) {
+ break;
+ }
+
+ for (int i = 0; i < 2; i++) {
+ err = cb(data, dir2.pair[i]);
+ if (err) {
+ return err;
+ }
+ }
+ }
+
+ cwd[0] = dir.d.tail[0];
+ cwd[1] = dir.d.tail[1];
+
+ if (lfs_pair_isnull(cwd)) {
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int lfs1_moved(lfs_t *lfs, const void *e) {
+ if (lfs_pair_isnull(lfs->lfs1->root)) {
+ return 0;
+ }
+
+ // skip superblock
+ lfs1_dir_t cwd;
+ int err = lfs1_dir_fetch(lfs, &cwd, (const lfs_block_t[2]){0, 1});
+ if (err) {
+ return err;
+ }
+
+ // iterate over all directory entries
+ lfs1_entry_t entry;
+ while (!lfs_pair_isnull(cwd.d.tail)) {
+ err = lfs1_dir_fetch(lfs, &cwd, cwd.d.tail);
+ if (err) {
+ return err;
+ }
+
+ while (true) {
+ err = lfs1_dir_next(lfs, &cwd, &entry);
+ if (err && err != LFS_ERR_NOENT) {
+ return err;
+ }
+
+ if (err == LFS_ERR_NOENT) {
+ break;
+ }
+
+ if (!(0x80 & entry.d.type) &&
+ memcmp(&entry.d.u, e, sizeof(entry.d.u)) == 0) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+/// Filesystem operations ///
+static int lfs1_mount(lfs_t *lfs, struct lfs1 *lfs1,
+ const struct lfs_config *cfg) {
+ int err = 0;
+ {
+ err = lfs_init(lfs, cfg);
+ if (err) {
+ return err;
+ }
+
+ lfs->lfs1 = lfs1;
+ lfs->lfs1->root[0] = LFS_BLOCK_NULL;
+ lfs->lfs1->root[1] = LFS_BLOCK_NULL;
+
+ // setup free lookahead
+ lfs->lookahead.start = 0;
+ lfs->lookahead.size = 0;
+ lfs->lookahead.next = 0;
+ lfs_alloc_ckpoint(lfs);
+
+ // load superblock
+ lfs1_dir_t dir;
+ lfs1_superblock_t superblock;
+ err = lfs1_dir_fetch(lfs, &dir, (const lfs_block_t[2]){0, 1});
+ if (err && err != LFS_ERR_CORRUPT) {
+ goto cleanup;
+ }
+
+ if (!err) {
+ err = lfs1_bd_read(lfs, dir.pair[0], sizeof(dir.d),
+ &superblock.d, sizeof(superblock.d));
+ lfs1_superblock_fromle32(&superblock.d);
+ if (err) {
+ goto cleanup;
+ }
+
+ lfs->lfs1->root[0] = superblock.d.root[0];
+ lfs->lfs1->root[1] = superblock.d.root[1];
+ }
+
+ if (err || memcmp(superblock.d.magic, "littlefs", 8) != 0) {
+ LFS_ERROR("Invalid superblock at {0x%"PRIx32", 0x%"PRIx32"}",
+ 0, 1);
+ err = LFS_ERR_CORRUPT;
+ goto cleanup;
+ }
+
+ uint16_t major_version = (0xffff & (superblock.d.version >> 16));
+ uint16_t minor_version = (0xffff & (superblock.d.version >> 0));
+ if ((major_version != LFS1_DISK_VERSION_MAJOR ||
+ minor_version > LFS1_DISK_VERSION_MINOR)) {
+ LFS_ERROR("Invalid version v%d.%d", major_version, minor_version);
+ err = LFS_ERR_INVAL;
+ goto cleanup;
+ }
+
+ return 0;
+ }
+
+cleanup:
+ lfs_deinit(lfs);
+ return err;
+}
+
+static int lfs1_unmount(lfs_t *lfs) {
+ return lfs_deinit(lfs);
+}
+
+/// v1 migration ///
+static int lfs_migrate_(lfs_t *lfs, const struct lfs_config *cfg) {
+ struct lfs1 lfs1;
+
+ // Indeterminate filesystem size not allowed for migration.
+ LFS_ASSERT(cfg->block_count != 0);
+
+ int err = lfs1_mount(lfs, &lfs1, cfg);
+ if (err) {
+ return err;
+ }
+
+ {
+ // iterate through each directory, copying over entries
+ // into new directory
+ lfs1_dir_t dir1;
+ lfs_mdir_t dir2;
+ dir1.d.tail[0] = lfs->lfs1->root[0];
+ dir1.d.tail[1] = lfs->lfs1->root[1];
+ while (!lfs_pair_isnull(dir1.d.tail)) {
+ // iterate old dir
+ err = lfs1_dir_fetch(lfs, &dir1, dir1.d.tail);
+ if (err) {
+ goto cleanup;
+ }
+
+ // create new dir and bind as temporary pretend root
+ err = lfs_dir_alloc(lfs, &dir2);
+ if (err) {
+ goto cleanup;
+ }
+
+ dir2.rev = dir1.d.rev;
+ dir1.head[0] = dir1.pair[0];
+ dir1.head[1] = dir1.pair[1];
+ lfs->root[0] = dir2.pair[0];
+ lfs->root[1] = dir2.pair[1];
+
+ err = lfs_dir_commit(lfs, &dir2, NULL, 0);
+ if (err) {
+ goto cleanup;
+ }
+
+ while (true) {
+ lfs1_entry_t entry1;
+ err = lfs1_dir_next(lfs, &dir1, &entry1);
+ if (err && err != LFS_ERR_NOENT) {
+ goto cleanup;
+ }
+
+ if (err == LFS_ERR_NOENT) {
+ break;
+ }
+
+ // check that entry has not been moved
+ if (entry1.d.type & 0x80) {
+ int moved = lfs1_moved(lfs, &entry1.d.u);
+ if (moved < 0) {
+ err = moved;
+ goto cleanup;
+ }
+
+ if (moved) {
+ continue;
+ }
+
+ entry1.d.type &= ~0x80;
+ }
+
+ // also fetch name
+ char name[LFS_NAME_MAX+1];
+ memset(name, 0, sizeof(name));
+ err = lfs1_bd_read(lfs, dir1.pair[0],
+ entry1.off + 4+entry1.d.elen+entry1.d.alen,
+ name, entry1.d.nlen);
+ if (err) {
+ goto cleanup;
+ }
+
+ bool isdir = (entry1.d.type == LFS1_TYPE_DIR);
+
+ // create entry in new dir
+ err = lfs_dir_fetch(lfs, &dir2, lfs->root);
+ if (err) {
+ goto cleanup;
+ }
+
+ uint16_t id;
+ err = lfs_dir_find(lfs, &dir2, &(const char*){name}, &id);
+ if (!(err == LFS_ERR_NOENT && id != 0x3ff)) {
+ err = (err < 0) ? err : LFS_ERR_EXIST;
+ goto cleanup;
+ }
+
+ lfs1_entry_tole32(&entry1.d);
+ err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_CREATE, id, 0), NULL},
+ {LFS_MKTAG_IF_ELSE(isdir,
+ LFS_TYPE_DIR, id, entry1.d.nlen,
+ LFS_TYPE_REG, id, entry1.d.nlen),
+ name},
+ {LFS_MKTAG_IF_ELSE(isdir,
+ LFS_TYPE_DIRSTRUCT, id, sizeof(entry1.d.u),
+ LFS_TYPE_CTZSTRUCT, id, sizeof(entry1.d.u)),
+ &entry1.d.u}));
+ lfs1_entry_fromle32(&entry1.d);
+ if (err) {
+ goto cleanup;
+ }
+ }
+
+ if (!lfs_pair_isnull(dir1.d.tail)) {
+ // find last block and update tail to thread into fs
+ err = lfs_dir_fetch(lfs, &dir2, lfs->root);
+ if (err) {
+ goto cleanup;
+ }
+
+ while (dir2.split) {
+ err = lfs_dir_fetch(lfs, &dir2, dir2.tail);
+ if (err) {
+ goto cleanup;
+ }
+ }
+
+ lfs_pair_tole32(dir2.pair);
+ err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir1.d.tail}));
+ lfs_pair_fromle32(dir2.pair);
+ if (err) {
+ goto cleanup;
+ }
+ }
+
+ // Copy over first block to thread into fs. Unfortunately
+ // if this fails there is not much we can do.
+ LFS_DEBUG("Migrating {0x%"PRIx32", 0x%"PRIx32"} "
+ "-> {0x%"PRIx32", 0x%"PRIx32"}",
+ lfs->root[0], lfs->root[1], dir1.head[0], dir1.head[1]);
+
+ err = lfs_bd_erase(lfs, dir1.head[1]);
+ if (err) {
+ goto cleanup;
+ }
+
+ err = lfs_dir_fetch(lfs, &dir2, lfs->root);
+ if (err) {
+ goto cleanup;
+ }
+
+ for (lfs_off_t i = 0; i < dir2.off; i++) {
+ uint8_t dat;
+ err = lfs_bd_read(lfs,
+ NULL, &lfs->rcache, dir2.off,
+ dir2.pair[0], i, &dat, 1);
+ if (err) {
+ goto cleanup;
+ }
+
+ err = lfs_bd_prog(lfs,
+ &lfs->pcache, &lfs->rcache, true,
+ dir1.head[1], i, &dat, 1);
+ if (err) {
+ goto cleanup;
+ }
+ }
+
+ err = lfs_bd_flush(lfs, &lfs->pcache, &lfs->rcache, true);
+ if (err) {
+ goto cleanup;
+ }
+ }
+
+ // Create new superblock. This marks a successful migration!
+ err = lfs1_dir_fetch(lfs, &dir1, (const lfs_block_t[2]){0, 1});
+ if (err) {
+ goto cleanup;
+ }
+
+ dir2.pair[0] = dir1.pair[0];
+ dir2.pair[1] = dir1.pair[1];
+ dir2.rev = dir1.d.rev;
+ dir2.off = sizeof(dir2.rev);
+ dir2.etag = 0xffffffff;
+ dir2.count = 0;
+ dir2.tail[0] = lfs->lfs1->root[0];
+ dir2.tail[1] = lfs->lfs1->root[1];
+ dir2.erased = false;
+ dir2.split = true;
+
+ lfs_superblock_t superblock = {
+ .version = LFS_DISK_VERSION,
+ .block_size = lfs->cfg->block_size,
+ .block_count = lfs->cfg->block_count,
+ .name_max = lfs->name_max,
+ .file_max = lfs->file_max,
+ .attr_max = lfs->attr_max,
+ };
+
+ lfs_superblock_tole32(&superblock);
+ err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0), NULL},
+ {LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), "littlefs"},
+ {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
+ &superblock}));
+ if (err) {
+ goto cleanup;
+ }
+
+ // sanity check that fetch works
+ err = lfs_dir_fetch(lfs, &dir2, (const lfs_block_t[2]){0, 1});
+ if (err) {
+ goto cleanup;
+ }
+
+ // force compaction to prevent accidentally mounting v1
+ dir2.erased = false;
+ err = lfs_dir_commit(lfs, &dir2, NULL, 0);
+ if (err) {
+ goto cleanup;
+ }
+ }
+
+cleanup:
+ lfs1_unmount(lfs);
+ return err;
+}
+
+#endif
+
+
+/// Public API wrappers ///
+
+// Here we can add tracing/thread safety easily
+
+// Thread-safe wrappers if enabled
+#ifdef LFS_THREADSAFE
+#define LFS_LOCK(cfg) cfg->lock(cfg)
+#define LFS_UNLOCK(cfg) cfg->unlock(cfg)
+#else
+#define LFS_LOCK(cfg) ((void)cfg, 0)
+#define LFS_UNLOCK(cfg) ((void)cfg)
+#endif
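+
+// A minimal sketch of the lock/unlock callbacks these macros expect when
+// LFS_THREADSAFE is enabled. This example is illustrative, not part of
+// littlefs: it assumes an RT-Thread port and a hypothetical mutex named
+// lfs_mutex created by the user before mount:
+//
+//   static rt_mutex_t lfs_mutex;  // rt_mutex_create("lfs", RT_IPC_FLAG_PRIO)
+//
+//   static int user_lock(const struct lfs_config *c) {
+//       (void)c;
+//       // rt_mutex_take returns RT_EOK (0) on success
+//       return (rt_mutex_take(lfs_mutex, RT_WAITING_FOREVER) == RT_EOK)
+//               ? 0 : LFS_ERR_IO;
+//   }
+//
+//   static int user_unlock(const struct lfs_config *c) {
+//       (void)c;
+//       rt_mutex_release(lfs_mutex);
+//       return 0;
+//   }
+//
+// The callbacks are then wired into cfg.lock and cfg.unlock.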
+
+// Public API
+#ifndef LFS_READONLY
+int lfs_format(lfs_t *lfs, const struct lfs_config *cfg) {
+ int err = LFS_LOCK(cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_format(%p, %p {.context=%p, "
+ ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
+ ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
+ ".block_size=%"PRIu32", .block_count=%"PRIu32", "
+ ".block_cycles=%"PRId32", .cache_size=%"PRIu32", "
+ ".lookahead_size=%"PRIu32", .read_buffer=%p, "
+ ".prog_buffer=%p, .lookahead_buffer=%p, "
+ ".name_max=%"PRIu32", .file_max=%"PRIu32", "
+ ".attr_max=%"PRIu32"})",
+ (void*)lfs, (void*)cfg, cfg->context,
+ (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
+ (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
+ cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
+ cfg->block_cycles, cfg->cache_size, cfg->lookahead_size,
+ cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer,
+ cfg->name_max, cfg->file_max, cfg->attr_max);
+
+ err = lfs_format_(lfs, cfg);
+
+ LFS_TRACE("lfs_format -> %d", err);
+ LFS_UNLOCK(cfg);
+ return err;
+}
+#endif
+
+int lfs_mount(lfs_t *lfs, const struct lfs_config *cfg) {
+ int err = LFS_LOCK(cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_mount(%p, %p {.context=%p, "
+ ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
+ ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
+ ".block_size=%"PRIu32", .block_count=%"PRIu32", "
+ ".block_cycles=%"PRId32", .cache_size=%"PRIu32", "
+ ".lookahead_size=%"PRIu32", .read_buffer=%p, "
+ ".prog_buffer=%p, .lookahead_buffer=%p, "
+ ".name_max=%"PRIu32", .file_max=%"PRIu32", "
+ ".attr_max=%"PRIu32"})",
+ (void*)lfs, (void*)cfg, cfg->context,
+ (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
+ (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
+ cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
+ cfg->block_cycles, cfg->cache_size, cfg->lookahead_size,
+ cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer,
+ cfg->name_max, cfg->file_max, cfg->attr_max);
+
+ err = lfs_mount_(lfs, cfg);
+
+ LFS_TRACE("lfs_mount -> %d", err);
+ LFS_UNLOCK(cfg);
+ return err;
+}
+
+int lfs_unmount(lfs_t *lfs) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_unmount(%p)", (void*)lfs);
+
+ err = lfs_unmount_(lfs);
+
+ LFS_TRACE("lfs_unmount -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+
+#ifndef LFS_READONLY
+int lfs_remove(lfs_t *lfs, const char *path) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_remove(%p, \"%s\")", (void*)lfs, path);
+
+ err = lfs_remove_(lfs, path);
+
+ LFS_TRACE("lfs_remove -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+#endif
+
+#ifndef LFS_READONLY
+int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_rename(%p, \"%s\", \"%s\")", (void*)lfs, oldpath, newpath);
+
+ err = lfs_rename_(lfs, oldpath, newpath);
+
+ LFS_TRACE("lfs_rename -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+#endif
+
+int lfs_stat(lfs_t *lfs, const char *path, struct lfs_info *info) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_stat(%p, \"%s\", %p)", (void*)lfs, path, (void*)info);
+
+ err = lfs_stat_(lfs, path, info);
+
+ LFS_TRACE("lfs_stat -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+
+lfs_ssize_t lfs_getattr(lfs_t *lfs, const char *path,
+ uint8_t type, void *buffer, lfs_size_t size) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_getattr(%p, \"%s\", %"PRIu8", %p, %"PRIu32")",
+ (void*)lfs, path, type, buffer, size);
+
+ lfs_ssize_t res = lfs_getattr_(lfs, path, type, buffer, size);
+
+ LFS_TRACE("lfs_getattr -> %"PRId32, res);
+ LFS_UNLOCK(lfs->cfg);
+ return res;
+}
+
+#ifndef LFS_READONLY
+int lfs_setattr(lfs_t *lfs, const char *path,
+ uint8_t type, const void *buffer, lfs_size_t size) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_setattr(%p, \"%s\", %"PRIu8", %p, %"PRIu32")",
+ (void*)lfs, path, type, buffer, size);
+
+ err = lfs_setattr_(lfs, path, type, buffer, size);
+
+ LFS_TRACE("lfs_setattr -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+#endif
+
+#ifndef LFS_READONLY
+int lfs_removeattr(lfs_t *lfs, const char *path, uint8_t type) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_removeattr(%p, \"%s\", %"PRIu8")", (void*)lfs, path, type);
+
+ err = lfs_removeattr_(lfs, path, type);
+
+ LFS_TRACE("lfs_removeattr -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+#endif
+
+#ifndef LFS_NO_MALLOC
+int lfs_file_open(lfs_t *lfs, lfs_file_t *file, const char *path, int flags) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_file_open(%p, %p, \"%s\", %x)",
+ (void*)lfs, (void*)file, path, (unsigned)flags);
+ LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
+
+ err = lfs_file_open_(lfs, file, path, flags);
+
+ LFS_TRACE("lfs_file_open -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+#endif
+
+int lfs_file_opencfg(lfs_t *lfs, lfs_file_t *file,
+ const char *path, int flags,
+ const struct lfs_file_config *cfg) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_file_opencfg(%p, %p, \"%s\", %x, %p {"
+ ".buffer=%p, .attrs=%p, .attr_count=%"PRIu32"})",
+ (void*)lfs, (void*)file, path, (unsigned)flags,
+ (void*)cfg, cfg->buffer, (void*)cfg->attrs, cfg->attr_count);
+ LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
+
+ err = lfs_file_opencfg_(lfs, file, path, flags, cfg);
+
+ LFS_TRACE("lfs_file_opencfg -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+
+int lfs_file_close(lfs_t *lfs, lfs_file_t *file) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_file_close(%p, %p)", (void*)lfs, (void*)file);
+ LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
+
+ err = lfs_file_close_(lfs, file);
+
+ LFS_TRACE("lfs_file_close -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+
+#ifndef LFS_READONLY
+int lfs_file_sync(lfs_t *lfs, lfs_file_t *file) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_file_sync(%p, %p)", (void*)lfs, (void*)file);
+ LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
+
+ err = lfs_file_sync_(lfs, file);
+
+ LFS_TRACE("lfs_file_sync -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+#endif
+
+lfs_ssize_t lfs_file_read(lfs_t *lfs, lfs_file_t *file,
+ void *buffer, lfs_size_t size) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_file_read(%p, %p, %p, %"PRIu32")",
+ (void*)lfs, (void*)file, buffer, size);
+ LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
+
+ lfs_ssize_t res = lfs_file_read_(lfs, file, buffer, size);
+
+ LFS_TRACE("lfs_file_read -> %"PRId32, res);
+ LFS_UNLOCK(lfs->cfg);
+ return res;
+}
+
+#ifndef LFS_READONLY
+lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file,
+ const void *buffer, lfs_size_t size) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_file_write(%p, %p, %p, %"PRIu32")",
+ (void*)lfs, (void*)file, buffer, size);
+ LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
+
+ lfs_ssize_t res = lfs_file_write_(lfs, file, buffer, size);
+
+ LFS_TRACE("lfs_file_write -> %"PRId32, res);
+ LFS_UNLOCK(lfs->cfg);
+ return res;
+}
+#endif
+
+lfs_soff_t lfs_file_seek(lfs_t *lfs, lfs_file_t *file,
+ lfs_soff_t off, int whence) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_file_seek(%p, %p, %"PRId32", %d)",
+ (void*)lfs, (void*)file, off, whence);
+ LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
+
+ lfs_soff_t res = lfs_file_seek_(lfs, file, off, whence);
+
+ LFS_TRACE("lfs_file_seek -> %"PRId32, res);
+ LFS_UNLOCK(lfs->cfg);
+ return res;
+}
+
+#ifndef LFS_READONLY
+int lfs_file_truncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_file_truncate(%p, %p, %"PRIu32")",
+ (void*)lfs, (void*)file, size);
+ LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
+
+ err = lfs_file_truncate_(lfs, file, size);
+
+ LFS_TRACE("lfs_file_truncate -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+#endif
+
+lfs_soff_t lfs_file_tell(lfs_t *lfs, lfs_file_t *file) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_file_tell(%p, %p)", (void*)lfs, (void*)file);
+ LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
+
+ lfs_soff_t res = lfs_file_tell_(lfs, file);
+
+ LFS_TRACE("lfs_file_tell -> %"PRId32, res);
+ LFS_UNLOCK(lfs->cfg);
+ return res;
+}
+
+int lfs_file_rewind(lfs_t *lfs, lfs_file_t *file) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_file_rewind(%p, %p)", (void*)lfs, (void*)file);
+
+ err = lfs_file_rewind_(lfs, file);
+
+ LFS_TRACE("lfs_file_rewind -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+
+lfs_soff_t lfs_file_size(lfs_t *lfs, lfs_file_t *file) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_file_size(%p, %p)", (void*)lfs, (void*)file);
+ LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
+
+ lfs_soff_t res = lfs_file_size_(lfs, file);
+
+ LFS_TRACE("lfs_file_size -> %"PRIu32, res);
+ LFS_UNLOCK(lfs->cfg);
+ return res;
+}
+
+#ifndef LFS_READONLY
+int lfs_mkdir(lfs_t *lfs, const char *path) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_mkdir(%p, \"%s\")", (void*)lfs, path);
+
+ err = lfs_mkdir_(lfs, path);
+
+ LFS_TRACE("lfs_mkdir -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+#endif
+
+int lfs_dir_open(lfs_t *lfs, lfs_dir_t *dir, const char *path) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_dir_open(%p, %p, \"%s\")", (void*)lfs, (void*)dir, path);
+ LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)dir));
+
+ err = lfs_dir_open_(lfs, dir, path);
+
+ LFS_TRACE("lfs_dir_open -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+
+int lfs_dir_close(lfs_t *lfs, lfs_dir_t *dir) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_dir_close(%p, %p)", (void*)lfs, (void*)dir);
+
+ err = lfs_dir_close_(lfs, dir);
+
+ LFS_TRACE("lfs_dir_close -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+
+int lfs_dir_read(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_dir_read(%p, %p, %p)",
+ (void*)lfs, (void*)dir, (void*)info);
+
+ err = lfs_dir_read_(lfs, dir, info);
+
+ LFS_TRACE("lfs_dir_read -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+
+int lfs_dir_seek(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_dir_seek(%p, %p, %"PRIu32")",
+ (void*)lfs, (void*)dir, off);
+
+ err = lfs_dir_seek_(lfs, dir, off);
+
+ LFS_TRACE("lfs_dir_seek -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+
+lfs_soff_t lfs_dir_tell(lfs_t *lfs, lfs_dir_t *dir) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_dir_tell(%p, %p)", (void*)lfs, (void*)dir);
+
+ lfs_soff_t res = lfs_dir_tell_(lfs, dir);
+
+ LFS_TRACE("lfs_dir_tell -> %"PRId32, res);
+ LFS_UNLOCK(lfs->cfg);
+ return res;
+}
+
+int lfs_dir_rewind(lfs_t *lfs, lfs_dir_t *dir) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_dir_rewind(%p, %p)", (void*)lfs, (void*)dir);
+
+ err = lfs_dir_rewind_(lfs, dir);
+
+ LFS_TRACE("lfs_dir_rewind -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+
+int lfs_fs_stat(lfs_t *lfs, struct lfs_fsinfo *fsinfo) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_fs_stat(%p, %p)", (void*)lfs, (void*)fsinfo);
+
+ err = lfs_fs_stat_(lfs, fsinfo);
+
+ LFS_TRACE("lfs_fs_stat -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+
+lfs_ssize_t lfs_fs_size(lfs_t *lfs) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_fs_size(%p)", (void*)lfs);
+
+ lfs_ssize_t res = lfs_fs_size_(lfs);
+
+ LFS_TRACE("lfs_fs_size -> %"PRId32, res);
+ LFS_UNLOCK(lfs->cfg);
+ return res;
+}
+
+int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void *, lfs_block_t), void *data) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_fs_traverse(%p, %p, %p)",
+ (void*)lfs, (void*)(uintptr_t)cb, data);
+
+ err = lfs_fs_traverse_(lfs, cb, data, true);
+
+ LFS_TRACE("lfs_fs_traverse -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+
+#ifndef LFS_READONLY
+int lfs_fs_mkconsistent(lfs_t *lfs) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_fs_mkconsistent(%p)", (void*)lfs);
+
+ err = lfs_fs_mkconsistent_(lfs);
+
+ LFS_TRACE("lfs_fs_mkconsistent -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+#endif
+
+#ifndef LFS_READONLY
+int lfs_fs_gc(lfs_t *lfs) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_fs_gc(%p)", (void*)lfs);
+
+ err = lfs_fs_gc_(lfs);
+
+ LFS_TRACE("lfs_fs_gc -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+#endif
+
+#ifndef LFS_READONLY
+int lfs_fs_grow(lfs_t *lfs, lfs_size_t block_count) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_fs_grow(%p, %"PRIu32")", (void*)lfs, block_count);
+
+ err = lfs_fs_grow_(lfs, block_count);
+
+ LFS_TRACE("lfs_fs_grow -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+#endif
+
+#ifdef LFS_MIGRATE
+int lfs_migrate(lfs_t *lfs, const struct lfs_config *cfg) {
+ int err = LFS_LOCK(cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_migrate(%p, %p {.context=%p, "
+ ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
+ ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
+ ".block_size=%"PRIu32", .block_count=%"PRIu32", "
+ ".block_cycles=%"PRId32", .cache_size=%"PRIu32", "
+ ".lookahead_size=%"PRIu32", .read_buffer=%p, "
+ ".prog_buffer=%p, .lookahead_buffer=%p, "
+ ".name_max=%"PRIu32", .file_max=%"PRIu32", "
+ ".attr_max=%"PRIu32"})",
+ (void*)lfs, (void*)cfg, cfg->context,
+ (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
+ (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
+ cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
+ cfg->block_cycles, cfg->cache_size, cfg->lookahead_size,
+ cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer,
+ cfg->name_max, cfg->file_max, cfg->attr_max);
+
+ err = lfs_migrate_(lfs, cfg);
+
+ LFS_TRACE("lfs_migrate -> %d", err);
+ LFS_UNLOCK(cfg);
+ return err;
+}
+#endif
+
diff --git a/packages/littlefs-v2.11.2/lfs.h b/packages/littlefs-v2.11.2/lfs.h
new file mode 100644
index 0000000..215309c
--- /dev/null
+++ b/packages/littlefs-v2.11.2/lfs.h
@@ -0,0 +1,801 @@
+/*
+ * The little filesystem
+ *
+ * Copyright (c) 2022, The littlefs authors.
+ * Copyright (c) 2017, Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef LFS_H
+#define LFS_H
+
+#include "lfs_util.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+/// Version info ///
+
+// Software library version
+// Major (top-nibble), incremented on backwards incompatible changes
+// Minor (bottom-nibble), incremented on feature additions
+#define LFS_VERSION 0x0002000b
+#define LFS_VERSION_MAJOR (0xffff & (LFS_VERSION >> 16))
+#define LFS_VERSION_MINOR (0xffff & (LFS_VERSION >> 0))
+
+// Version of On-disk data structures
+// Major (top-nibble), incremented on backwards incompatible changes
+// Minor (bottom-nibble), incremented on feature additions
+#define LFS_DISK_VERSION 0x00020001
+#define LFS_DISK_VERSION_MAJOR (0xffff & (LFS_DISK_VERSION >> 16))
+#define LFS_DISK_VERSION_MINOR (0xffff & (LFS_DISK_VERSION >> 0))
+
+
+/// Definitions ///
+
+// Type definitions
+typedef uint32_t lfs_size_t;
+typedef uint32_t lfs_off_t;
+
+typedef int32_t lfs_ssize_t;
+typedef int32_t lfs_soff_t;
+
+typedef uint32_t lfs_block_t;
+
+// Maximum name size in bytes, may be redefined to reduce the size of the
+// info struct. Limited to <= 1022. Stored in superblock and must be
+// respected by other littlefs drivers.
+#ifndef LFS_NAME_MAX
+#define LFS_NAME_MAX 255
+#endif
+
+// Maximum size of a file in bytes. May be redefined to a smaller limit to
+// support other drivers. Limited on disk to <= 2147483647. Stored in the
+// superblock and must be respected by other littlefs drivers.
+#ifndef LFS_FILE_MAX
+#define LFS_FILE_MAX 2147483647
+#endif
+
+// Maximum size of custom attributes in bytes, may be redefined, but there is
+// no real benefit to using a smaller LFS_ATTR_MAX. Limited to <= 1022. Stored
+// in superblock and must be respected by other littlefs drivers.
+#ifndef LFS_ATTR_MAX
+#define LFS_ATTR_MAX 1022
+#endif
+
+// Possible error codes, these are negative to allow
+// valid positive return values
+enum lfs_error {
+ LFS_ERR_OK = 0, // No error
+ LFS_ERR_IO = -5, // Error during device operation
+ LFS_ERR_CORRUPT = -84, // Corrupted
+ LFS_ERR_NOENT = -2, // No directory entry
+ LFS_ERR_EXIST = -17, // Entry already exists
+ LFS_ERR_NOTDIR = -20, // Entry is not a dir
+ LFS_ERR_ISDIR = -21, // Entry is a dir
+ LFS_ERR_NOTEMPTY = -39, // Dir is not empty
+ LFS_ERR_BADF = -9, // Bad file number
+ LFS_ERR_FBIG = -27, // File too large
+ LFS_ERR_INVAL = -22, // Invalid parameter
+ LFS_ERR_NOSPC = -28, // No space left on device
+ LFS_ERR_NOMEM = -12, // No more memory available
+ LFS_ERR_NOATTR = -61, // No data/attr available
+ LFS_ERR_NAMETOOLONG = -36, // File name too long
+};
+
+// File types
+enum lfs_type {
+ // file types
+ LFS_TYPE_REG = 0x001,
+ LFS_TYPE_DIR = 0x002,
+
+ // internally used types
+ LFS_TYPE_SPLICE = 0x400,
+ LFS_TYPE_NAME = 0x000,
+ LFS_TYPE_STRUCT = 0x200,
+ LFS_TYPE_USERATTR = 0x300,
+ LFS_TYPE_FROM = 0x100,
+ LFS_TYPE_TAIL = 0x600,
+ LFS_TYPE_GLOBALS = 0x700,
+ LFS_TYPE_CRC = 0x500,
+
+ // internally used type specializations
+ LFS_TYPE_CREATE = 0x401,
+ LFS_TYPE_DELETE = 0x4ff,
+ LFS_TYPE_SUPERBLOCK = 0x0ff,
+ LFS_TYPE_DIRSTRUCT = 0x200,
+ LFS_TYPE_CTZSTRUCT = 0x202,
+ LFS_TYPE_INLINESTRUCT = 0x201,
+ LFS_TYPE_SOFTTAIL = 0x600,
+ LFS_TYPE_HARDTAIL = 0x601,
+ LFS_TYPE_MOVESTATE = 0x7ff,
+ LFS_TYPE_CCRC = 0x500,
+ LFS_TYPE_FCRC = 0x5ff,
+
+ // internal chip sources
+ LFS_FROM_NOOP = 0x000,
+ LFS_FROM_MOVE = 0x101,
+ LFS_FROM_USERATTRS = 0x102,
+};
+
+// File open flags
+enum lfs_open_flags {
+ // open flags
+ LFS_O_RDONLY = 1, // Open a file as read only
+#ifndef LFS_READONLY
+ LFS_O_WRONLY = 2, // Open a file as write only
+ LFS_O_RDWR = 3, // Open a file as read and write
+ LFS_O_CREAT = 0x0100, // Create a file if it does not exist
+ LFS_O_EXCL = 0x0200, // Fail if a file already exists
+ LFS_O_TRUNC = 0x0400, // Truncate the existing file to zero size
+ LFS_O_APPEND = 0x0800, // Move to end of file on every write
+#endif
+
+ // internally used flags
+#ifndef LFS_READONLY
+ LFS_F_DIRTY = 0x010000, // File does not match storage
+ LFS_F_WRITING = 0x020000, // File has been written since last flush
+#endif
+ LFS_F_READING = 0x040000, // File has been read since last flush
+#ifndef LFS_READONLY
+ LFS_F_ERRED = 0x080000, // An error occurred during write
+#endif
+ LFS_F_INLINE = 0x100000, // Currently inlined in directory entry
+};
+
+// File seek flags
+enum lfs_whence_flags {
+ LFS_SEEK_SET = 0, // Seek relative to an absolute position
+ LFS_SEEK_CUR = 1, // Seek relative to the current file position
+ LFS_SEEK_END = 2, // Seek relative to the end of the file
+};
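+
+// For example, reading the trailing 16 bytes of a file with a relative seek
+// (a sketch; the file must already be open and at least 16 bytes long):
+//
+//   uint8_t buf[16];
+//   lfs_file_seek(&lfs, &file, -16, LFS_SEEK_END);
+//   lfs_file_read(&lfs, &file, buf, sizeof(buf));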
+
+
+// Configuration provided during initialization of the littlefs
+struct lfs_config {
+ // Opaque user provided context that can be used to pass
+ // information to the block device operations
+ void *context;
+
+ // Read a region in a block. Negative error codes are propagated
+ // to the user.
+ int (*read)(const struct lfs_config *c, lfs_block_t block,
+ lfs_off_t off, void *buffer, lfs_size_t size);
+
+ // Program a region in a block. The block must have previously
+ // been erased. Negative error codes are propagated to the user.
+ // May return LFS_ERR_CORRUPT if the block should be considered bad.
+ int (*prog)(const struct lfs_config *c, lfs_block_t block,
+ lfs_off_t off, const void *buffer, lfs_size_t size);
+
+ // Erase a block. A block must be erased before being programmed.
+ // The state of an erased block is undefined. Negative error codes
+ // are propagated to the user.
+ // May return LFS_ERR_CORRUPT if the block should be considered bad.
+ int (*erase)(const struct lfs_config *c, lfs_block_t block);
+
+ // Sync the state of the underlying block device. Negative error codes
+ // are propagated to the user.
+ int (*sync)(const struct lfs_config *c);
+
+#ifdef LFS_THREADSAFE
+ // Lock the underlying block device. Negative error codes
+ // are propagated to the user.
+ int (*lock)(const struct lfs_config *c);
+
+ // Unlock the underlying block device. Negative error codes
+ // are propagated to the user.
+ int (*unlock)(const struct lfs_config *c);
+#endif
+
+ // Minimum size of a block read in bytes. All read operations will be a
+ // multiple of this value.
+ lfs_size_t read_size;
+
+ // Minimum size of a block program in bytes. All program operations will be
+ // a multiple of this value.
+ lfs_size_t prog_size;
+
+ // Size of an erasable block in bytes. This does not impact ram consumption
+ // and may be larger than the physical erase size. However, non-inlined
+ // files take up at minimum one block. Must be a multiple of the read and
+ // program sizes.
+ lfs_size_t block_size;
+
+ // Number of erasable blocks on the device. Defaults to block_count stored
+ // on disk when zero.
+ lfs_size_t block_count;
+
+ // Number of erase cycles before littlefs evicts metadata logs and moves
+ // the metadata to another block. Suggested values are in the
+ // range 100-1000, with large values having better performance at the cost
+ // of less consistent wear distribution.
+ //
+ // Set to -1 to disable block-level wear-leveling.
+ int32_t block_cycles;
+
+ // Size of block caches in bytes. Each cache buffers a portion of a block in
+ // RAM. The littlefs needs a read cache, a program cache, and one additional
+ // cache per file. Larger caches can improve performance by storing more
+ // data and reducing the number of disk accesses. Must be a multiple of the
+ // read and program sizes, and a factor of the block size.
+ lfs_size_t cache_size;
+
+ // Size of the lookahead buffer in bytes. A larger lookahead buffer
+ // increases the number of blocks found during an allocation pass. The
+ // lookahead buffer is stored as a compact bitmap, so each byte of RAM
+ // can track 8 blocks.
+ lfs_size_t lookahead_size;
+
+ // Threshold for metadata compaction during lfs_fs_gc in bytes. Metadata
+ // pairs that exceed this threshold will be compacted during lfs_fs_gc.
+ // Defaults to ~88% block_size when zero, though the default may change
+ // in the future.
+ //
+ // Note this only affects lfs_fs_gc. Normal compactions still only occur
+ // when full.
+ //
+ // Set to -1 to disable metadata compaction during lfs_fs_gc.
+ lfs_size_t compact_thresh;
+
+ // Optional statically allocated read buffer. Must be cache_size.
+ // By default lfs_malloc is used to allocate this buffer.
+ void *read_buffer;
+
+ // Optional statically allocated program buffer. Must be cache_size.
+ // By default lfs_malloc is used to allocate this buffer.
+ void *prog_buffer;
+
+ // Optional statically allocated lookahead buffer. Must be lookahead_size.
+ // By default lfs_malloc is used to allocate this buffer.
+ void *lookahead_buffer;
+
+ // Optional upper limit on length of file names in bytes. No downside for
+ // larger names except the size of the info struct which is controlled by
+ // the LFS_NAME_MAX define. Defaults to LFS_NAME_MAX or name_max stored on
+ // disk when zero.
+ lfs_size_t name_max;
+
+ // Optional upper limit on files in bytes. No downside for larger files
+ // but must be <= LFS_FILE_MAX. Defaults to LFS_FILE_MAX or file_max stored
+ // on disk when zero.
+ lfs_size_t file_max;
+
+ // Optional upper limit on custom attributes in bytes. No downside for
+ // larger attributes size but must be <= LFS_ATTR_MAX. Defaults to
+ // LFS_ATTR_MAX or attr_max stored on disk when zero.
+ lfs_size_t attr_max;
+
+ // Optional upper limit on total space given to metadata pairs in bytes. On
+ // devices with large blocks (e.g. 128kB) setting this to a low size (2-8kB)
+ // can help bound the metadata compaction time. Must be <= block_size.
+ // Defaults to block_size when zero.
+ lfs_size_t metadata_max;
+
+ // Optional upper limit on inlined files in bytes. Inlined files live in
+ // metadata and decrease storage requirements, but may be limited to
+ // improve metadata-related performance. Must be <= cache_size, <=
+ // attr_max, and <= block_size/8. Defaults to the largest possible
+ // inline_max when zero.
+ //
+ // Set to -1 to disable inlined files.
+ lfs_size_t inline_max;
+
+#ifdef LFS_MULTIVERSION
+ // On-disk version to use when writing in the form of 16-bit major version
+ // + 16-bit minor version. This limits metadata to what is supported by
+ // older minor versions. Note that some features will be lost. Defaults
+ // to the most recent minor version when zero.
+ uint32_t disk_version;
+#endif
+};
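+
+// A minimal sketch of a configuration for a small SPI NOR flash. The
+// geometry numbers and the nor_* callbacks are assumptions standing in for
+// a real block device driver; unset fields stay zero for defaults:
+//
+//   static const struct lfs_config cfg = {
+//       .read  = nor_read,    // hypothetical driver callbacks
+//       .prog  = nor_prog,
+//       .erase = nor_erase,
+//       .sync  = nor_sync,
+//
+//       .read_size      = 256,   // minimum read granularity
+//       .prog_size      = 256,   // page program granularity
+//       .block_size     = 4096,  // erase sector size
+//       .block_count    = 4096,  // e.g. 16MiB / 4KiB sectors
+//       .cache_size     = 256,   // multiple of read/prog sizes
+//       .lookahead_size = 128,   // each byte tracks 8 blocks
+//       .block_cycles   = 500,   // wear-leveling eviction threshold
+//   };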
+
+// File info structure
+struct lfs_info {
+ // Type of the file, either LFS_TYPE_REG or LFS_TYPE_DIR
+ uint8_t type;
+
+ // Size of the file, only valid for REG files. Limited to 32-bits.
+ lfs_size_t size;
+
+ // Name of the file stored as a null-terminated string. Limited to
+ // LFS_NAME_MAX+1, which can be changed by redefining LFS_NAME_MAX to
+ // reduce RAM. LFS_NAME_MAX is stored in superblock and must be
+ // respected by other littlefs drivers.
+ char name[LFS_NAME_MAX+1];
+};
+
+// Filesystem info structure
+struct lfs_fsinfo {
+ // On-disk version.
+ uint32_t disk_version;
+
+ // Size of a logical block in bytes.
+ lfs_size_t block_size;
+
+ // Number of logical blocks in filesystem.
+ lfs_size_t block_count;
+
+ // Upper limit on the length of file names in bytes.
+ lfs_size_t name_max;
+
+ // Upper limit on the size of files in bytes.
+ lfs_size_t file_max;
+
+ // Upper limit on the size of custom attributes in bytes.
+ lfs_size_t attr_max;
+};
+
+// Custom attribute structure, used to describe custom attributes
+// committed atomically during file writes.
+struct lfs_attr {
+ // 8-bit type of attribute, provided by user and used to
+ // identify the attribute
+ uint8_t type;
+
+ // Pointer to buffer containing the attribute
+ void *buffer;
+
+ // Size of attribute in bytes, limited to LFS_ATTR_MAX
+ lfs_size_t size;
+};
+
+// Optional configuration provided during lfs_file_opencfg
+struct lfs_file_config {
+ // Optional statically allocated file buffer. Must be cache_size.
+ // By default lfs_malloc is used to allocate this buffer.
+ void *buffer;
+
+ // Optional list of custom attributes related to the file. If the file
+ // is opened with read access, these attributes will be read from disk
+ // during the open call. If the file is opened with write access, the
+ // attributes will be written to disk every file sync or close. This
+ // write occurs atomically with update to the file's contents.
+ //
+ // Custom attributes are uniquely identified by an 8-bit type and limited
+ // to LFS_ATTR_MAX bytes. When read, if the stored attribute is smaller
+ // than the buffer, it will be padded with zeros. If the stored attribute
+ // is larger, then it will be silently truncated. If the attribute is not
+ // found, it will be created implicitly.
+ struct lfs_attr *attrs;
+
+ // Number of custom attributes in the list
+ lfs_size_t attr_count;
+};
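+
+// A minimal sketch of attaching a custom attribute to a file. The attribute
+// type 0x74 and the utime value are arbitrary examples; the attribute is
+// read on open and committed atomically on sync/close:
+//
+//   uint32_t utime = 0;
+//   struct lfs_attr attrs[] = {
+//       {.type = 0x74, .buffer = &utime, .size = sizeof(utime)},
+//   };
+//   struct lfs_file_config fcfg = {.attrs = attrs, .attr_count = 1};
+//   lfs_file_opencfg(&lfs, &file, "hello", LFS_O_RDWR | LFS_O_CREAT, &fcfg);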
+
+
+/// internal littlefs data structures ///
+typedef struct lfs_cache {
+ lfs_block_t block;
+ lfs_off_t off;
+ lfs_size_t size;
+ uint8_t *buffer;
+} lfs_cache_t;
+
+typedef struct lfs_mdir {
+ lfs_block_t pair[2];
+ uint32_t rev;
+ lfs_off_t off;
+ uint32_t etag;
+ uint16_t count;
+ bool erased;
+ bool split;
+ lfs_block_t tail[2];
+} lfs_mdir_t;
+
+// littlefs directory type
+typedef struct lfs_dir {
+ struct lfs_dir *next;
+ uint16_t id;
+ uint8_t type;
+ lfs_mdir_t m;
+
+ lfs_off_t pos;
+ lfs_block_t head[2];
+} lfs_dir_t;
+
+// littlefs file type
+typedef struct lfs_file {
+ struct lfs_file *next;
+ uint16_t id;
+ uint8_t type;
+ lfs_mdir_t m;
+
+ struct lfs_ctz {
+ lfs_block_t head;
+ lfs_size_t size;
+ } ctz;
+
+ uint32_t flags;
+ lfs_off_t pos;
+ lfs_block_t block;
+ lfs_off_t off;
+ lfs_cache_t cache;
+
+ const struct lfs_file_config *cfg;
+} lfs_file_t;
+
+typedef struct lfs_superblock {
+ uint32_t version;
+ lfs_size_t block_size;
+ lfs_size_t block_count;
+ lfs_size_t name_max;
+ lfs_size_t file_max;
+ lfs_size_t attr_max;
+} lfs_superblock_t;
+
+typedef struct lfs_gstate {
+ uint32_t tag;
+ lfs_block_t pair[2];
+} lfs_gstate_t;
+
+// The littlefs filesystem type
+typedef struct lfs {
+ lfs_cache_t rcache;
+ lfs_cache_t pcache;
+
+ lfs_block_t root[2];
+ struct lfs_mlist {
+ struct lfs_mlist *next;
+ uint16_t id;
+ uint8_t type;
+ lfs_mdir_t m;
+ } *mlist;
+ uint32_t seed;
+
+ lfs_gstate_t gstate;
+ lfs_gstate_t gdisk;
+ lfs_gstate_t gdelta;
+
+ struct lfs_lookahead {
+ lfs_block_t start;
+ lfs_block_t size;
+ lfs_block_t next;
+ lfs_block_t ckpoint;
+ uint8_t *buffer;
+ } lookahead;
+
+ const struct lfs_config *cfg;
+ lfs_size_t block_count;
+ lfs_size_t name_max;
+ lfs_size_t file_max;
+ lfs_size_t attr_max;
+ lfs_size_t inline_max;
+
+#ifdef LFS_MIGRATE
+ struct lfs1 *lfs1;
+#endif
+} lfs_t;
+
+
+/// Filesystem functions ///
+
+#ifndef LFS_READONLY
+// Format a block device with the littlefs
+//
+// Requires a littlefs object and config struct. This clobbers the littlefs
+// object, and does not leave the filesystem mounted. The config struct must
+// be zeroed for defaults and backwards compatibility.
+//
+// Returns a negative error code on failure.
+int lfs_format(lfs_t *lfs, const struct lfs_config *config);
+#endif
+
+// Mounts a littlefs
+//
+// Requires a littlefs object and config struct. Multiple filesystems
+// may be mounted simultaneously with multiple littlefs objects. Both
+// lfs and config must be allocated while mounted. The config struct must
+// be zeroed for defaults and backwards compatibility.
+//
+// Returns a negative error code on failure.
+int lfs_mount(lfs_t *lfs, const struct lfs_config *config);
+
+// Unmounts a littlefs
+//
+// Does nothing besides releasing any allocated resources.
+// Returns a negative error code on failure.
+int lfs_unmount(lfs_t *lfs);
+
+/// General operations ///
+
+#ifndef LFS_READONLY
+// Removes a file or directory
+//
+// If removing a directory, the directory must be empty.
+// Returns a negative error code on failure.
+int lfs_remove(lfs_t *lfs, const char *path);
+#endif
+
+#ifndef LFS_READONLY
+// Rename or move a file or directory
+//
+// If the destination exists, it must match the source in type.
+// If the destination is a directory, the directory must be empty.
+//
+// Returns a negative error code on failure.
+int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath);
+#endif
+
+// Find info about a file or directory
+//
+// Fills out the info structure, based on the specified file or directory.
+// Returns a negative error code on failure.
+int lfs_stat(lfs_t *lfs, const char *path, struct lfs_info *info);
+
+// Get a custom attribute
+//
+// Custom attributes are uniquely identified by an 8-bit type and limited
+// to LFS_ATTR_MAX bytes. When read, if the stored attribute is smaller than
+// the buffer, it will be padded with zeros. If the stored attribute is larger,
+// then it will be silently truncated. If no attribute is found, the error
+// LFS_ERR_NOATTR is returned and the buffer is filled with zeros.
+//
+// Returns the size of the attribute, or a negative error code on failure.
+// Note, the returned size is the size of the attribute on disk, irrespective
+// of the size of the buffer. This can be used to dynamically allocate a buffer
+// or check for existence.
+lfs_ssize_t lfs_getattr(lfs_t *lfs, const char *path,
+ uint8_t type, void *buffer, lfs_size_t size);
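+
+// For example, the on-disk size can be probed before allocating a buffer
+// (a sketch; the attribute type 0x74 is arbitrary):
+//
+//   lfs_ssize_t len = lfs_getattr(&lfs, "hello", 0x74, NULL, 0);
+//   if (len == LFS_ERR_NOATTR) {
+//       // attribute does not exist
+//   } else if (len >= 0) {
+//       // len is the attribute's size on disk; allocate and re-read
+//   }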
+
+#ifndef LFS_READONLY
+// Set custom attributes
+//
+// Custom attributes are uniquely identified by an 8-bit type and limited
+// to LFS_ATTR_MAX bytes. If an attribute is not found, it will be
+// implicitly created.
+//
+// Returns a negative error code on failure.
+int lfs_setattr(lfs_t *lfs, const char *path,
+ uint8_t type, const void *buffer, lfs_size_t size);
+#endif
+
+#ifndef LFS_READONLY
+// Removes a custom attribute
+//
+// If an attribute is not found, nothing happens.
+//
+// Returns a negative error code on failure.
+int lfs_removeattr(lfs_t *lfs, const char *path, uint8_t type);
+#endif
+
+
+/// File operations ///
+
+#ifndef LFS_NO_MALLOC
+// Open a file
+//
+// The mode that the file is opened in is determined by the flags, which
+// are values from the enum lfs_open_flags that are bitwise-ored together.
+//
+// Returns a negative error code on failure.
+int lfs_file_open(lfs_t *lfs, lfs_file_t *file,
+ const char *path, int flags);
+
+// If LFS_NO_MALLOC is defined, lfs_file_open() will fail with LFS_ERR_NOMEM;
+// use lfs_file_opencfg() with config.buffer set instead (see the sketch
+// below).
+#endif
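+
+// A minimal sketch of that static-buffer pattern; the 256 here stands in
+// for whatever cache_size the configuration uses:
+//
+//   static uint8_t fbuf[256];            // must be cache_size bytes
+//   struct lfs_file_config fcfg = {.buffer = fbuf};
+//   lfs_file_opencfg(&lfs, &file, "log.txt",
+//           LFS_O_RDWR | LFS_O_CREAT, &fcfg);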
+
+// Open a file with extra configuration
+//
+// The mode that the file is opened in is determined by the flags, which
+// are values from the enum lfs_open_flags that are bitwise-ored together.
+//
+// The config struct provides additional config options per file as described
+// above. The config struct must remain allocated while the file is open, and
+// the config struct must be zeroed for defaults and backwards compatibility.
+//
+// Returns a negative error code on failure.
+int lfs_file_opencfg(lfs_t *lfs, lfs_file_t *file,
+ const char *path, int flags,
+ const struct lfs_file_config *config);
+
+// Close a file
+//
+// Any pending writes are written out to storage as though sync had been
+// called, and any allocated resources are released.
+//
+// Returns a negative error code on failure.
+int lfs_file_close(lfs_t *lfs, lfs_file_t *file);
+
+// Synchronize a file on storage
+//
+// Any pending writes are written out to storage.
+// Returns a negative error code on failure.
+int lfs_file_sync(lfs_t *lfs, lfs_file_t *file);
+
+// Read data from file
+//
+// Takes a buffer and size indicating where to store the read data.
+// Returns the number of bytes read, or a negative error code on failure.
+lfs_ssize_t lfs_file_read(lfs_t *lfs, lfs_file_t *file,
+ void *buffer, lfs_size_t size);
+
+#ifndef LFS_READONLY
+// Write data to file
+//
+// Takes a buffer and size indicating the data to write. The file will not
+// actually be updated on the storage until either sync or close is called.
+//
+// Returns the number of bytes written, or a negative error code on failure.
+lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file,
+ const void *buffer, lfs_size_t size);
+#endif
+
+// Change the position of the file
+//
+// The change in position is determined by the offset and whence flag.
+// Returns the new position of the file, or a negative error code on failure.
+lfs_soff_t lfs_file_seek(lfs_t *lfs, lfs_file_t *file,
+ lfs_soff_t off, int whence);
+
+#ifndef LFS_READONLY
+// Truncates the size of the file to the specified size
+//
+// Returns a negative error code on failure.
+int lfs_file_truncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size);
+#endif
+
+// Return the position of the file
+//
+// Equivalent to lfs_file_seek(lfs, file, 0, LFS_SEEK_CUR)
+// Returns the position of the file, or a negative error code on failure.
+lfs_soff_t lfs_file_tell(lfs_t *lfs, lfs_file_t *file);
+
+// Change the position of the file to the beginning of the file
+//
+// Equivalent to lfs_file_seek(lfs, file, 0, LFS_SEEK_SET)
+// Returns a negative error code on failure.
+int lfs_file_rewind(lfs_t *lfs, lfs_file_t *file);
+
+// Return the size of the file
+//
+// Similar to lfs_file_seek(lfs, file, 0, LFS_SEEK_END)
+// Returns the size of the file, or a negative error code on failure.
+lfs_soff_t lfs_file_size(lfs_t *lfs, lfs_file_t *file);
+
+
+/// Directory operations ///
+
+#ifndef LFS_READONLY
+// Create a directory
+//
+// Returns a negative error code on failure.
+int lfs_mkdir(lfs_t *lfs, const char *path);
+#endif
+
+// Open a directory
+//
+// Once open, a directory can be used with read to iterate over files.
+// Returns a negative error code on failure.
+int lfs_dir_open(lfs_t *lfs, lfs_dir_t *dir, const char *path);
+
+// Close a directory
+//
+// Releases any allocated resources.
+// Returns a negative error code on failure.
+int lfs_dir_close(lfs_t *lfs, lfs_dir_t *dir);
+
+// Read an entry in the directory
+//
+// Fills out the info structure, based on the specified file or directory.
+// Returns a positive value on success, 0 at the end of directory,
+// or a negative error code on failure.
+int lfs_dir_read(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info);
+
+// Change the position of the directory
+//
+// The new off must be a value previously returned from tell and specifies
+// an absolute offset in the directory seek.
+//
+// Returns a negative error code on failure.
+int lfs_dir_seek(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off);
+
+// Return the position of the directory
+//
+// The returned offset is only meant to be consumed by seek and may not make
+// sense, but does indicate the current position in the directory iteration.
+//
+// Returns the position of the directory, or a negative error code on failure.
+lfs_soff_t lfs_dir_tell(lfs_t *lfs, lfs_dir_t *dir);
+
+// Change the position of the directory to the beginning of the directory
+//
+// Returns a negative error code on failure.
+int lfs_dir_rewind(lfs_t *lfs, lfs_dir_t *dir);
+
+
+/// Filesystem-level filesystem operations
+
+// Find on-disk info about the filesystem
+//
+// Fills out the fsinfo structure based on the filesystem found on-disk.
+// Returns a negative error code on failure.
+int lfs_fs_stat(lfs_t *lfs, struct lfs_fsinfo *fsinfo);
+
+// Finds the current size of the filesystem
+//
+// Note: Result is best effort. If files share COW structures, the returned
+// size may be larger than the filesystem actually is.
+//
+// Returns the number of allocated blocks, or a negative error code on failure.
+lfs_ssize_t lfs_fs_size(lfs_t *lfs);
+
+// Traverse through all blocks in use by the filesystem
+//
+// The provided callback will be called with each block address that is
+// currently in use by the filesystem. This can be used to determine which
+// blocks are in use or how much of the storage is available.
+//
+// Returns a negative error code on failure.
+int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data);
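+
+// For example, a callback that counts blocks in use (a sketch; free space
+// is then roughly (block_count - used) * block_size):
+//
+//   static int count_cb(void *p, lfs_block_t block) {
+//       (void)block;
+//       *(lfs_size_t *)p += 1;
+//       return 0;
+//   }
+//
+//   lfs_size_t used = 0;
+//   lfs_fs_traverse(&lfs, count_cb, &used);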
+
+#ifndef LFS_READONLY
+// Attempt to make the filesystem consistent and ready for writing
+//
+// Calling this function is not required, consistency will be implicitly
+// enforced on the first operation that writes to the filesystem, but this
+// function allows the work to be performed earlier and without other
+// filesystem changes.
+//
+// Returns a negative error code on failure.
+int lfs_fs_mkconsistent(lfs_t *lfs);
+#endif
+
+#ifndef LFS_READONLY
+// Attempt any janitorial work
+//
+// This currently:
+// 1. Calls mkconsistent if not already consistent
+// 2. Compacts metadata > compact_thresh
+// 3. Populates the block allocator
+//
+// Though additional janitorial work may be added in the future.
+//
+// Calling this function is not required, but may allow the offloading of
+// expensive janitorial work to a less time-critical code path.
+//
+// Returns a negative error code on failure. Accomplishing nothing is not
+// an error.
+int lfs_fs_gc(lfs_t *lfs);
+#endif
+
+#ifndef LFS_READONLY
+// Grows the filesystem to a new size, updating the superblock with the new
+// block count.
+//
+// If LFS_SHRINKNONRELOCATING is defined, this function will also accept
+// block_counts smaller than the current configuration, after checking
+// that none of the blocks that are being removed are in use.
+// Note that littlefs's pseudorandom block allocation means that
+// this is very unlikely to work in the general case.
+//
+// Returns a negative error code on failure.
+int lfs_fs_grow(lfs_t *lfs, lfs_size_t block_count);
+#endif
+
+#ifndef LFS_READONLY
+#ifdef LFS_MIGRATE
+// Attempts to migrate a previous version of littlefs
+//
+// Behaves similarly to the lfs_format function. Attempts to mount
+// the previous version of littlefs and update the filesystem so it can be
+// mounted with the current version of littlefs.
+//
+// Requires a littlefs object and config struct. This clobbers the littlefs
+// object, and does not leave the filesystem mounted. The config struct must
+// be zeroed for defaults and backwards compatibility.
+//
+// Returns a negative error code on failure.
+int lfs_migrate(lfs_t *lfs, const struct lfs_config *cfg);
+#endif
+#endif
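+
+// A typical usage sketch: attempt a normal mount first, and only migrate
+// when the image turns out to be a v1 filesystem (mount then fails with
+// LFS_ERR_INVAL):
+//
+//   int err = lfs_mount(&lfs, &cfg);
+//   if (err == LFS_ERR_INVAL) {
+//       err = lfs_migrate(&lfs, &cfg);
+//       if (!err) {
+//           err = lfs_mount(&lfs, &cfg);
+//       }
+//   }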
+
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif
diff --git a/packages/littlefs-v2.11.2/lfs_config.h b/packages/littlefs-v2.11.2/lfs_config.h
new file mode 100644
index 0000000..473fd28
--- /dev/null
+++ b/packages/littlefs-v2.11.2/lfs_config.h
@@ -0,0 +1,202 @@
+#ifndef _LFS_CONFIG_H_
+#define _LFS_CONFIG_H_
+
+#include <rtthread.h>
+
+// System includes
+#include <stdint.h>
+#include <stddef.h>
+#include <stdbool.h>
+#include <string.h>
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+// Macros, may be replaced by system specific wrappers. Arguments to these
+// macros must not have side-effects as the macros can be removed for a smaller
+// code footprint
+
+// Logging functions
+#ifdef LFS_YES_TRACE
+#define LFS_TRACE_(fmt, ...) \
+ rt_kprintf("%s:%d:trace: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
+#define LFS_TRACE(...) LFS_TRACE_(__VA_ARGS__, "")
+#else
+#define LFS_TRACE(...)
+#endif
+
+#ifndef LFS_NO_DEBUG
+#define LFS_DEBUG_(fmt, ...) \
+ rt_kprintf("%s:%d:debug: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
+#define LFS_DEBUG(...) LFS_DEBUG_(__VA_ARGS__, "")
+#else
+#define LFS_DEBUG(...)
+#endif
+
+#ifndef LFS_NO_WARN
+#define LFS_WARN_(fmt, ...) \
+ rt_kprintf("%s:%d:warn: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
+#define LFS_WARN(...) LFS_WARN_(__VA_ARGS__, "")
+#else
+#define LFS_WARN(...)
+#endif
+
+#ifndef LFS_NO_ERROR
+#define LFS_ERROR_(fmt, ...) \
+ rt_kprintf("%s:%d:error: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
+#define LFS_ERROR(...) LFS_ERROR_(__VA_ARGS__, "")
+#else
+#define LFS_ERROR(...)
+#endif
+
+// Runtime assertions
+#ifndef LFS_NO_ASSERT
+#define LFS_ASSERT(test) RT_ASSERT(test)
+#else
+#define LFS_ASSERT(test)
+#endif
+
+
+// Builtin functions, these may be replaced by more efficient
+// toolchain-specific implementations. LFS_NO_INTRINSICS falls back to a more
+// expensive basic C implementation for debugging purposes
+
+// Min/max functions for unsigned 32-bit numbers
+static inline uint32_t lfs_max(uint32_t a, uint32_t b) {
+ return (a > b) ? a : b;
+}
+
+static inline uint32_t lfs_min(uint32_t a, uint32_t b) {
+ return (a < b) ? a : b;
+}
+
+// Align to nearest multiple of a size
+static inline uint32_t lfs_aligndown(uint32_t a, uint32_t alignment) {
+ return a - (a % alignment);
+}
+
+static inline uint32_t lfs_alignup(uint32_t a, uint32_t alignment) {
+ return lfs_aligndown(a + alignment-1, alignment);
+}
+
+// Find the smallest power of 2 greater than or equal to a
+static inline uint32_t lfs_npw2(uint32_t a) {
+#if !defined(LFS_NO_INTRINSICS) && (defined(__GNUC__) || defined(__CC_ARM))
+ return 32 - __builtin_clz(a-1);
+#else
+ uint32_t r = 0;
+ uint32_t s;
+ a -= 1;
+ s = (a > 0xffff) << 4; a >>= s; r |= s;
+ s = (a > 0xff ) << 3; a >>= s; r |= s;
+ s = (a > 0xf ) << 2; a >>= s; r |= s;
+ s = (a > 0x3 ) << 1; a >>= s; r |= s;
+ return (r | (a >> 1)) + 1;
+#endif
+}
+
+// Count the number of trailing binary zeros in a
+// lfs_ctz(0) may be undefined
+static inline uint32_t lfs_ctz(uint32_t a) {
+#if !defined(LFS_NO_INTRINSICS) && defined(__GNUC__)
+ return __builtin_ctz(a);
+#else
+ return lfs_npw2((a & -a) + 1) - 1;
+#endif
+}
+
+// Count the number of binary ones in a
+static inline uint32_t lfs_popc(uint32_t a) {
+#if !defined(LFS_NO_INTRINSICS) && (defined(__GNUC__) || defined(__CC_ARM))
+ return __builtin_popcount(a);
+#else
+ a = a - ((a >> 1) & 0x55555555);
+ a = (a & 0x33333333) + ((a >> 2) & 0x33333333);
+ return (((a + (a >> 4)) & 0xf0f0f0f) * 0x1010101) >> 24;
+#endif
+}
+
+// Find the sequence comparison of a and b, this is the distance
+// between a and b ignoring overflow
+static inline int lfs_scmp(uint32_t a, uint32_t b) {
+ return (int)(unsigned)(a - b);
+}
+
+// Convert between 32-bit little-endian and native order
+static inline uint32_t lfs_fromle32(uint32_t a) {
+#if !defined(LFS_NO_INTRINSICS) && ( \
+ (defined( BYTE_ORDER ) && defined( ORDER_LITTLE_ENDIAN ) && BYTE_ORDER == ORDER_LITTLE_ENDIAN ) || \
+ (defined(__BYTE_ORDER ) && defined(__ORDER_LITTLE_ENDIAN ) && __BYTE_ORDER == __ORDER_LITTLE_ENDIAN ) || \
+ (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__))
+ return a;
+#elif !defined(LFS_NO_INTRINSICS) && ( \
+ (defined( BYTE_ORDER ) && defined( ORDER_BIG_ENDIAN ) && BYTE_ORDER == ORDER_BIG_ENDIAN ) || \
+ (defined(__BYTE_ORDER ) && defined(__ORDER_BIG_ENDIAN ) && __BYTE_ORDER == __ORDER_BIG_ENDIAN ) || \
+ (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))
+ return __builtin_bswap32(a);
+#else
+ return (((uint8_t*)&a)[0] << 0) |
+ (((uint8_t*)&a)[1] << 8) |
+ (((uint8_t*)&a)[2] << 16) |
+ (((uint8_t*)&a)[3] << 24);
+#endif
+}
+
+static inline uint32_t lfs_tole32(uint32_t a) {
+ return lfs_fromle32(a);
+}
+
+// Convert between 32-bit big-endian and native order
+static inline uint32_t lfs_frombe32(uint32_t a) {
+#if !defined(LFS_NO_INTRINSICS) && ( \
+ (defined( BYTE_ORDER ) && defined( ORDER_LITTLE_ENDIAN ) && BYTE_ORDER == ORDER_LITTLE_ENDIAN ) || \
+ (defined(__BYTE_ORDER ) && defined(__ORDER_LITTLE_ENDIAN ) && __BYTE_ORDER == __ORDER_LITTLE_ENDIAN ) || \
+ (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__))
+ return __builtin_bswap32(a);
+#elif !defined(LFS_NO_INTRINSICS) && ( \
+ (defined( BYTE_ORDER ) && defined( ORDER_BIG_ENDIAN ) && BYTE_ORDER == ORDER_BIG_ENDIAN ) || \
+ (defined(__BYTE_ORDER ) && defined(__ORDER_BIG_ENDIAN ) && __BYTE_ORDER == __ORDER_BIG_ENDIAN ) || \
+ (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))
+ return a;
+#else
+ return (((uint8_t*)&a)[0] << 24) |
+ (((uint8_t*)&a)[1] << 16) |
+ (((uint8_t*)&a)[2] << 8) |
+ (((uint8_t*)&a)[3] << 0);
+#endif
+}
+
+static inline uint32_t lfs_tobe32(uint32_t a) {
+ return lfs_frombe32(a);
+}
+
+// Calculate CRC-32 with polynomial = 0x04c11db7
+uint32_t lfs_crc(uint32_t crc, const void *buffer, size_t size);
+
+// Allocate memory, only used if buffers are not provided to littlefs
+// Note, memory must be 64-bit aligned
+static inline void *lfs_malloc(size_t size) {
+#ifndef LFS_NO_MALLOC
+ return rt_malloc(size);
+#else
+ (void)size;
+ return NULL;
+#endif
+}
+
+// Deallocate memory, only used if buffers are not provided to littlefs
+static inline void lfs_free(void *p) {
+#ifndef LFS_NO_MALLOC
+ rt_free(p);
+#else
+ (void)p;
+#endif
+}
+
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif
diff --git a/packages/littlefs-v2.11.2/lfs_crc.c b/packages/littlefs-v2.11.2/lfs_crc.c
new file mode 100644
index 0000000..e205aea
--- /dev/null
+++ b/packages/littlefs-v2.11.2/lfs_crc.c
@@ -0,0 +1,20 @@
+#include "lfs_util.h"
+
+// Software CRC implementation with small lookup table
+uint32_t lfs_crc(uint32_t crc, const void *buffer, size_t size) {
+ static const uint32_t rtable[16] = {
+ 0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
+ 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
+ 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
+ 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c,
+ };
+
+ const uint8_t *data = buffer;
+
+ for (size_t i = 0; i < size; i++) {
+ crc = (crc >> 4) ^ rtable[(crc ^ (data[i] >> 0)) & 0xf];
+ crc = (crc >> 4) ^ rtable[(crc ^ (data[i] >> 4)) & 0xf];
+ }
+
+ return crc;
+}
diff --git a/packages/littlefs-v2.11.2/lfs_util.c b/packages/littlefs-v2.11.2/lfs_util.c
new file mode 100644
index 0000000..dac72ab
--- /dev/null
+++ b/packages/littlefs-v2.11.2/lfs_util.c
@@ -0,0 +1,37 @@
+/*
+ * lfs util functions
+ *
+ * Copyright (c) 2022, The littlefs authors.
+ * Copyright (c) 2017, Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include "lfs_util.h"
+
+// Only compile if user does not provide custom config
+#ifndef LFS_CONFIG
+
+
+// If user provides their own CRC impl we don't need this
+#ifndef LFS_CRC
+// Software CRC implementation with small lookup table
+uint32_t lfs_crc(uint32_t crc, const void *buffer, size_t size) {
+ static const uint32_t rtable[16] = {
+ 0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
+ 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
+ 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
+ 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c,
+ };
+
+ const uint8_t *data = buffer;
+
+ for (size_t i = 0; i < size; i++) {
+ crc = (crc >> 4) ^ rtable[(crc ^ (data[i] >> 0)) & 0xf];
+ crc = (crc >> 4) ^ rtable[(crc ^ (data[i] >> 4)) & 0xf];
+ }
+
+ return crc;
+}
+#endif
+
+
+#endif
diff --git a/packages/littlefs-v2.11.2/lfs_util.h b/packages/littlefs-v2.11.2/lfs_util.h
new file mode 100644
index 0000000..c1999fa
--- /dev/null
+++ b/packages/littlefs-v2.11.2/lfs_util.h
@@ -0,0 +1,273 @@
+/*
+ * lfs utility functions
+ *
+ * Copyright (c) 2022, The littlefs authors.
+ * Copyright (c) 2017, Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef LFS_UTIL_H
+#define LFS_UTIL_H
+
+#define LFS_STRINGIZE(x) LFS_STRINGIZE2(x)
+#define LFS_STRINGIZE2(x) #x
+
+// Users can override lfs_util.h with their own configuration by defining
+// LFS_CONFIG as a header file to include (-DLFS_CONFIG=lfs_config.h).
+//
+// If LFS_CONFIG is used, none of the default utils will be emitted and must be
+// provided by the config file. To start, I would suggest copying lfs_util.h
+// and modifying as needed.
+#ifdef LFS_CONFIG
+#include LFS_STRINGIZE(LFS_CONFIG)
+#else
+
+// Alternatively, users can provide a header file which defines
+// macros and other things consumed by littlefs.
+//
+// For example, provide my_defines.h, which contains
+// something like:
+//
+// #include <stddef.h>
+// extern void *my_malloc(size_t sz);
+// #define LFS_MALLOC(sz) my_malloc(sz)
+//
+// And build littlefs with the header by defining LFS_DEFINES.
+// (-DLFS_DEFINES=my_defines.h)
+
+#ifdef LFS_DEFINES
+#include LFS_STRINGIZE(LFS_DEFINES)
+#endif
+
+// System includes
+#include <stdint.h>
+#include <stddef.h>
+#include <stdbool.h>
+#include <string.h>
+
+#ifndef LFS_NO_MALLOC
+#include <stdlib.h>
+#endif
+#ifndef LFS_NO_ASSERT
+#include <assert.h>
+#endif
+#if !defined(LFS_NO_DEBUG) || \
+ !defined(LFS_NO_WARN) || \
+ !defined(LFS_NO_ERROR) || \
+ defined(LFS_YES_TRACE)
+#include <stdio.h>
+#endif
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+// Macros, may be replaced by system specific wrappers. Arguments to these
+// macros must not have side-effects as the macros can be removed for a smaller
+// code footprint
+
+// Logging functions
+#ifndef LFS_TRACE
+#ifdef LFS_YES_TRACE
+#define LFS_TRACE_(fmt, ...) \
+ printf("%s:%d:trace: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
+#define LFS_TRACE(...) LFS_TRACE_(__VA_ARGS__, "")
+#else
+#define LFS_TRACE(...)
+#endif
+#endif
+
+#ifndef LFS_DEBUG
+#ifndef LFS_NO_DEBUG
+#define LFS_DEBUG_(fmt, ...) \
+ printf("%s:%d:debug: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
+#define LFS_DEBUG(...) LFS_DEBUG_(__VA_ARGS__, "")
+#else
+#define LFS_DEBUG(...)
+#endif
+#endif
+
+#ifndef LFS_WARN
+#ifndef LFS_NO_WARN
+#define LFS_WARN_(fmt, ...) \
+ printf("%s:%d:warn: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
+#define LFS_WARN(...) LFS_WARN_(__VA_ARGS__, "")
+#else
+#define LFS_WARN(...)
+#endif
+#endif
+
+#ifndef LFS_ERROR
+#ifndef LFS_NO_ERROR
+#define LFS_ERROR_(fmt, ...) \
+ printf("%s:%d:error: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
+#define LFS_ERROR(...) LFS_ERROR_(__VA_ARGS__, "")
+#else
+#define LFS_ERROR(...)
+#endif
+#endif
+
+// Runtime assertions
+#ifndef LFS_ASSERT
+#ifndef LFS_NO_ASSERT
+#define LFS_ASSERT(test) assert(test)
+#else
+#define LFS_ASSERT(test)
+#endif
+#endif
+
+
+// Builtin functions, these may be replaced by more efficient
+// toolchain-specific implementations. LFS_NO_INTRINSICS falls back to a more
+// expensive basic C implementation for debugging purposes
+
+// Min/max functions for unsigned 32-bit numbers
+static inline uint32_t lfs_max(uint32_t a, uint32_t b) {
+ return (a > b) ? a : b;
+}
+
+static inline uint32_t lfs_min(uint32_t a, uint32_t b) {
+ return (a < b) ? a : b;
+}
+
+// Align to nearest multiple of a size
+static inline uint32_t lfs_aligndown(uint32_t a, uint32_t alignment) {
+ return a - (a % alignment);
+}
+
+static inline uint32_t lfs_alignup(uint32_t a, uint32_t alignment) {
+ return lfs_aligndown(a + alignment-1, alignment);
+}
+
+// Find the smallest power of 2 greater than or equal to a
+static inline uint32_t lfs_npw2(uint32_t a) {
+#if !defined(LFS_NO_INTRINSICS) && (defined(__GNUC__) || defined(__CC_ARM))
+ return 32 - __builtin_clz(a-1);
+#else
+ uint32_t r = 0;
+ uint32_t s;
+ a -= 1;
+ s = (a > 0xffff) << 4; a >>= s; r |= s;
+ s = (a > 0xff ) << 3; a >>= s; r |= s;
+ s = (a > 0xf ) << 2; a >>= s; r |= s;
+ s = (a > 0x3 ) << 1; a >>= s; r |= s;
+ return (r | (a >> 1)) + 1;
+#endif
+}
+
+// Count the number of trailing binary zeros in a
+// lfs_ctz(0) may be undefined
+static inline uint32_t lfs_ctz(uint32_t a) {
+#if !defined(LFS_NO_INTRINSICS) && defined(__GNUC__)
+ return __builtin_ctz(a);
+#else
+ return lfs_npw2((a & -a) + 1) - 1;
+#endif
+}
+
+// Count the number of binary ones in a
+static inline uint32_t lfs_popc(uint32_t a) {
+#if !defined(LFS_NO_INTRINSICS) && (defined(__GNUC__) || defined(__CC_ARM))
+ return __builtin_popcount(a);
+#else
+ a = a - ((a >> 1) & 0x55555555);
+ a = (a & 0x33333333) + ((a >> 2) & 0x33333333);
+ return (((a + (a >> 4)) & 0xf0f0f0f) * 0x1010101) >> 24;
+#endif
+}
+
+// Find the sequence comparison of a and b, this is the distance
+// between a and b ignoring overflow
+static inline int lfs_scmp(uint32_t a, uint32_t b) {
+ return (int)(unsigned)(a - b);
+}
+
+// Convert between 32-bit little-endian and native order
+static inline uint32_t lfs_fromle32(uint32_t a) {
+#if (defined( BYTE_ORDER ) && defined( ORDER_LITTLE_ENDIAN ) && BYTE_ORDER == ORDER_LITTLE_ENDIAN ) || \
+ (defined(__BYTE_ORDER ) && defined(__ORDER_LITTLE_ENDIAN ) && __BYTE_ORDER == __ORDER_LITTLE_ENDIAN ) || \
+ (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+ return a;
+#elif !defined(LFS_NO_INTRINSICS) && ( \
+ (defined( BYTE_ORDER ) && defined( ORDER_BIG_ENDIAN ) && BYTE_ORDER == ORDER_BIG_ENDIAN ) || \
+ (defined(__BYTE_ORDER ) && defined(__ORDER_BIG_ENDIAN ) && __BYTE_ORDER == __ORDER_BIG_ENDIAN ) || \
+ (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))
+ return __builtin_bswap32(a);
+#else
+ return ((uint32_t)((uint8_t*)&a)[0] << 0) |
+ ((uint32_t)((uint8_t*)&a)[1] << 8) |
+ ((uint32_t)((uint8_t*)&a)[2] << 16) |
+ ((uint32_t)((uint8_t*)&a)[3] << 24);
+#endif
+}
+
+static inline uint32_t lfs_tole32(uint32_t a) {
+ return lfs_fromle32(a);
+}
+
+// Convert between 32-bit big-endian and native order
+static inline uint32_t lfs_frombe32(uint32_t a) {
+#if !defined(LFS_NO_INTRINSICS) && ( \
+ (defined( BYTE_ORDER ) && defined( ORDER_LITTLE_ENDIAN ) && BYTE_ORDER == ORDER_LITTLE_ENDIAN ) || \
+ (defined(__BYTE_ORDER ) && defined(__ORDER_LITTLE_ENDIAN ) && __BYTE_ORDER == __ORDER_LITTLE_ENDIAN ) || \
+ (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__))
+ return __builtin_bswap32(a);
+#elif (defined( BYTE_ORDER ) && defined( ORDER_BIG_ENDIAN ) && BYTE_ORDER == ORDER_BIG_ENDIAN ) || \
+ (defined(__BYTE_ORDER ) && defined(__ORDER_BIG_ENDIAN ) && __BYTE_ORDER == __ORDER_BIG_ENDIAN ) || \
+ (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+ return a;
+#else
+ return ((uint32_t)((uint8_t*)&a)[0] << 24) |
+ ((uint32_t)((uint8_t*)&a)[1] << 16) |
+ ((uint32_t)((uint8_t*)&a)[2] << 8) |
+ ((uint32_t)((uint8_t*)&a)[3] << 0);
+#endif
+}
+
+static inline uint32_t lfs_tobe32(uint32_t a) {
+ return lfs_frombe32(a);
+}
+
+// Calculate CRC-32 with polynomial = 0x04c11db7
+#ifdef LFS_CRC
+static inline uint32_t lfs_crc(uint32_t crc, const void *buffer, size_t size) {
+ return LFS_CRC(crc, buffer, size);
+}
+#else
+uint32_t lfs_crc(uint32_t crc, const void *buffer, size_t size);
+#endif
+
+// Allocate memory, only used if buffers are not provided to littlefs
+//
+// littlefs currently has no alignment requirements, as it only allocates
+// byte-level buffers.
+static inline void *lfs_malloc(size_t size) {
+#if defined(LFS_MALLOC)
+ return LFS_MALLOC(size);
+#elif !defined(LFS_NO_MALLOC)
+ return malloc(size);
+#else
+ (void)size;
+ return NULL;
+#endif
+}
+
+// Deallocate memory, only used if buffers are not provided to littlefs
+static inline void lfs_free(void *p) {
+#if defined(LFS_FREE)
+ LFS_FREE(p);
+#elif !defined(LFS_NO_MALLOC)
+ free(p);
+#else
+ (void)p;
+#endif
+}
+
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif
+#endif
diff --git a/packages/littlefs-v2.11.2/runners/bench_runner.c b/packages/littlefs-v2.11.2/runners/bench_runner.c
new file mode 100644
index 0000000..e27c189
--- /dev/null
+++ b/packages/littlefs-v2.11.2/runners/bench_runner.c
@@ -0,0 +1,2063 @@
+/*
+ * Runner for littlefs benchmarks
+ *
+ * Copyright (c) 2022, The littlefs authors.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef _POSIX_C_SOURCE
+#define _POSIX_C_SOURCE 199309L
+#endif
+
+#include "runners/bench_runner.h"
+#include "bd/lfs_emubd.h"
+
+#include <execinfo.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+
+
+// some helpers
+
+// append to an array with amortized doubling
+void *mappend(void **p,
+ size_t size,
+ size_t *count,
+ size_t *capacity) {
+ uint8_t *p_ = *p;
+ size_t count_ = *count;
+ size_t capacity_ = *capacity;
+
+ count_ += 1;
+ if (count_ > capacity_) {
+ capacity_ = (2*capacity_ < 4) ? 4 : 2*capacity_;
+
+ p_ = realloc(p_, capacity_*size);
+ if (!p_) {
+ return NULL;
+ }
+ }
+
+ *p = p_;
+ *count = count_;
+ *capacity = capacity_;
+ return &p_[(count_-1)*size];
+}
+
+// a quick self-terminating text-safe varint scheme
+static void leb16_print(uintmax_t x) {
+ // allow 'w' to indicate negative numbers
+ if ((intmax_t)x < 0) {
+ printf("w");
+ x = -x;
+ }
+
+ while (true) {
+ char nibble = (x & 0xf) | (x > 0xf ? 0x10 : 0);
+ printf("%c", (nibble < 10) ? '0'+nibble : 'a'+nibble-10);
+ if (x <= 0xf) {
+ break;
+ }
+ x >>= 4;
+ }
+}
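+
+// e.g. 0x123 is printed low-nibble first as "ji1": 'g'-'v' encode a nibble
+// with the continuation bit set, while '0'-'9'/'a'-'f' terminate the number,
+// and a leading 'w' marks negative values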
+
+static uintmax_t leb16_parse(const char *s, char **tail) {
+ bool neg = false;
+ uintmax_t x = 0;
+ if (tail) {
+ *tail = (char*)s;
+ }
+
+ if (s[0] == 'w') {
+ neg = true;
+ s = s+1;
+ }
+
+ size_t i = 0;
+ while (true) {
+ uintmax_t nibble = s[i];
+ if (nibble >= '0' && nibble <= '9') {
+ nibble = nibble - '0';
+ } else if (nibble >= 'a' && nibble <= 'v') {
+ nibble = nibble - 'a' + 10;
+ } else {
+ // invalid?
+ return 0;
+ }
+
+ x |= (nibble & 0xf) << (4*i);
+ i += 1;
+ if (!(nibble & 0x10)) {
+ s = s + i;
+ break;
+ }
+ }
+
+ if (tail) {
+ *tail = (char*)s;
+ }
+ return neg ? -x : x;
+}
+
+
+
+// bench_runner types
+
+typedef struct bench_geometry {
+ const char *name;
+ bench_define_t defines[BENCH_GEOMETRY_DEFINE_COUNT];
+} bench_geometry_t;
+
+typedef struct bench_id {
+ const char *name;
+ const bench_define_t *defines;
+ size_t define_count;
+} bench_id_t;
+
+
+// bench suites are linked into a custom ld section
+#if defined(__APPLE__)
+extern struct bench_suite __start__bench_suites __asm("section$start$__DATA$_bench_suites");
+extern struct bench_suite __stop__bench_suites __asm("section$end$__DATA$_bench_suites");
+#else
+extern struct bench_suite __start__bench_suites;
+extern struct bench_suite __stop__bench_suites;
+#endif
+
+const struct bench_suite *bench_suites = &__start__bench_suites;
+#define BENCH_SUITE_COUNT \
+ ((size_t)(&__stop__bench_suites - &__start__bench_suites))
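+
+// note GNU-compatible linkers implicitly provide __start_/__stop_ symbols
+// for sections whose names are valid C identifiers, which is what lets
+// BENCH_SUITE_COUNT measure the _bench_suites section without an explicit
+// registration table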
+
+
+// bench define management
+typedef struct bench_define_map {
+ const bench_define_t *defines;
+ size_t count;
+} bench_define_map_t;
+
+typedef struct bench_define_names {
+ const char *const *names;
+ size_t count;
+} bench_define_names_t;
+
+intmax_t bench_define_lit(void *data) {
+ return (intptr_t)data;
+}
+
+#define BENCH_CONST(x) {bench_define_lit, (void*)(uintptr_t)(x)}
+#define BENCH_LIT(x) ((bench_define_t)BENCH_CONST(x))
+
+
+#define BENCH_DEF(k, v) \
+ intmax_t bench_define_##k(void *data) { \
+ (void)data; \
+ return v; \
+ }
+
+ BENCH_IMPLICIT_DEFINES
+#undef BENCH_DEF
+
+#define BENCH_DEFINE_MAP_OVERRIDE 0
+#define BENCH_DEFINE_MAP_EXPLICIT 1
+#define BENCH_DEFINE_MAP_PERMUTATION 2
+#define BENCH_DEFINE_MAP_GEOMETRY 3
+#define BENCH_DEFINE_MAP_IMPLICIT 4
+#define BENCH_DEFINE_MAP_COUNT 5
+
+bench_define_map_t bench_define_maps[BENCH_DEFINE_MAP_COUNT] = {
+ [BENCH_DEFINE_MAP_IMPLICIT] = {
+ (const bench_define_t[BENCH_IMPLICIT_DEFINE_COUNT]) {
+ #define BENCH_DEF(k, v) \
+ [k##_i] = {bench_define_##k, NULL},
+
+ BENCH_IMPLICIT_DEFINES
+ #undef BENCH_DEF
+ },
+ BENCH_IMPLICIT_DEFINE_COUNT,
+ },
+};
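+
+// note bench_define() scans these maps in index order, so lower-numbered
+// maps take precedence: overrides beat explicit defines, which beat
+// per-permutation, geometry, and finally implicit defines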
+
+#define BENCH_DEFINE_NAMES_SUITE 0
+#define BENCH_DEFINE_NAMES_IMPLICIT 1
+#define BENCH_DEFINE_NAMES_COUNT 2
+
+bench_define_names_t bench_define_names[BENCH_DEFINE_NAMES_COUNT] = {
+ [BENCH_DEFINE_NAMES_IMPLICIT] = {
+ (const char *const[BENCH_IMPLICIT_DEFINE_COUNT]){
+ #define BENCH_DEF(k, v) \
+ [k##_i] = #k,
+
+ BENCH_IMPLICIT_DEFINES
+ #undef BENCH_DEF
+ },
+ BENCH_IMPLICIT_DEFINE_COUNT,
+ },
+};
+
+intmax_t *bench_define_cache;
+size_t bench_define_cache_count;
+unsigned *bench_define_cache_mask;
+
+const char *bench_define_name(size_t define) {
+ // lookup in our bench names
+ for (size_t i = 0; i < BENCH_DEFINE_NAMES_COUNT; i++) {
+ if (define < bench_define_names[i].count
+ && bench_define_names[i].names
+ && bench_define_names[i].names[define]) {
+ return bench_define_names[i].names[define];
+ }
+ }
+
+ return NULL;
+}
+
+bool bench_define_ispermutation(size_t define) {
+ // is this define specific to the permutation?
+ for (size_t i = 0; i < BENCH_DEFINE_MAP_IMPLICIT; i++) {
+ if (define < bench_define_maps[i].count
+ && bench_define_maps[i].defines[define].cb) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+intmax_t bench_define(size_t define) {
+ // is the define in our cache?
+ if (define < bench_define_cache_count
+ && (bench_define_cache_mask[define/(8*sizeof(unsigned))]
+ & (1 << (define%(8*sizeof(unsigned)))))) {
+ return bench_define_cache[define];
+ }
+
+ // lookup in our bench defines
+ for (size_t i = 0; i < BENCH_DEFINE_MAP_COUNT; i++) {
+ if (define < bench_define_maps[i].count
+ && bench_define_maps[i].defines[define].cb) {
+ intmax_t v = bench_define_maps[i].defines[define].cb(
+ bench_define_maps[i].defines[define].data);
+
+ // insert into cache!
+ bench_define_cache[define] = v;
+ bench_define_cache_mask[define / (8*sizeof(unsigned))]
+ |= 1 << (define%(8*sizeof(unsigned)));
+
+ return v;
+ }
+ }
+
+ return 0;
+
+ // not found?
+ const char *name = bench_define_name(define);
+ fprintf(stderr, "error: undefined define %s (%zd)\n",
+ name ? name : "(unknown)",
+ define);
+ assert(false);
+ exit(-1);
+}
+
+void bench_define_flush(void) {
+ // clear cache between permutations
+ memset(bench_define_cache_mask, 0,
+ sizeof(unsigned)*(
+ (bench_define_cache_count+(8*sizeof(unsigned))-1)
+ / (8*sizeof(unsigned))));
+}
+
+// geometry updates
+const bench_geometry_t *bench_geometry = NULL;
+
+void bench_define_geometry(const bench_geometry_t *geometry) {
+ bench_define_maps[BENCH_DEFINE_MAP_GEOMETRY] = (bench_define_map_t){
+ geometry->defines, BENCH_GEOMETRY_DEFINE_COUNT};
+}
+
+// override updates
+typedef struct bench_override {
+ const char *name;
+ const intmax_t *defines;
+ size_t permutations;
+} bench_override_t;
+
+const bench_override_t *bench_overrides = NULL;
+size_t bench_override_count = 0;
+
+bench_define_t *bench_override_defines = NULL;
+size_t bench_override_define_count = 0;
+size_t bench_override_define_permutations = 1;
+size_t bench_override_define_capacity = 0;
+
+// suite/perm updates
+void bench_define_suite(const struct bench_suite *suite) {
+ bench_define_names[BENCH_DEFINE_NAMES_SUITE] = (bench_define_names_t){
+ suite->define_names, suite->define_count};
+
+ // make sure our cache is large enough
+ if (lfs_max(suite->define_count, BENCH_IMPLICIT_DEFINE_COUNT)
+ > bench_define_cache_count) {
+ // align to power of two to avoid any superlinear growth
+ size_t ncount = 1 << lfs_npw2(
+ lfs_max(suite->define_count, BENCH_IMPLICIT_DEFINE_COUNT));
+ bench_define_cache = realloc(bench_define_cache, ncount*sizeof(intmax_t));
+ bench_define_cache_mask = realloc(bench_define_cache_mask,
+ sizeof(unsigned)*(
+ (ncount+(8*sizeof(unsigned))-1)
+ / (8*sizeof(unsigned))));
+ bench_define_cache_count = ncount;
+ }
+
+ // map any overrides
+ if (bench_override_count > 0) {
+ // first figure out the total size of override permutations
+ size_t count = 0;
+ size_t permutations = 1;
+ for (size_t i = 0; i < bench_override_count; i++) {
+ for (size_t d = 0;
+ d < lfs_max(
+ suite->define_count,
+ BENCH_IMPLICIT_DEFINE_COUNT);
+ d++) {
+ // define name match?
+ const char *name = bench_define_name(d);
+ if (name && strcmp(name, bench_overrides[i].name) == 0) {
+ count = lfs_max(count, d+1);
+ permutations *= bench_overrides[i].permutations;
+ break;
+ }
+ }
+ }
+ bench_override_define_count = count;
+ bench_override_define_permutations = permutations;
+
+ // make sure our override arrays are big enough
+ if (count * permutations > bench_override_define_capacity) {
+ // align to power of two to avoid any superlinear growth
+ size_t ncapacity = 1 << lfs_npw2(count * permutations);
+ bench_override_defines = realloc(
+ bench_override_defines,
+ sizeof(bench_define_t)*ncapacity);
+ bench_override_define_capacity = ncapacity;
+ }
+
+ // zero unoverridden defines
+ memset(bench_override_defines, 0,
+ sizeof(bench_define_t) * count * permutations);
+
+ // compute permutations
+ size_t p = 1;
+ for (size_t i = 0; i < bench_override_count; i++) {
+ for (size_t d = 0;
+ d < lfs_max(
+ suite->define_count,
+ BENCH_IMPLICIT_DEFINE_COUNT);
+ d++) {
+ // define name match?
+ const char *name = bench_define_name(d);
+ if (name && strcmp(name, bench_overrides[i].name) == 0) {
+ // scatter the define permutations based on already
+ // seen permutations
+ for (size_t j = 0; j < permutations; j++) {
+ bench_override_defines[j*count + d] = BENCH_LIT(
+ bench_overrides[i].defines[(j/p)
+ % bench_overrides[i].permutations]);
+ }
+
+ // keep track of how many permutations we've seen so far
+ p *= bench_overrides[i].permutations;
+ break;
+ }
+ }
+ }
+ }
+}
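+
+// as a worked example of the override scatter above, overriding A=1,2 and
+// B=10,20 yields the cartesian product over j=0..3: (A,B) = (1,10), (2,10),
+// (1,20), (2,20), with earlier-matched overrides cycling fastest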
+
+void bench_define_perm(
+ const struct bench_suite *suite,
+ const struct bench_case *case_,
+ size_t perm) {
+ if (case_->defines) {
+ bench_define_maps[BENCH_DEFINE_MAP_PERMUTATION] = (bench_define_map_t){
+ case_->defines + perm*suite->define_count,
+ suite->define_count};
+ } else {
+ bench_define_maps[BENCH_DEFINE_MAP_PERMUTATION] = (bench_define_map_t){
+ NULL, 0};
+ }
+}
+
+void bench_define_override(size_t perm) {
+ bench_define_maps[BENCH_DEFINE_MAP_OVERRIDE] = (bench_define_map_t){
+ bench_override_defines + perm*bench_override_define_count,
+ bench_override_define_count};
+}
+
+void bench_define_explicit(
+ const bench_define_t *defines,
+ size_t define_count) {
+ bench_define_maps[BENCH_DEFINE_MAP_EXPLICIT] = (bench_define_map_t){
+ defines, define_count};
+}
+
+void bench_define_cleanup(void) {
+ // bench define management can allocate a few things
+ free(bench_define_cache);
+ free(bench_define_cache_mask);
+ free(bench_override_defines);
+}
+
+
+
+// bench state
+extern const bench_geometry_t *bench_geometries;
+extern size_t bench_geometry_count;
+
+const bench_id_t *bench_ids = (const bench_id_t[]) {
+ {NULL, NULL, 0},
+};
+size_t bench_id_count = 1;
+
+size_t bench_step_start = 0;
+size_t bench_step_stop = -1;
+size_t bench_step_step = 1;
+
+const char *bench_disk_path = NULL;
+const char *bench_trace_path = NULL;
+bool bench_trace_backtrace = false;
+uint32_t bench_trace_period = 0;
+uint32_t bench_trace_freq = 0;
+FILE *bench_trace_file = NULL;
+uint32_t bench_trace_cycles = 0;
+uint64_t bench_trace_time = 0;
+uint64_t bench_trace_open_time = 0;
+lfs_emubd_sleep_t bench_read_sleep = 0.0;
+lfs_emubd_sleep_t bench_prog_sleep = 0.0;
+lfs_emubd_sleep_t bench_erase_sleep = 0.0;
+
+// this determines both the backtrace buffer and the trace printf buffer; if
+// trace output ends up interleaved or truncated, this may need to be
+// increased
+#ifndef BENCH_TRACE_BACKTRACE_BUFFER_SIZE
+#define BENCH_TRACE_BACKTRACE_BUFFER_SIZE 8192
+#endif
+void *bench_trace_backtrace_buffer[
+ BENCH_TRACE_BACKTRACE_BUFFER_SIZE / sizeof(void*)];
+
+// trace printing
+void bench_trace(const char *fmt, ...) {
+ if (bench_trace_path) {
+ // sample at a specific period?
+ if (bench_trace_period) {
+ if (bench_trace_cycles % bench_trace_period != 0) {
+ bench_trace_cycles += 1;
+ return;
+ }
+ bench_trace_cycles += 1;
+ }
+
+ // sample at a specific frequency?
+ if (bench_trace_freq) {
+ struct timespec t;
+ clock_gettime(CLOCK_MONOTONIC, &t);
+ uint64_t now = (uint64_t)t.tv_sec*1000*1000*1000
+ + (uint64_t)t.tv_nsec;
+ if (now - bench_trace_time < (1000*1000*1000) / bench_trace_freq) {
+ return;
+ }
+ bench_trace_time = now;
+ }
+
+ if (!bench_trace_file) {
+            // Tracing output is heavy, and opening the trace file on every
+            // trace call is slow, so we only retry the open every so often.
+            // Note this doesn't affect successfully opened files
+ struct timespec t;
+ clock_gettime(CLOCK_MONOTONIC, &t);
+ uint64_t now = (uint64_t)t.tv_sec*1000*1000*1000
+ + (uint64_t)t.tv_nsec;
+ if (now - bench_trace_open_time < 100*1000*1000) {
+ return;
+ }
+ bench_trace_open_time = now;
+
+ // try to open the trace file
+ int fd;
+ if (strcmp(bench_trace_path, "-") == 0) {
+ fd = dup(1);
+ if (fd < 0) {
+ return;
+ }
+ } else {
+ fd = open(
+ bench_trace_path,
+ O_WRONLY | O_CREAT | O_APPEND | O_NONBLOCK,
+ 0666);
+ if (fd < 0) {
+ return;
+ }
+ int err = fcntl(fd, F_SETFL, O_WRONLY | O_CREAT | O_APPEND);
+ assert(!err);
+ }
+
+ FILE *f = fdopen(fd, "a");
+ assert(f);
+ int err = setvbuf(f, NULL, _IOFBF,
+ BENCH_TRACE_BACKTRACE_BUFFER_SIZE);
+ assert(!err);
+ bench_trace_file = f;
+ }
+
+ // print trace
+ va_list va;
+ va_start(va, fmt);
+ int res = vfprintf(bench_trace_file, fmt, va);
+ va_end(va);
+ if (res < 0) {
+ fclose(bench_trace_file);
+ bench_trace_file = NULL;
+ return;
+ }
+
+ if (bench_trace_backtrace) {
+ // print backtrace
+            // note backtrace() takes a capacity in entries, not bytes
+            size_t count = backtrace(
+                bench_trace_backtrace_buffer,
+                BENCH_TRACE_BACKTRACE_BUFFER_SIZE / sizeof(void*));
+ // note we skip our own stack frame
+ for (size_t i = 1; i < count; i++) {
+ res = fprintf(bench_trace_file, "\tat %p\n",
+ bench_trace_backtrace_buffer[i]);
+ if (res < 0) {
+ fclose(bench_trace_file);
+ bench_trace_file = NULL;
+ return;
+ }
+ }
+ }
+
+ // flush immediately
+ fflush(bench_trace_file);
+ }
+}
+
+
+// bench prng
+uint32_t bench_prng(uint32_t *state) {
+ // A simple xorshift32 generator, easily reproducible. Keep in mind
+ // determinism is much more important than actual randomness here.
+ uint32_t x = *state;
+ x ^= x << 13;
+ x ^= x >> 17;
+ x ^= x << 5;
+ *state = x;
+ return x;
+}
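+
+// this is Marsaglia's xorshift32 with the (13, 17, 5) shift triple, which
+// has period 2^32-1 over non-zero states; 0 is a fixed point, so seeds
+// must be non-zero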
+
+
+// bench recording state
+static struct lfs_config *bench_cfg = NULL;
+static lfs_emubd_io_t bench_last_readed = 0;
+static lfs_emubd_io_t bench_last_proged = 0;
+static lfs_emubd_io_t bench_last_erased = 0;
+lfs_emubd_io_t bench_readed = 0;
+lfs_emubd_io_t bench_proged = 0;
+lfs_emubd_io_t bench_erased = 0;
+
+void bench_reset(void) {
+ bench_readed = 0;
+ bench_proged = 0;
+ bench_erased = 0;
+ bench_last_readed = 0;
+ bench_last_proged = 0;
+ bench_last_erased = 0;
+}
+
+void bench_start(void) {
+ assert(bench_cfg);
+ lfs_emubd_sio_t readed = lfs_emubd_readed(bench_cfg);
+ assert(readed >= 0);
+ lfs_emubd_sio_t proged = lfs_emubd_proged(bench_cfg);
+ assert(proged >= 0);
+ lfs_emubd_sio_t erased = lfs_emubd_erased(bench_cfg);
+ assert(erased >= 0);
+
+ bench_last_readed = readed;
+ bench_last_proged = proged;
+ bench_last_erased = erased;
+}
+
+void bench_stop(void) {
+ assert(bench_cfg);
+ lfs_emubd_sio_t readed = lfs_emubd_readed(bench_cfg);
+ assert(readed >= 0);
+ lfs_emubd_sio_t proged = lfs_emubd_proged(bench_cfg);
+ assert(proged >= 0);
+ lfs_emubd_sio_t erased = lfs_emubd_erased(bench_cfg);
+ assert(erased >= 0);
+
+ bench_readed += readed - bench_last_readed;
+ bench_proged += proged - bench_last_proged;
+ bench_erased += erased - bench_last_erased;
+}
+
+
+// encode our permutation into a reusable id
+static void perm_printid(
+ const struct bench_suite *suite,
+ const struct bench_case *case_) {
+ (void)suite;
+ // case[:permutation]
+ printf("%s:", case_->name);
+ for (size_t d = 0;
+ d < lfs_max(
+ suite->define_count,
+ BENCH_IMPLICIT_DEFINE_COUNT);
+ d++) {
+ if (bench_define_ispermutation(d)) {
+ leb16_print(d);
+ leb16_print(BENCH_DEFINE(d));
+ }
+ }
+}
+
+// a quick trie for keeping track of permutations we've seen
+typedef struct bench_seen {
+ struct bench_seen_branch *branches;
+ size_t branch_count;
+ size_t branch_capacity;
+} bench_seen_t;
+
+struct bench_seen_branch {
+ intmax_t define;
+ struct bench_seen branch;
+};
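+
+// each trie level corresponds to one define index, with branches keyed by
+// that define's value, so one root-to-leaf path identifies one permutation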
+
+bool bench_seen_insert(
+ bench_seen_t *seen,
+ const struct bench_suite *suite,
+ const struct bench_case *case_) {
+ (void)case_;
+ bool was_seen = true;
+
+ // use the currently set defines
+ for (size_t d = 0;
+ d < lfs_max(
+ suite->define_count,
+ BENCH_IMPLICIT_DEFINE_COUNT);
+ d++) {
+ // treat unpermuted defines the same as 0
+ intmax_t define = bench_define_ispermutation(d) ? BENCH_DEFINE(d) : 0;
+
+ // already seen?
+ struct bench_seen_branch *branch = NULL;
+ for (size_t i = 0; i < seen->branch_count; i++) {
+ if (seen->branches[i].define == define) {
+ branch = &seen->branches[i];
+ break;
+ }
+ }
+
+ // need to create a new node
+ if (!branch) {
+ was_seen = false;
+ branch = mappend(
+ (void**)&seen->branches,
+ sizeof(struct bench_seen_branch),
+ &seen->branch_count,
+ &seen->branch_capacity);
+ branch->define = define;
+ branch->branch = (bench_seen_t){NULL, 0, 0};
+ }
+
+ seen = &branch->branch;
+ }
+
+ return was_seen;
+}
+
+void bench_seen_cleanup(bench_seen_t *seen) {
+ for (size_t i = 0; i < seen->branch_count; i++) {
+ bench_seen_cleanup(&seen->branches[i].branch);
+ }
+ free(seen->branches);
+}
+
+// iterate through permutations in a bench case
+static void case_forperm(
+ const struct bench_suite *suite,
+ const struct bench_case *case_,
+ const bench_define_t *defines,
+ size_t define_count,
+ void (*cb)(
+ void *data,
+ const struct bench_suite *suite,
+ const struct bench_case *case_),
+ void *data) {
+ // explicit permutation?
+ if (defines) {
+ bench_define_explicit(defines, define_count);
+
+ for (size_t v = 0; v < bench_override_define_permutations; v++) {
+ // define override permutation
+ bench_define_override(v);
+ bench_define_flush();
+
+ cb(data, suite, case_);
+ }
+
+ return;
+ }
+
+ bench_seen_t seen = {NULL, 0, 0};
+
+ for (size_t k = 0; k < case_->permutations; k++) {
+ // define permutation
+ bench_define_perm(suite, case_, k);
+
+ for (size_t v = 0; v < bench_override_define_permutations; v++) {
+ // define override permutation
+ bench_define_override(v);
+
+ for (size_t g = 0; g < bench_geometry_count; g++) {
+ // define geometry
+ bench_define_geometry(&bench_geometries[g]);
+ bench_define_flush();
+
+ // have we seen this permutation before?
+ bool was_seen = bench_seen_insert(&seen, suite, case_);
+ if (!(k == 0 && v == 0 && g == 0) && was_seen) {
+ continue;
+ }
+
+ cb(data, suite, case_);
+ }
+ }
+ }
+
+ bench_seen_cleanup(&seen);
+}
+
+
+// how many permutations are there actually in a bench case
+struct perm_count_state {
+ size_t total;
+ size_t filtered;
+};
+
+void perm_count(
+ void *data,
+ const struct bench_suite *suite,
+ const struct bench_case *case_) {
+ struct perm_count_state *state = data;
+ (void)suite;
+ (void)case_;
+
+ state->total += 1;
+
+ if (case_->filter && !case_->filter()) {
+ return;
+ }
+
+ state->filtered += 1;
+}
+
+
+// operations we can do
+static void summary(void) {
+ printf("%-23s %7s %7s %7s %11s\n",
+ "", "flags", "suites", "cases", "perms");
+ size_t suites = 0;
+ size_t cases = 0;
+ bench_flags_t flags = 0;
+ struct perm_count_state perms = {0, 0};
+
+ for (size_t t = 0; t < bench_id_count; t++) {
+ for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
+ bench_define_suite(&bench_suites[i]);
+
+ for (size_t j = 0; j < bench_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (bench_ids[t].name && !(
+ strcmp(bench_ids[t].name,
+ bench_suites[i].name) == 0
+ || strcmp(bench_ids[t].name,
+ bench_suites[i].cases[j].name) == 0)) {
+ continue;
+ }
+
+ cases += 1;
+ case_forperm(
+ &bench_suites[i],
+ &bench_suites[i].cases[j],
+ bench_ids[t].defines,
+ bench_ids[t].define_count,
+ perm_count,
+ &perms);
+ }
+
+ suites += 1;
+ flags |= bench_suites[i].flags;
+ }
+ }
+
+ char perm_buf[64];
+ sprintf(perm_buf, "%zu/%zu", perms.filtered, perms.total);
+ char flag_buf[64];
+ sprintf(flag_buf, "%s%s",
+ (flags & BENCH_REENTRANT) ? "r" : "",
+ (!flags) ? "-" : "");
+ printf("%-23s %7s %7zu %7zu %11s\n",
+ "TOTAL",
+ flag_buf,
+ suites,
+ cases,
+ perm_buf);
+}
+
+static void list_suites(void) {
+    // make the name column at least wide enough for the names
+ unsigned name_width = 23;
+ for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
+ size_t len = strlen(bench_suites[i].name);
+ if (len > name_width) {
+ name_width = len;
+ }
+ }
+ name_width = 4*((name_width+1+4-1)/4)-1;
+
+ printf("%-*s %7s %7s %11s\n",
+ name_width, "suite", "flags", "cases", "perms");
+ for (size_t t = 0; t < bench_id_count; t++) {
+ for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
+ bench_define_suite(&bench_suites[i]);
+
+ size_t cases = 0;
+ struct perm_count_state perms = {0, 0};
+
+ for (size_t j = 0; j < bench_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (bench_ids[t].name && !(
+ strcmp(bench_ids[t].name,
+ bench_suites[i].name) == 0
+ || strcmp(bench_ids[t].name,
+ bench_suites[i].cases[j].name) == 0)) {
+ continue;
+ }
+
+ cases += 1;
+ case_forperm(
+ &bench_suites[i],
+ &bench_suites[i].cases[j],
+ bench_ids[t].defines,
+ bench_ids[t].define_count,
+ perm_count,
+ &perms);
+ }
+
+ // no benches found?
+ if (!cases) {
+ continue;
+ }
+
+ char perm_buf[64];
+ sprintf(perm_buf, "%zu/%zu", perms.filtered, perms.total);
+ char flag_buf[64];
+ sprintf(flag_buf, "%s%s",
+ (bench_suites[i].flags & BENCH_REENTRANT) ? "r" : "",
+ (!bench_suites[i].flags) ? "-" : "");
+ printf("%-*s %7s %7zu %11s\n",
+ name_width,
+ bench_suites[i].name,
+ flag_buf,
+ cases,
+ perm_buf);
+ }
+ }
+}
+
+static void list_cases(void) {
+    // make the name column at least wide enough for the names
+ unsigned name_width = 23;
+ for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
+ for (size_t j = 0; j < bench_suites[i].case_count; j++) {
+ size_t len = strlen(bench_suites[i].cases[j].name);
+ if (len > name_width) {
+ name_width = len;
+ }
+ }
+ }
+ name_width = 4*((name_width+1+4-1)/4)-1;
+
+ printf("%-*s %7s %11s\n", name_width, "case", "flags", "perms");
+ for (size_t t = 0; t < bench_id_count; t++) {
+ for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
+ bench_define_suite(&bench_suites[i]);
+
+ for (size_t j = 0; j < bench_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (bench_ids[t].name && !(
+ strcmp(bench_ids[t].name,
+ bench_suites[i].name) == 0
+ || strcmp(bench_ids[t].name,
+ bench_suites[i].cases[j].name) == 0)) {
+ continue;
+ }
+
+ struct perm_count_state perms = {0, 0};
+ case_forperm(
+ &bench_suites[i],
+ &bench_suites[i].cases[j],
+ bench_ids[t].defines,
+ bench_ids[t].define_count,
+ perm_count,
+ &perms);
+
+ char perm_buf[64];
+ sprintf(perm_buf, "%zu/%zu", perms.filtered, perms.total);
+ char flag_buf[64];
+ sprintf(flag_buf, "%s%s",
+ (bench_suites[i].cases[j].flags & BENCH_REENTRANT)
+ ? "r" : "",
+ (!bench_suites[i].cases[j].flags)
+ ? "-" : "");
+ printf("%-*s %7s %11s\n",
+ name_width,
+ bench_suites[i].cases[j].name,
+ flag_buf,
+ perm_buf);
+ }
+ }
+ }
+}
+
+static void list_suite_paths(void) {
+    // make the name column at least wide enough for the names
+ unsigned name_width = 23;
+ for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
+ size_t len = strlen(bench_suites[i].name);
+ if (len > name_width) {
+ name_width = len;
+ }
+ }
+ name_width = 4*((name_width+1+4-1)/4)-1;
+
+ printf("%-*s %s\n", name_width, "suite", "path");
+ for (size_t t = 0; t < bench_id_count; t++) {
+ for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
+ size_t cases = 0;
+
+ for (size_t j = 0; j < bench_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (bench_ids[t].name && !(
+ strcmp(bench_ids[t].name,
+ bench_suites[i].name) == 0
+ || strcmp(bench_ids[t].name,
+ bench_suites[i].cases[j].name) == 0)) {
+                    continue;
+                }
+
+                cases += 1;
+ }
+
+ // no benches found?
+ if (!cases) {
+ continue;
+ }
+
+ printf("%-*s %s\n",
+ name_width,
+ bench_suites[i].name,
+ bench_suites[i].path);
+ }
+ }
+}
+
+static void list_case_paths(void) {
+    // make the name column at least wide enough for the names
+ unsigned name_width = 23;
+ for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
+ for (size_t j = 0; j < bench_suites[i].case_count; j++) {
+ size_t len = strlen(bench_suites[i].cases[j].name);
+ if (len > name_width) {
+ name_width = len;
+ }
+ }
+ }
+ name_width = 4*((name_width+1+4-1)/4)-1;
+
+ printf("%-*s %s\n", name_width, "case", "path");
+ for (size_t t = 0; t < bench_id_count; t++) {
+ for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
+ for (size_t j = 0; j < bench_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (bench_ids[t].name && !(
+ strcmp(bench_ids[t].name,
+ bench_suites[i].name) == 0
+ || strcmp(bench_ids[t].name,
+ bench_suites[i].cases[j].name) == 0)) {
+ continue;
+ }
+
+ printf("%-*s %s\n",
+ name_width,
+ bench_suites[i].cases[j].name,
+ bench_suites[i].cases[j].path);
+ }
+ }
+ }
+}
+
+struct list_defines_define {
+ const char *name;
+ intmax_t *values;
+ size_t value_count;
+ size_t value_capacity;
+};
+
+struct list_defines_defines {
+ struct list_defines_define *defines;
+ size_t define_count;
+ size_t define_capacity;
+};
+
+static void list_defines_add(
+ struct list_defines_defines *defines,
+ size_t d) {
+ const char *name = bench_define_name(d);
+ intmax_t value = BENCH_DEFINE(d);
+
+ // define already in defines?
+ for (size_t i = 0; i < defines->define_count; i++) {
+ if (strcmp(defines->defines[i].name, name) == 0) {
+ // value already in values?
+ for (size_t j = 0; j < defines->defines[i].value_count; j++) {
+ if (defines->defines[i].values[j] == value) {
+ return;
+ }
+ }
+
+ *(intmax_t*)mappend(
+ (void**)&defines->defines[i].values,
+ sizeof(intmax_t),
+ &defines->defines[i].value_count,
+ &defines->defines[i].value_capacity) = value;
+
+ return;
+ }
+ }
+
+ // new define?
+ struct list_defines_define *define = mappend(
+ (void**)&defines->defines,
+ sizeof(struct list_defines_define),
+ &defines->define_count,
+ &defines->define_capacity);
+ define->name = name;
+ define->values = malloc(sizeof(intmax_t));
+ define->values[0] = value;
+ define->value_count = 1;
+ define->value_capacity = 1;
+}
+
+void perm_list_defines(
+ void *data,
+ const struct bench_suite *suite,
+ const struct bench_case *case_) {
+ struct list_defines_defines *defines = data;
+ (void)suite;
+ (void)case_;
+
+ // collect defines
+ for (size_t d = 0;
+ d < lfs_max(suite->define_count,
+ BENCH_IMPLICIT_DEFINE_COUNT);
+ d++) {
+ if (d < BENCH_IMPLICIT_DEFINE_COUNT
+ || bench_define_ispermutation(d)) {
+ list_defines_add(defines, d);
+ }
+ }
+}
+
+void perm_list_permutation_defines(
+ void *data,
+ const struct bench_suite *suite,
+ const struct bench_case *case_) {
+ struct list_defines_defines *defines = data;
+ (void)suite;
+ (void)case_;
+
+ // collect permutation_defines
+ for (size_t d = 0;
+ d < lfs_max(suite->define_count,
+ BENCH_IMPLICIT_DEFINE_COUNT);
+ d++) {
+ if (bench_define_ispermutation(d)) {
+ list_defines_add(defines, d);
+ }
+ }
+}
+
+extern const bench_geometry_t builtin_geometries[];
+
+static void list_defines(void) {
+ struct list_defines_defines defines = {NULL, 0, 0};
+
+ // add defines
+ for (size_t t = 0; t < bench_id_count; t++) {
+ for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
+ bench_define_suite(&bench_suites[i]);
+
+ for (size_t j = 0; j < bench_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (bench_ids[t].name && !(
+ strcmp(bench_ids[t].name,
+ bench_suites[i].name) == 0
+ || strcmp(bench_ids[t].name,
+ bench_suites[i].cases[j].name) == 0)) {
+ continue;
+ }
+
+ case_forperm(
+ &bench_suites[i],
+ &bench_suites[i].cases[j],
+ bench_ids[t].defines,
+ bench_ids[t].define_count,
+ perm_list_defines,
+ &defines);
+ }
+ }
+ }
+
+ for (size_t i = 0; i < defines.define_count; i++) {
+ printf("%s=", defines.defines[i].name);
+ for (size_t j = 0; j < defines.defines[i].value_count; j++) {
+ printf("%jd", defines.defines[i].values[j]);
+ if (j != defines.defines[i].value_count-1) {
+ printf(",");
+ }
+ }
+ printf("\n");
+ }
+
+ for (size_t i = 0; i < defines.define_count; i++) {
+ free(defines.defines[i].values);
+ }
+ free(defines.defines);
+}
+
+static void list_permutation_defines(void) {
+ struct list_defines_defines defines = {NULL, 0, 0};
+
+ // add permutation defines
+ for (size_t t = 0; t < bench_id_count; t++) {
+ for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
+ bench_define_suite(&bench_suites[i]);
+
+ for (size_t j = 0; j < bench_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (bench_ids[t].name && !(
+ strcmp(bench_ids[t].name,
+ bench_suites[i].name) == 0
+ || strcmp(bench_ids[t].name,
+ bench_suites[i].cases[j].name) == 0)) {
+ continue;
+ }
+
+ case_forperm(
+ &bench_suites[i],
+ &bench_suites[i].cases[j],
+ bench_ids[t].defines,
+ bench_ids[t].define_count,
+ perm_list_permutation_defines,
+ &defines);
+ }
+ }
+ }
+
+ for (size_t i = 0; i < defines.define_count; i++) {
+ printf("%s=", defines.defines[i].name);
+ for (size_t j = 0; j < defines.defines[i].value_count; j++) {
+ printf("%jd", defines.defines[i].values[j]);
+ if (j != defines.defines[i].value_count-1) {
+ printf(",");
+ }
+ }
+ printf("\n");
+ }
+
+ for (size_t i = 0; i < defines.define_count; i++) {
+ free(defines.defines[i].values);
+ }
+ free(defines.defines);
+}
+
+static void list_implicit_defines(void) {
+ struct list_defines_defines defines = {NULL, 0, 0};
+
+    // yes we do need to define a suite, as this does a bit of bookkeeping
+    // such as setting up the define cache
+ bench_define_suite(&(const struct bench_suite){0});
+
+ // make sure to include builtin geometries here
+ extern const bench_geometry_t builtin_geometries[];
+ for (size_t g = 0; builtin_geometries[g].name; g++) {
+ bench_define_geometry(&builtin_geometries[g]);
+ bench_define_flush();
+
+ // add implicit defines
+ for (size_t d = 0; d < BENCH_IMPLICIT_DEFINE_COUNT; d++) {
+ list_defines_add(&defines, d);
+ }
+ }
+
+ for (size_t i = 0; i < defines.define_count; i++) {
+ printf("%s=", defines.defines[i].name);
+ for (size_t j = 0; j < defines.defines[i].value_count; j++) {
+ printf("%jd", defines.defines[i].values[j]);
+ if (j != defines.defines[i].value_count-1) {
+ printf(",");
+ }
+ }
+ printf("\n");
+ }
+
+ for (size_t i = 0; i < defines.define_count; i++) {
+ free(defines.defines[i].values);
+ }
+ free(defines.defines);
+}
+
+
+
+// geometries to bench
+
+const bench_geometry_t builtin_geometries[] = {
+ {"default", {{0}, BENCH_CONST(16), BENCH_CONST(512), {0}}},
+ {"eeprom", {{0}, BENCH_CONST(1), BENCH_CONST(512), {0}}},
+ {"emmc", {{0}, {0}, BENCH_CONST(512), {0}}},
+ {"nor", {{0}, BENCH_CONST(1), BENCH_CONST(4096), {0}}},
+ {"nand", {{0}, BENCH_CONST(4096), BENCH_CONST(32768), {0}}},
+ {NULL, {{0}, {0}, {0}, {0}}},
+};
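+
+// geometry defines are ordered {READ_SIZE, PROG_SIZE, ERASE_SIZE,
+// ERASE_COUNT}, with zeroed slots falling back to the implicit defaults,
+// e.g. "nor" models a byte-programmable device with 4096-byte erase blocks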
+
+const bench_geometry_t *bench_geometries = builtin_geometries;
+size_t bench_geometry_count = 5;
+
+static void list_geometries(void) {
+    // make the name column at least wide enough for the names
+ unsigned name_width = 23;
+ for (size_t g = 0; builtin_geometries[g].name; g++) {
+ size_t len = strlen(builtin_geometries[g].name);
+ if (len > name_width) {
+ name_width = len;
+ }
+ }
+ name_width = 4*((name_width+1+4-1)/4)-1;
+
+    // yes we do need to define a suite, as this does a bit of bookkeeping
+    // such as setting up the define cache
+ bench_define_suite(&(const struct bench_suite){0});
+
+ printf("%-*s %7s %7s %7s %7s %11s\n",
+ name_width, "geometry", "read", "prog", "erase", "count", "size");
+ for (size_t g = 0; builtin_geometries[g].name; g++) {
+ bench_define_geometry(&builtin_geometries[g]);
+ bench_define_flush();
+ printf("%-*s %7ju %7ju %7ju %7ju %11ju\n",
+ name_width,
+ builtin_geometries[g].name,
+ READ_SIZE,
+ PROG_SIZE,
+ ERASE_SIZE,
+ ERASE_COUNT,
+ ERASE_SIZE*ERASE_COUNT);
+ }
+}
+
+
+
+// global bench step count
+size_t bench_step = 0;
+
+void perm_run(
+ void *data,
+ const struct bench_suite *suite,
+ const struct bench_case *case_) {
+ (void)data;
+
+ // skip this step?
+ if (!(bench_step >= bench_step_start
+ && bench_step < bench_step_stop
+ && (bench_step-bench_step_start) % bench_step_step == 0)) {
+ bench_step += 1;
+ return;
+ }
+ bench_step += 1;
+
+ // filter?
+ if (case_->filter && !case_->filter()) {
+ printf("skipped ");
+ perm_printid(suite, case_);
+ printf("\n");
+ return;
+ }
+
+ // create block device and configuration
+ lfs_emubd_t bd;
+
+ struct lfs_config cfg = {
+ .context = &bd,
+ .read = lfs_emubd_read,
+ .prog = lfs_emubd_prog,
+ .erase = lfs_emubd_erase,
+ .sync = lfs_emubd_sync,
+ .read_size = READ_SIZE,
+ .prog_size = PROG_SIZE,
+ .block_size = BLOCK_SIZE,
+ .block_count = BLOCK_COUNT,
+ .block_cycles = BLOCK_CYCLES,
+ .cache_size = CACHE_SIZE,
+ .lookahead_size = LOOKAHEAD_SIZE,
+ .compact_thresh = COMPACT_THRESH,
+ .metadata_max = METADATA_MAX,
+ .inline_max = INLINE_MAX,
+ };
+
+ struct lfs_emubd_config bdcfg = {
+ .read_size = READ_SIZE,
+ .prog_size = PROG_SIZE,
+ .erase_size = ERASE_SIZE,
+ .erase_count = ERASE_COUNT,
+ .erase_value = ERASE_VALUE,
+ .erase_cycles = ERASE_CYCLES,
+ .badblock_behavior = BADBLOCK_BEHAVIOR,
+ .disk_path = bench_disk_path,
+ .read_sleep = bench_read_sleep,
+ .prog_sleep = bench_prog_sleep,
+ .erase_sleep = bench_erase_sleep,
+ };
+
+ int err = lfs_emubd_create(&cfg, &bdcfg);
+ if (err) {
+ fprintf(stderr, "error: could not create block device: %d\n", err);
+ exit(-1);
+ }
+
+ // run the bench
+ bench_cfg = &cfg;
+ bench_reset();
+ printf("running ");
+ perm_printid(suite, case_);
+ printf("\n");
+
+ case_->run(&cfg);
+
+ printf("finished ");
+ perm_printid(suite, case_);
+ printf(" %"PRIu64" %"PRIu64" %"PRIu64,
+ bench_readed,
+ bench_proged,
+ bench_erased);
+ printf("\n");
+
+ // cleanup
+ err = lfs_emubd_destroy(&cfg);
+ if (err) {
+ fprintf(stderr, "error: could not destroy block device: %d\n", err);
+ exit(-1);
+ }
+}
+
+static void run(void) {
+ // ignore disconnected pipes
+ signal(SIGPIPE, SIG_IGN);
+
+ for (size_t t = 0; t < bench_id_count; t++) {
+ for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
+ bench_define_suite(&bench_suites[i]);
+
+ for (size_t j = 0; j < bench_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (bench_ids[t].name && !(
+ strcmp(bench_ids[t].name,
+ bench_suites[i].name) == 0
+ || strcmp(bench_ids[t].name,
+ bench_suites[i].cases[j].name) == 0)) {
+ continue;
+ }
+
+ case_forperm(
+ &bench_suites[i],
+ &bench_suites[i].cases[j],
+ bench_ids[t].defines,
+ bench_ids[t].define_count,
+ perm_run,
+ NULL);
+ }
+ }
+ }
+}
+
+
+
+// option handling
+enum opt_flags {
+ OPT_HELP = 'h',
+ OPT_SUMMARY = 'Y',
+ OPT_LIST_SUITES = 'l',
+ OPT_LIST_CASES = 'L',
+ OPT_LIST_SUITE_PATHS = 1,
+ OPT_LIST_CASE_PATHS = 2,
+ OPT_LIST_DEFINES = 3,
+ OPT_LIST_PERMUTATION_DEFINES = 4,
+ OPT_LIST_IMPLICIT_DEFINES = 5,
+ OPT_LIST_GEOMETRIES = 6,
+ OPT_DEFINE = 'D',
+ OPT_GEOMETRY = 'G',
+ OPT_STEP = 's',
+ OPT_DISK = 'd',
+ OPT_TRACE = 't',
+ OPT_TRACE_BACKTRACE = 7,
+ OPT_TRACE_PERIOD = 8,
+ OPT_TRACE_FREQ = 9,
+ OPT_READ_SLEEP = 10,
+ OPT_PROG_SLEEP = 11,
+ OPT_ERASE_SLEEP = 12,
+};
+
+const char *short_opts = "hYlLD:G:s:d:t:";
+
+const struct option long_opts[] = {
+ {"help", no_argument, NULL, OPT_HELP},
+ {"summary", no_argument, NULL, OPT_SUMMARY},
+ {"list-suites", no_argument, NULL, OPT_LIST_SUITES},
+ {"list-cases", no_argument, NULL, OPT_LIST_CASES},
+ {"list-suite-paths", no_argument, NULL, OPT_LIST_SUITE_PATHS},
+ {"list-case-paths", no_argument, NULL, OPT_LIST_CASE_PATHS},
+ {"list-defines", no_argument, NULL, OPT_LIST_DEFINES},
+ {"list-permutation-defines",
+ no_argument, NULL, OPT_LIST_PERMUTATION_DEFINES},
+ {"list-implicit-defines",
+ no_argument, NULL, OPT_LIST_IMPLICIT_DEFINES},
+ {"list-geometries", no_argument, NULL, OPT_LIST_GEOMETRIES},
+ {"define", required_argument, NULL, OPT_DEFINE},
+ {"geometry", required_argument, NULL, OPT_GEOMETRY},
+ {"step", required_argument, NULL, OPT_STEP},
+ {"disk", required_argument, NULL, OPT_DISK},
+ {"trace", required_argument, NULL, OPT_TRACE},
+ {"trace-backtrace", no_argument, NULL, OPT_TRACE_BACKTRACE},
+ {"trace-period", required_argument, NULL, OPT_TRACE_PERIOD},
+ {"trace-freq", required_argument, NULL, OPT_TRACE_FREQ},
+ {"read-sleep", required_argument, NULL, OPT_READ_SLEEP},
+ {"prog-sleep", required_argument, NULL, OPT_PROG_SLEEP},
+ {"erase-sleep", required_argument, NULL, OPT_ERASE_SLEEP},
+ {NULL, 0, NULL, 0},
+};
+
+const char *const help_text[] = {
+ "Show this help message.",
+ "Show quick summary.",
+ "List bench suites.",
+ "List bench cases.",
+ "List the path for each bench suite.",
+ "List the path and line number for each bench case.",
+ "List all defines in this bench-runner.",
+ "List explicit defines in this bench-runner.",
+ "List implicit defines in this bench-runner.",
+ "List the available disk geometries.",
+ "Override a bench define.",
+ "Comma-separated list of disk geometries to bench.",
+ "Comma-separated range of bench permutations to run (start,stop,step).",
+ "Direct block device operations to this file.",
+ "Direct trace output to this file.",
+ "Include a backtrace with every trace statement.",
+ "Sample trace output at this period in cycles.",
+ "Sample trace output at this frequency in hz.",
+ "Artificial read delay in seconds.",
+ "Artificial prog delay in seconds.",
+ "Artificial erase delay in seconds.",
+};
+
+int main(int argc, char **argv) {
+ void (*op)(void) = run;
+
+ size_t bench_override_capacity = 0;
+ size_t bench_geometry_capacity = 0;
+ size_t bench_id_capacity = 0;
+
+ // parse options
+ while (true) {
+ int c = getopt_long(argc, argv, short_opts, long_opts, NULL);
+ switch (c) {
+ // generate help message
+ case OPT_HELP: {
+ printf("usage: %s [options] [bench_id]\n", argv[0]);
+ printf("\n");
+
+ printf("options:\n");
+ size_t i = 0;
+ while (long_opts[i].name) {
+ size_t indent;
+ if (long_opts[i].has_arg == no_argument) {
+ if (long_opts[i].val >= '0' && long_opts[i].val < 'z') {
+ indent = printf(" -%c, --%s ",
+ long_opts[i].val,
+ long_opts[i].name);
+ } else {
+ indent = printf(" --%s ",
+ long_opts[i].name);
+ }
+ } else {
+ if (long_opts[i].val >= '0' && long_opts[i].val < 'z') {
+ indent = printf(" -%c %s, --%s %s ",
+ long_opts[i].val,
+ long_opts[i].name,
+ long_opts[i].name,
+ long_opts[i].name);
+ } else {
+ indent = printf(" --%s %s ",
+ long_opts[i].name,
+ long_opts[i].name);
+ }
+ }
+
+ // a quick, hacky, byte-level method for text wrapping
+ size_t len = strlen(help_text[i]);
+ size_t j = 0;
+ if (indent < 24) {
+ printf("%*s %.80s\n",
+ (int)(24-1-indent),
+ "",
+ &help_text[i][j]);
+ j += 80;
+ } else {
+ printf("\n");
+ }
+
+ while (j < len) {
+ printf("%24s%.80s\n", "", &help_text[i][j]);
+ j += 80;
+ }
+
+ i += 1;
+ }
+
+ printf("\n");
+ exit(0);
+ }
+ // summary/list flags
+ case OPT_SUMMARY:
+ op = summary;
+ break;
+ case OPT_LIST_SUITES:
+ op = list_suites;
+ break;
+ case OPT_LIST_CASES:
+ op = list_cases;
+ break;
+ case OPT_LIST_SUITE_PATHS:
+ op = list_suite_paths;
+ break;
+ case OPT_LIST_CASE_PATHS:
+ op = list_case_paths;
+ break;
+ case OPT_LIST_DEFINES:
+ op = list_defines;
+ break;
+ case OPT_LIST_PERMUTATION_DEFINES:
+ op = list_permutation_defines;
+ break;
+ case OPT_LIST_IMPLICIT_DEFINES:
+ op = list_implicit_defines;
+ break;
+ case OPT_LIST_GEOMETRIES:
+ op = list_geometries;
+ break;
+ // configuration
+ case OPT_DEFINE: {
+ // allocate space
+ bench_override_t *override = mappend(
+ (void**)&bench_overrides,
+ sizeof(bench_override_t),
+ &bench_override_count,
+ &bench_override_capacity);
+
+ // parse into string key/intmax_t value, cannibalizing the
+ // arg in the process
+ char *sep = strchr(optarg, '=');
+ char *parsed = NULL;
+ if (!sep) {
+ goto invalid_define;
+ }
+ *sep = '\0';
+ override->name = optarg;
+ optarg = sep+1;
+
+ // parse comma-separated permutations
+ {
+ override->defines = NULL;
+ override->permutations = 0;
+ size_t override_capacity = 0;
+ while (true) {
+ optarg += strspn(optarg, " ");
+
+ if (strncmp(optarg, "range", strlen("range")) == 0) {
+ // range of values
+ optarg += strlen("range");
+ optarg += strspn(optarg, " ");
+ if (*optarg != '(') {
+ goto invalid_define;
+ }
+ optarg += 1;
+
+ intmax_t start = strtoumax(optarg, &parsed, 0);
+ intmax_t stop = -1;
+ intmax_t step = 1;
+ // allow empty string for start=0
+ if (parsed == optarg) {
+ start = 0;
+ }
+ optarg = parsed + strspn(parsed, " ");
+
+ if (*optarg != ',' && *optarg != ')') {
+ goto invalid_define;
+ }
+
+ if (*optarg == ',') {
+ optarg += 1;
+ stop = strtoumax(optarg, &parsed, 0);
+ // allow empty string for stop=end
+ if (parsed == optarg) {
+ stop = -1;
+ }
+ optarg = parsed + strspn(parsed, " ");
+
+ if (*optarg != ',' && *optarg != ')') {
+ goto invalid_define;
+ }
+
+ if (*optarg == ',') {
+ optarg += 1;
+ step = strtoumax(optarg, &parsed, 0);
+                                // allow empty string for step=1
+ if (parsed == optarg) {
+ step = 1;
+ }
+ optarg = parsed + strspn(parsed, " ");
+
+ if (*optarg != ')') {
+ goto invalid_define;
+ }
+ }
+ } else {
+ // single value = stop only
+ stop = start;
+ start = 0;
+ }
+
+ if (*optarg != ')') {
+ goto invalid_define;
+ }
+ optarg += 1;
+
+ // calculate the range of values
+ assert(step != 0);
+ for (intmax_t i = start;
+ (step < 0)
+ ? i > stop
+ : (uintmax_t)i < (uintmax_t)stop;
+ i += step) {
+ *(intmax_t*)mappend(
+ (void**)&override->defines,
+ sizeof(intmax_t),
+ &override->permutations,
+ &override_capacity) = i;
+ }
+ } else if (*optarg != '\0') {
+ // single value
+ intmax_t define = strtoimax(optarg, &parsed, 0);
+ if (parsed == optarg) {
+ goto invalid_define;
+ }
+ optarg = parsed + strspn(parsed, " ");
+ *(intmax_t*)mappend(
+ (void**)&override->defines,
+ sizeof(intmax_t),
+ &override->permutations,
+ &override_capacity) = define;
+ } else {
+ break;
+ }
+
+ if (*optarg == ',') {
+ optarg += 1;
+ }
+ }
+ }
+ assert(override->permutations > 0);
+ break;
+
+invalid_define:
+ fprintf(stderr, "error: invalid define: %s\n", optarg);
+ exit(-1);
+ }
+ case OPT_GEOMETRY: {
+ // reset our geometry scenarios
+ if (bench_geometry_capacity > 0) {
+ free((bench_geometry_t*)bench_geometries);
+ }
+ bench_geometries = NULL;
+ bench_geometry_count = 0;
+ bench_geometry_capacity = 0;
+
+ // parse the comma separated list of disk geometries
+ while (*optarg) {
+ // allocate space
+ bench_geometry_t *geometry = mappend(
+ (void**)&bench_geometries,
+ sizeof(bench_geometry_t),
+ &bench_geometry_count,
+ &bench_geometry_capacity);
+
+ // parse the disk geometry
+ optarg += strspn(optarg, " ");
+
+ // named disk geometry
+ size_t len = strcspn(optarg, " ,");
+ for (size_t i = 0; builtin_geometries[i].name; i++) {
+ if (len == strlen(builtin_geometries[i].name)
+ && memcmp(optarg,
+ builtin_geometries[i].name,
+ len) == 0) {
+ *geometry = builtin_geometries[i];
+ optarg += len;
+ goto geometry_next;
+ }
+ }
+
+ // comma-separated read/prog/erase/count
+ if (*optarg == '{') {
+ lfs_size_t sizes[4];
+ size_t count = 0;
+
+ char *s = optarg + 1;
+ while (count < 4) {
+ char *parsed = NULL;
+ sizes[count] = strtoumax(s, &parsed, 0);
+ count += 1;
+
+ s = parsed + strspn(parsed, " ");
+ if (*s == ',') {
+ s += 1;
+ continue;
+ } else if (*s == '}') {
+ s += 1;
+ break;
+ } else {
+ goto geometry_unknown;
+ }
+ }
+
+ // allow implicit r=p and p=e for common geometries
+ memset(geometry, 0, sizeof(bench_geometry_t));
+ if (count >= 3) {
+ geometry->defines[READ_SIZE_i]
+ = BENCH_LIT(sizes[0]);
+ geometry->defines[PROG_SIZE_i]
+ = BENCH_LIT(sizes[1]);
+ geometry->defines[ERASE_SIZE_i]
+ = BENCH_LIT(sizes[2]);
+ } else if (count >= 2) {
+ geometry->defines[PROG_SIZE_i]
+ = BENCH_LIT(sizes[0]);
+ geometry->defines[ERASE_SIZE_i]
+ = BENCH_LIT(sizes[1]);
+ } else {
+ geometry->defines[ERASE_SIZE_i]
+ = BENCH_LIT(sizes[0]);
+ }
+ if (count >= 4) {
+ geometry->defines[ERASE_COUNT_i]
+ = BENCH_LIT(sizes[3]);
+ }
+ optarg = s;
+ goto geometry_next;
+ }
+
+ // leb16-encoded read/prog/erase/count
+ if (*optarg == ':') {
+ lfs_size_t sizes[4];
+ size_t count = 0;
+
+ char *s = optarg + 1;
+ while (true) {
+ char *parsed = NULL;
+ uintmax_t x = leb16_parse(s, &parsed);
+ if (parsed == s || count >= 4) {
+ break;
+ }
+
+ sizes[count] = x;
+ count += 1;
+ s = parsed;
+ }
+
+ // allow implicit r=p and p=e for common geometries
+ memset(geometry, 0, sizeof(bench_geometry_t));
+ if (count >= 3) {
+ geometry->defines[READ_SIZE_i]
+ = BENCH_LIT(sizes[0]);
+ geometry->defines[PROG_SIZE_i]
+ = BENCH_LIT(sizes[1]);
+ geometry->defines[ERASE_SIZE_i]
+ = BENCH_LIT(sizes[2]);
+ } else if (count >= 2) {
+ geometry->defines[PROG_SIZE_i]
+ = BENCH_LIT(sizes[0]);
+ geometry->defines[ERASE_SIZE_i]
+ = BENCH_LIT(sizes[1]);
+ } else {
+ geometry->defines[ERASE_SIZE_i]
+ = BENCH_LIT(sizes[0]);
+ }
+ if (count >= 4) {
+ geometry->defines[ERASE_COUNT_i]
+ = BENCH_LIT(sizes[3]);
+ }
+ optarg = s;
+ goto geometry_next;
+ }
+
+geometry_unknown:
+ // unknown scenario?
+ fprintf(stderr, "error: unknown disk geometry: %s\n",
+ optarg);
+ exit(-1);
+
+geometry_next:
+ optarg += strspn(optarg, " ");
+ if (*optarg == ',') {
+ optarg += 1;
+ } else if (*optarg == '\0') {
+ break;
+ } else {
+ goto geometry_unknown;
+ }
+ }
+ break;
+ }
+ case OPT_STEP: {
+ char *parsed = NULL;
+ bench_step_start = strtoumax(optarg, &parsed, 0);
+ bench_step_stop = -1;
+ bench_step_step = 1;
+ // allow empty string for start=0
+ if (parsed == optarg) {
+ bench_step_start = 0;
+ }
+ optarg = parsed + strspn(parsed, " ");
+
+ if (*optarg != ',' && *optarg != '\0') {
+ goto step_unknown;
+ }
+
+ if (*optarg == ',') {
+ optarg += 1;
+ bench_step_stop = strtoumax(optarg, &parsed, 0);
+ // allow empty string for stop=end
+ if (parsed == optarg) {
+ bench_step_stop = -1;
+ }
+ optarg = parsed + strspn(parsed, " ");
+
+ if (*optarg != ',' && *optarg != '\0') {
+ goto step_unknown;
+ }
+
+ if (*optarg == ',') {
+ optarg += 1;
+ bench_step_step = strtoumax(optarg, &parsed, 0);
+                    // allow empty string for step=1
+ if (parsed == optarg) {
+ bench_step_step = 1;
+ }
+ optarg = parsed + strspn(parsed, " ");
+
+ if (*optarg != '\0') {
+ goto step_unknown;
+ }
+ }
+ } else {
+ // single value = stop only
+ bench_step_stop = bench_step_start;
+ bench_step_start = 0;
+ }
+
+ break;
+step_unknown:
+ fprintf(stderr, "error: invalid step: %s\n", optarg);
+ exit(-1);
+ }
+ case OPT_DISK:
+ bench_disk_path = optarg;
+ break;
+ case OPT_TRACE:
+ bench_trace_path = optarg;
+ break;
+ case OPT_TRACE_BACKTRACE:
+ bench_trace_backtrace = true;
+ break;
+ case OPT_TRACE_PERIOD: {
+ char *parsed = NULL;
+ bench_trace_period = strtoumax(optarg, &parsed, 0);
+ if (parsed == optarg) {
+ fprintf(stderr, "error: invalid trace-period: %s\n", optarg);
+ exit(-1);
+ }
+ break;
+ }
+ case OPT_TRACE_FREQ: {
+ char *parsed = NULL;
+ bench_trace_freq = strtoumax(optarg, &parsed, 0);
+ if (parsed == optarg) {
+ fprintf(stderr, "error: invalid trace-freq: %s\n", optarg);
+ exit(-1);
+ }
+ break;
+ }
+ case OPT_READ_SLEEP: {
+ char *parsed = NULL;
+ double read_sleep = strtod(optarg, &parsed);
+ if (parsed == optarg) {
+ fprintf(stderr, "error: invalid read-sleep: %s\n", optarg);
+ exit(-1);
+ }
+ bench_read_sleep = read_sleep*1.0e9;
+ break;
+ }
+ case OPT_PROG_SLEEP: {
+ char *parsed = NULL;
+ double prog_sleep = strtod(optarg, &parsed);
+ if (parsed == optarg) {
+ fprintf(stderr, "error: invalid prog-sleep: %s\n", optarg);
+ exit(-1);
+ }
+ bench_prog_sleep = prog_sleep*1.0e9;
+ break;
+ }
+ case OPT_ERASE_SLEEP: {
+ char *parsed = NULL;
+ double erase_sleep = strtod(optarg, &parsed);
+ if (parsed == optarg) {
+ fprintf(stderr, "error: invalid erase-sleep: %s\n", optarg);
+ exit(-1);
+ }
+ bench_erase_sleep = erase_sleep*1.0e9;
+ break;
+ }
+ // done parsing
+ case -1:
+ goto getopt_done;
+ // unknown arg, getopt prints a message for us
+ default:
+ exit(-1);
+ }
+ }
+getopt_done: ;
+
+ if (argc > optind) {
+ // reset our bench identifier list
+ bench_ids = NULL;
+ bench_id_count = 0;
+ bench_id_capacity = 0;
+ }
+
+ // parse bench identifier, if any, cannibalizing the arg in the process
+ for (; argc > optind; optind++) {
+ bench_define_t *defines = NULL;
+ size_t define_count = 0;
+
+ // parse name, can be suite or case
+ char *name = argv[optind];
+ char *defines_ = strchr(name, ':');
+ if (defines_) {
+ *defines_ = '\0';
+ defines_ += 1;
+ }
+
+ // remove optional path and .toml suffix
+ char *slash = strrchr(name, '/');
+ if (slash) {
+ name = slash+1;
+ }
+
+ size_t name_len = strlen(name);
+ if (name_len > 5 && strcmp(&name[name_len-5], ".toml") == 0) {
+ name[name_len-5] = '\0';
+ }
+
+ if (defines_) {
+ // parse defines
+ while (true) {
+ char *parsed;
+ size_t d = leb16_parse(defines_, &parsed);
+ intmax_t v = leb16_parse(parsed, &parsed);
+ if (parsed == defines_) {
+ break;
+ }
+ defines_ = parsed;
+
+ if (d >= define_count) {
+ // align to power of two to avoid any superlinear growth
+ size_t ncount = 1 << lfs_npw2(d+1);
+ defines = realloc(defines,
+ ncount*sizeof(bench_define_t));
+ memset(defines+define_count, 0,
+ (ncount-define_count)*sizeof(bench_define_t));
+ define_count = ncount;
+ }
+ defines[d] = BENCH_LIT(v);
+ }
+ }
+
+ // append to identifier list
+ *(bench_id_t*)mappend(
+ (void**)&bench_ids,
+ sizeof(bench_id_t),
+ &bench_id_count,
+ &bench_id_capacity) = (bench_id_t){
+ .name = name,
+ .defines = defines,
+ .define_count = define_count,
+ };
+ }
+
+ // do the thing
+ op();
+
+ // cleanup (need to be done for valgrind benching)
+ bench_define_cleanup();
+ if (bench_overrides) {
+ for (size_t i = 0; i < bench_override_count; i++) {
+ free((void*)bench_overrides[i].defines);
+ }
+ free((void*)bench_overrides);
+ }
+ if (bench_geometry_capacity) {
+ free((void*)bench_geometries);
+ }
+ if (bench_id_capacity) {
+ for (size_t i = 0; i < bench_id_count; i++) {
+ free((void*)bench_ids[i].defines);
+ }
+ free((void*)bench_ids);
+ }
+}
diff --git a/packages/littlefs-v2.11.2/runners/bench_runner.h b/packages/littlefs-v2.11.2/runners/bench_runner.h
new file mode 100644
index 0000000..848b5e8
--- /dev/null
+++ b/packages/littlefs-v2.11.2/runners/bench_runner.h
@@ -0,0 +1,146 @@
+/*
+ * Runner for littlefs benchmarks
+ *
+ * Copyright (c) 2022, The littlefs authors.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef BENCH_RUNNER_H
+#define BENCH_RUNNER_H
+
+
+// override LFS_TRACE
+void bench_trace(const char *fmt, ...);
+
+#define LFS_TRACE_(fmt, ...) \
+ bench_trace("%s:%d:trace: " fmt "%s\n", \
+ __FILE__, \
+ __LINE__, \
+ __VA_ARGS__)
+#define LFS_TRACE(...) LFS_TRACE_(__VA_ARGS__, "")
+#define LFS_EMUBD_TRACE(...) LFS_TRACE_(__VA_ARGS__, "")
+
+// provide BENCH_START/BENCH_STOP macros
+void bench_start(void);
+void bench_stop(void);
+
+#define BENCH_START() bench_start()
+#define BENCH_STOP() bench_stop()
+
+
+// note these are indirectly included in any generated files
+#include "bd/lfs_emubd.h"
+#include <stdio.h>
+
+// give source a chance to define feature macros
+#undef _FEATURES_H
+#undef _STDIO_H
+
+
+// generated bench configurations
+struct lfs_config;
+
+enum bench_flags {
+ BENCH_REENTRANT = 0x1,
+};
+typedef uint8_t bench_flags_t;
+
+typedef struct bench_define {
+ intmax_t (*cb)(void *data);
+ void *data;
+} bench_define_t;
+
+struct bench_case {
+ const char *name;
+ const char *path;
+ bench_flags_t flags;
+ size_t permutations;
+
+ const bench_define_t *defines;
+
+ bool (*filter)(void);
+ void (*run)(struct lfs_config *cfg);
+};
+
+struct bench_suite {
+ const char *name;
+ const char *path;
+ bench_flags_t flags;
+
+ const char *const *define_names;
+ size_t define_count;
+
+ const struct bench_case *cases;
+ size_t case_count;
+};
+
+
+// deterministic prng for pseudo-randomness in benches
+uint32_t bench_prng(uint32_t *state);
+
+#define BENCH_PRNG(state) bench_prng(state)
+
+
+// access generated bench defines
+intmax_t bench_define(size_t define);
+
+#define BENCH_DEFINE(i) bench_define(i)
+
+// a few preconfigured defines that control how benches run
+
+#define READ_SIZE_i 0
+#define PROG_SIZE_i 1
+#define ERASE_SIZE_i 2
+#define ERASE_COUNT_i 3
+#define BLOCK_SIZE_i 4
+#define BLOCK_COUNT_i 5
+#define CACHE_SIZE_i 6
+#define LOOKAHEAD_SIZE_i 7
+#define COMPACT_THRESH_i 8
+#define METADATA_MAX_i 9
+#define INLINE_MAX_i 10
+#define BLOCK_CYCLES_i 11
+#define ERASE_VALUE_i 12
+#define ERASE_CYCLES_i 13
+#define BADBLOCK_BEHAVIOR_i 14
+#define POWERLOSS_BEHAVIOR_i 15
+
+#define READ_SIZE bench_define(READ_SIZE_i)
+#define PROG_SIZE bench_define(PROG_SIZE_i)
+#define ERASE_SIZE bench_define(ERASE_SIZE_i)
+#define ERASE_COUNT bench_define(ERASE_COUNT_i)
+#define BLOCK_SIZE bench_define(BLOCK_SIZE_i)
+#define BLOCK_COUNT bench_define(BLOCK_COUNT_i)
+#define CACHE_SIZE bench_define(CACHE_SIZE_i)
+#define LOOKAHEAD_SIZE bench_define(LOOKAHEAD_SIZE_i)
+#define COMPACT_THRESH bench_define(COMPACT_THRESH_i)
+#define METADATA_MAX bench_define(METADATA_MAX_i)
+#define INLINE_MAX bench_define(INLINE_MAX_i)
+#define BLOCK_CYCLES bench_define(BLOCK_CYCLES_i)
+#define ERASE_VALUE bench_define(ERASE_VALUE_i)
+#define ERASE_CYCLES bench_define(ERASE_CYCLES_i)
+#define BADBLOCK_BEHAVIOR bench_define(BADBLOCK_BEHAVIOR_i)
+#define POWERLOSS_BEHAVIOR bench_define(POWERLOSS_BEHAVIOR_i)
+
+#define BENCH_IMPLICIT_DEFINES \
+ BENCH_DEF(READ_SIZE, PROG_SIZE) \
+ BENCH_DEF(PROG_SIZE, ERASE_SIZE) \
+ BENCH_DEF(ERASE_SIZE, 0) \
+ BENCH_DEF(ERASE_COUNT, (1024*1024)/BLOCK_SIZE) \
+ BENCH_DEF(BLOCK_SIZE, ERASE_SIZE) \
+ BENCH_DEF(BLOCK_COUNT, ERASE_COUNT/lfs_max(BLOCK_SIZE/ERASE_SIZE,1))\
+ BENCH_DEF(CACHE_SIZE, lfs_max(64,lfs_max(READ_SIZE,PROG_SIZE))) \
+ BENCH_DEF(LOOKAHEAD_SIZE, 16) \
+ BENCH_DEF(COMPACT_THRESH, 0) \
+ BENCH_DEF(METADATA_MAX, 0) \
+ BENCH_DEF(INLINE_MAX, 0) \
+ BENCH_DEF(BLOCK_CYCLES, -1) \
+ BENCH_DEF(ERASE_VALUE, 0xff) \
+ BENCH_DEF(ERASE_CYCLES, 0) \
+ BENCH_DEF(BADBLOCK_BEHAVIOR, LFS_EMUBD_BADBLOCK_PROGERROR) \
+ BENCH_DEF(POWERLOSS_BEHAVIOR, LFS_EMUBD_POWERLOSS_NOOP)
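+
+// as a worked example, assuming a "nor"-style geometry that sets only
+// PROG_SIZE=1 and ERASE_SIZE=4096: READ_SIZE defaults to 1, BLOCK_SIZE to
+// 4096, ERASE_COUNT to (1024*1024)/4096 = 256, and BLOCK_COUNT to 256,
+// i.e. a 1 MiB emulated device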
+
+#define BENCH_GEOMETRY_DEFINE_COUNT 4
+#define BENCH_IMPLICIT_DEFINE_COUNT 16
+
+
+#endif
diff --git a/packages/littlefs-v2.11.2/runners/test_runner.c b/packages/littlefs-v2.11.2/runners/test_runner.c
new file mode 100644
index 0000000..76cb149
--- /dev/null
+++ b/packages/littlefs-v2.11.2/runners/test_runner.c
@@ -0,0 +1,2818 @@
+/*
+ * Runner for littlefs tests
+ *
+ * Copyright (c) 2022, The littlefs authors.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef _POSIX_C_SOURCE
+#define _POSIX_C_SOURCE 199309L
+#endif
+
+#include "runners/test_runner.h"
+#include "bd/lfs_emubd.h"
+
+#include <ctype.h>
+#include <errno.h>
+#include <execinfo.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <setjmp.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <time.h>
+#include <unistd.h>
+
+
+// some helpers
+
+// append to an array with amortized doubling
+void *mappend(void **p,
+ size_t size,
+ size_t *count,
+ size_t *capacity) {
+ uint8_t *p_ = *p;
+ size_t count_ = *count;
+ size_t capacity_ = *capacity;
+
+ count_ += 1;
+ if (count_ > capacity_) {
+ capacity_ = (2*capacity_ < 4) ? 4 : 2*capacity_;
+
+ p_ = realloc(p_, capacity_*size);
+ if (!p_) {
+ return NULL;
+ }
+ }
+
+ *p = p_;
+ *count = count_;
+ *capacity = capacity_;
+ return &p_[(count_-1)*size];
+}
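+
+// Illustrative usage (hypothetical names): mappend returns a pointer to the
+// new, uninitialized element, or NULL if the underlying realloc fails; the
+// caller owns all three bookkeeping fields:
+//
+//     intmax_t *v = mappend((void**)&values, sizeof(intmax_t),
+//             &value_count, &value_capacity);
+//     *v = 42;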
+
+// a quick self-terminating text-safe varint scheme
+static void leb16_print(uintmax_t x) {
+ // allow 'w' to indicate negative numbers
+ if ((intmax_t)x < 0) {
+ printf("w");
+ x = -x;
+ }
+
+ while (true) {
+ char nibble = (x & 0xf) | (x > 0xf ? 0x10 : 0);
+ printf("%c", (nibble < 10) ? '0'+nibble : 'a'+nibble-10);
+ if (x <= 0xf) {
+ break;
+ }
+ x >>= 4;
+ }
+}
+
+static uintmax_t leb16_parse(const char *s, char **tail) {
+ bool neg = false;
+ uintmax_t x = 0;
+ if (tail) {
+ *tail = (char*)s;
+ }
+
+ if (s[0] == 'w') {
+ neg = true;
+ s = s+1;
+ }
+
+ size_t i = 0;
+ while (true) {
+ uintmax_t nibble = s[i];
+ if (nibble >= '0' && nibble <= '9') {
+ nibble = nibble - '0';
+ } else if (nibble >= 'a' && nibble <= 'v') {
+ nibble = nibble - 'a' + 10;
+ } else {
+ // invalid?
+ return 0;
+ }
+
+ x |= (nibble & 0xf) << (4*i);
+ i += 1;
+ if (!(nibble & 0x10)) {
+ s = s + i;
+ break;
+ }
+ }
+
+ if (tail) {
+ *tail = (char*)s;
+ }
+ return neg ? -x : x;
+}
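+
+// Worked example: leb16_print(0x123) emits "ji1". Nibbles are emitted
+// LSB-first with 0x10 as a continuation flag, so continued nibbles land in
+// 'g'..'v' while a final nibble is '0'..'9'/'a'..'f'; leb16_parse("ji1", ...)
+// then reassembles 0x3 | (0x2 << 4) | (0x1 << 8) = 0x123.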
+
+
+
+// test_runner types
+
+typedef struct test_geometry {
+ const char *name;
+ test_define_t defines[TEST_GEOMETRY_DEFINE_COUNT];
+} test_geometry_t;
+
+typedef struct test_powerloss {
+ const char *name;
+ void (*run)(
+ const lfs_emubd_powercycles_t *cycles,
+ size_t cycle_count,
+ const struct test_suite *suite,
+ const struct test_case *case_);
+ const lfs_emubd_powercycles_t *cycles;
+ size_t cycle_count;
+} test_powerloss_t;
+
+typedef struct test_id {
+ const char *name;
+ const test_define_t *defines;
+ size_t define_count;
+ const lfs_emubd_powercycles_t *cycles;
+ size_t cycle_count;
+} test_id_t;
+
+
+// test suites are linked into a custom ld section
+#if defined(__APPLE__)
+extern struct test_suite __start__test_suites __asm("section$start$__DATA$_test_suites");
+extern struct test_suite __stop__test_suites __asm("section$end$__DATA$_test_suites");
+#else
+extern struct test_suite __start__test_suites;
+extern struct test_suite __stop__test_suites;
+#endif
+
+const struct test_suite *test_suites = &__start__test_suites;
+#define TEST_SUITE_COUNT \
+ ((size_t)(&__stop__test_suites - &__start__test_suites))
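+
+// A suite opts in simply by placing its descriptor in the _test_suites
+// section, roughly (illustrative sketch of what the generated test .c files
+// emit):
+//
+//     __attribute__((section("_test_suites")))
+//     const struct test_suite test_dirs_suite = {...};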
+
+
+// test define management
+typedef struct test_define_map {
+ const test_define_t *defines;
+ size_t count;
+} test_define_map_t;
+
+typedef struct test_define_names {
+ const char *const *names;
+ size_t count;
+} test_define_names_t;
+
+intmax_t test_define_lit(void *data) {
+ return (intptr_t)data;
+}
+
+#define TEST_CONST(x) {test_define_lit, (void*)(uintptr_t)(x)}
+#define TEST_LIT(x) ((test_define_t)TEST_CONST(x))
+
+
+#define TEST_DEF(k, v) \
+ intmax_t test_define_##k(void *data) { \
+ (void)data; \
+ return v; \
+ }
+
+ TEST_IMPLICIT_DEFINES
+#undef TEST_DEF
+
+#define TEST_DEFINE_MAP_OVERRIDE 0
+#define TEST_DEFINE_MAP_EXPLICIT 1
+#define TEST_DEFINE_MAP_PERMUTATION 2
+#define TEST_DEFINE_MAP_GEOMETRY 3
+#define TEST_DEFINE_MAP_IMPLICIT 4
+#define TEST_DEFINE_MAP_COUNT 5
+
+test_define_map_t test_define_maps[TEST_DEFINE_MAP_COUNT] = {
+ [TEST_DEFINE_MAP_IMPLICIT] = {
+ (const test_define_t[TEST_IMPLICIT_DEFINE_COUNT]) {
+ #define TEST_DEF(k, v) \
+ [k##_i] = {test_define_##k, NULL},
+
+ TEST_IMPLICIT_DEFINES
+ #undef TEST_DEF
+ },
+ TEST_IMPLICIT_DEFINE_COUNT,
+ },
+};
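+
+// test_define() scans these maps in index order, so overrides (0) shadow
+// explicit defines (1), which shadow per-permutation defines (2), geometry
+// defines (3), and finally the implicit defaults (4).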
+
+#define TEST_DEFINE_NAMES_SUITE 0
+#define TEST_DEFINE_NAMES_IMPLICIT 1
+#define TEST_DEFINE_NAMES_COUNT 2
+
+test_define_names_t test_define_names[TEST_DEFINE_NAMES_COUNT] = {
+ [TEST_DEFINE_NAMES_IMPLICIT] = {
+ (const char *const[TEST_IMPLICIT_DEFINE_COUNT]){
+ #define TEST_DEF(k, v) \
+ [k##_i] = #k,
+
+ TEST_IMPLICIT_DEFINES
+ #undef TEST_DEF
+ },
+ TEST_IMPLICIT_DEFINE_COUNT,
+ },
+};
+
+intmax_t *test_define_cache;
+size_t test_define_cache_count;
+unsigned *test_define_cache_mask;
+
+const char *test_define_name(size_t define) {
+ // lookup in our test names
+ for (size_t i = 0; i < TEST_DEFINE_NAMES_COUNT; i++) {
+ if (define < test_define_names[i].count
+ && test_define_names[i].names
+ && test_define_names[i].names[define]) {
+ return test_define_names[i].names[define];
+ }
+ }
+
+ return NULL;
+}
+
+bool test_define_ispermutation(size_t define) {
+ // is this define specific to the permutation?
+ for (size_t i = 0; i < TEST_DEFINE_MAP_IMPLICIT; i++) {
+ if (define < test_define_maps[i].count
+ && test_define_maps[i].defines[define].cb) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+intmax_t test_define(size_t define) {
+ // is the define in our cache?
+ if (define < test_define_cache_count
+ && (test_define_cache_mask[define/(8*sizeof(unsigned))]
+ & (1 << (define%(8*sizeof(unsigned)))))) {
+ return test_define_cache[define];
+ }
+
+ // lookup in our test defines
+ for (size_t i = 0; i < TEST_DEFINE_MAP_COUNT; i++) {
+ if (define < test_define_maps[i].count
+ && test_define_maps[i].defines[define].cb) {
+ intmax_t v = test_define_maps[i].defines[define].cb(
+ test_define_maps[i].defines[define].data);
+
+ // insert into cache!
+ test_define_cache[define] = v;
+ test_define_cache_mask[define / (8*sizeof(unsigned))]
+ |= 1 << (define%(8*sizeof(unsigned)));
+
+ return v;
+ }
+ }
+
+    // not found? report the undefined define and abort
+    const char *name = test_define_name(define);
+    fprintf(stderr, "error: undefined define %s (%zu)\n",
+ name ? name : "(unknown)",
+ define);
+ assert(false);
+ exit(-1);
+}
+
+void test_define_flush(void) {
+ // clear cache between permutations
+ memset(test_define_cache_mask, 0,
+ sizeof(unsigned)*(
+ (test_define_cache_count+(8*sizeof(unsigned))-1)
+ / (8*sizeof(unsigned))));
+}
+
+// geometry updates
+const test_geometry_t *test_geometry = NULL;
+
+void test_define_geometry(const test_geometry_t *geometry) {
+ test_define_maps[TEST_DEFINE_MAP_GEOMETRY] = (test_define_map_t){
+ geometry->defines, TEST_GEOMETRY_DEFINE_COUNT};
+}
+
+// override updates
+typedef struct test_override {
+ const char *name;
+ const intmax_t *defines;
+ size_t permutations;
+} test_override_t;
+
+const test_override_t *test_overrides = NULL;
+size_t test_override_count = 0;
+
+test_define_t *test_override_defines = NULL;
+size_t test_override_define_count = 0;
+size_t test_override_define_permutations = 1;
+size_t test_override_define_capacity = 0;
+
+// suite/perm updates
+void test_define_suite(const struct test_suite *suite) {
+ test_define_names[TEST_DEFINE_NAMES_SUITE] = (test_define_names_t){
+ suite->define_names, suite->define_count};
+
+ // make sure our cache is large enough
+ if (lfs_max(suite->define_count, TEST_IMPLICIT_DEFINE_COUNT)
+ > test_define_cache_count) {
+ // align to power of two to avoid any superlinear growth
+ size_t ncount = 1 << lfs_npw2(
+ lfs_max(suite->define_count, TEST_IMPLICIT_DEFINE_COUNT));
+ test_define_cache = realloc(test_define_cache, ncount*sizeof(intmax_t));
+ test_define_cache_mask = realloc(test_define_cache_mask,
+ sizeof(unsigned)*(
+ (ncount+(8*sizeof(unsigned))-1)
+ / (8*sizeof(unsigned))));
+ test_define_cache_count = ncount;
+ }
+
+ // map any overrides
+ if (test_override_count > 0) {
+ // first figure out the total size of override permutations
+ size_t count = 0;
+ size_t permutations = 1;
+ for (size_t i = 0; i < test_override_count; i++) {
+ for (size_t d = 0;
+ d < lfs_max(
+ suite->define_count,
+ TEST_IMPLICIT_DEFINE_COUNT);
+ d++) {
+ // define name match?
+ const char *name = test_define_name(d);
+ if (name && strcmp(name, test_overrides[i].name) == 0) {
+ count = lfs_max(count, d+1);
+ permutations *= test_overrides[i].permutations;
+ break;
+ }
+ }
+ }
+ test_override_define_count = count;
+ test_override_define_permutations = permutations;
+
+ // make sure our override arrays are big enough
+ if (count * permutations > test_override_define_capacity) {
+ // align to power of two to avoid any superlinear growth
+ size_t ncapacity = 1 << lfs_npw2(count * permutations);
+ test_override_defines = realloc(
+ test_override_defines,
+ sizeof(test_define_t)*ncapacity);
+ test_override_define_capacity = ncapacity;
+ }
+
+ // zero unoverridden defines
+ memset(test_override_defines, 0,
+ sizeof(test_define_t) * count * permutations);
+
+ // compute permutations
+ size_t p = 1;
+ for (size_t i = 0; i < test_override_count; i++) {
+ for (size_t d = 0;
+ d < lfs_max(
+ suite->define_count,
+ TEST_IMPLICIT_DEFINE_COUNT);
+ d++) {
+ // define name match?
+ const char *name = test_define_name(d);
+ if (name && strcmp(name, test_overrides[i].name) == 0) {
+ // scatter the define permutations based on already
+ // seen permutations
+ for (size_t j = 0; j < permutations; j++) {
+ test_override_defines[j*count + d] = TEST_LIT(
+ test_overrides[i].defines[(j/p)
+ % test_overrides[i].permutations]);
+ }
+
+ // keep track of how many permutations we've seen so far
+ p *= test_overrides[i].permutations;
+ break;
+ }
+ }
+ }
+ }
+}
+
+void test_define_perm(
+ const struct test_suite *suite,
+ const struct test_case *case_,
+ size_t perm) {
+ if (case_->defines) {
+ test_define_maps[TEST_DEFINE_MAP_PERMUTATION] = (test_define_map_t){
+ case_->defines + perm*suite->define_count,
+ suite->define_count};
+ } else {
+ test_define_maps[TEST_DEFINE_MAP_PERMUTATION] = (test_define_map_t){
+ NULL, 0};
+ }
+}
+
+void test_define_override(size_t perm) {
+ test_define_maps[TEST_DEFINE_MAP_OVERRIDE] = (test_define_map_t){
+ test_override_defines + perm*test_override_define_count,
+ test_override_define_count};
+}
+
+void test_define_explicit(
+ const test_define_t *defines,
+ size_t define_count) {
+ test_define_maps[TEST_DEFINE_MAP_EXPLICIT] = (test_define_map_t){
+ defines, define_count};
+}
+
+void test_define_cleanup(void) {
+ // test define management can allocate a few things
+ free(test_define_cache);
+ free(test_define_cache_mask);
+ free(test_override_defines);
+}
+
+
+
+// test state
+extern const test_geometry_t *test_geometries;
+extern size_t test_geometry_count;
+
+extern const test_powerloss_t *test_powerlosses;
+extern size_t test_powerloss_count;
+
+const test_id_t *test_ids = (const test_id_t[]) {
+ {NULL, NULL, 0, NULL, 0},
+};
+size_t test_id_count = 1;
+
+size_t test_step_start = 0;
+size_t test_step_stop = -1;
+size_t test_step_step = 1;
+
+const char *test_disk_path = NULL;
+const char *test_trace_path = NULL;
+bool test_trace_backtrace = false;
+uint32_t test_trace_period = 0;
+uint32_t test_trace_freq = 0;
+FILE *test_trace_file = NULL;
+uint32_t test_trace_cycles = 0;
+uint64_t test_trace_time = 0;
+uint64_t test_trace_open_time = 0;
+lfs_emubd_sleep_t test_read_sleep = 0.0;
+lfs_emubd_sleep_t test_prog_sleep = 0.0;
+lfs_emubd_sleep_t test_erase_sleep = 0.0;
+
+// this determines both the backtrace buffer and the trace printf buffer; if
+// trace output ends up interleaved or truncated, this may need to be increased
+#ifndef TEST_TRACE_BACKTRACE_BUFFER_SIZE
+#define TEST_TRACE_BACKTRACE_BUFFER_SIZE 8192
+#endif
+void *test_trace_backtrace_buffer[
+ TEST_TRACE_BACKTRACE_BUFFER_SIZE / sizeof(void*)];
+
+// trace printing
+void test_trace(const char *fmt, ...) {
+ if (test_trace_path) {
+ // sample at a specific period?
+ if (test_trace_period) {
+ if (test_trace_cycles % test_trace_period != 0) {
+ test_trace_cycles += 1;
+ return;
+ }
+ test_trace_cycles += 1;
+ }
+
+ // sample at a specific frequency?
+ if (test_trace_freq) {
+ struct timespec t;
+ clock_gettime(CLOCK_MONOTONIC, &t);
+ uint64_t now = (uint64_t)t.tv_sec*1000*1000*1000
+ + (uint64_t)t.tv_nsec;
+ if (now - test_trace_time < (1000*1000*1000) / test_trace_freq) {
+ return;
+ }
+ test_trace_time = now;
+ }
+
+ if (!test_trace_file) {
+            // Tracing output is heavy, and trying to open the trace file on
+            // every trace call is slow, so we rate-limit open attempts.
+            // Note this doesn't affect already-opened files.
+ struct timespec t;
+ clock_gettime(CLOCK_MONOTONIC, &t);
+ uint64_t now = (uint64_t)t.tv_sec*1000*1000*1000
+ + (uint64_t)t.tv_nsec;
+ if (now - test_trace_open_time < 100*1000*1000) {
+ return;
+ }
+ test_trace_open_time = now;
+
+ // try to open the trace file
+ int fd;
+ if (strcmp(test_trace_path, "-") == 0) {
+ fd = dup(1);
+ if (fd < 0) {
+ return;
+ }
+ } else {
+ fd = open(
+ test_trace_path,
+ O_WRONLY | O_CREAT | O_APPEND | O_NONBLOCK,
+ 0666);
+ if (fd < 0) {
+ return;
+ }
+ int err = fcntl(fd, F_SETFL, O_WRONLY | O_CREAT | O_APPEND);
+ assert(!err);
+ }
+
+ FILE *f = fdopen(fd, "a");
+ assert(f);
+ int err = setvbuf(f, NULL, _IOFBF,
+ TEST_TRACE_BACKTRACE_BUFFER_SIZE);
+ assert(!err);
+ test_trace_file = f;
+ }
+
+ // print trace
+ va_list va;
+ va_start(va, fmt);
+ int res = vfprintf(test_trace_file, fmt, va);
+ va_end(va);
+ if (res < 0) {
+ fclose(test_trace_file);
+ test_trace_file = NULL;
+ return;
+ }
+
+ if (test_trace_backtrace) {
+ // print backtrace
+ size_t count = backtrace(
+ test_trace_backtrace_buffer,
+ TEST_TRACE_BACKTRACE_BUFFER_SIZE);
+ // note we skip our own stack frame
+ for (size_t i = 1; i < count; i++) {
+ res = fprintf(test_trace_file, "\tat %p\n",
+ test_trace_backtrace_buffer[i]);
+ if (res < 0) {
+ fclose(test_trace_file);
+ test_trace_file = NULL;
+ return;
+ }
+ }
+ }
+
+ // flush immediately
+ fflush(test_trace_file);
+ }
+}
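+
+// In short, tracing is throttled three ways: --trace-period keeps one of
+// every N trace calls, --trace-freq caps output at N traces per second, and
+// failed opens of the trace file are only retried every 100ms.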
+
+
+// test prng
+uint32_t test_prng(uint32_t *state) {
+ // A simple xorshift32 generator, easily reproducible. Keep in mind
+ // determinism is much more important than actual randomness here.
+ uint32_t x = *state;
+ x ^= x << 13;
+ x ^= x >> 17;
+ x ^= x << 5;
+ *state = x;
+ return x;
+}
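+
+// Note: this is Marsaglia's xorshift32 with the classic 13/17/5 shift triple,
+// which has full period 2^32-1 over nonzero states; zero is a fixed point, so
+// states should be seeded nonzero.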
+
+
+// encode our permutation into a reusable id
+static void perm_printid(
+ const struct test_suite *suite,
+ const struct test_case *case_,
+ const lfs_emubd_powercycles_t *cycles,
+ size_t cycle_count) {
+ (void)suite;
+ // case[:permutation[:powercycles]]
+ printf("%s:", case_->name);
+ for (size_t d = 0;
+ d < lfs_max(
+ suite->define_count,
+ TEST_IMPLICIT_DEFINE_COUNT);
+ d++) {
+ if (test_define_ispermutation(d)) {
+ leb16_print(d);
+ leb16_print(TEST_DEFINE(d));
+ }
+ }
+
+    // only print power-cycles if any occurred
+ if (cycles) {
+ printf(":");
+ for (size_t i = 0; i < cycle_count; i++) {
+ leb16_print(cycles[i]);
+ }
+ }
+}
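+
+// For example (hypothetical case name), a permutation of "test_files" with
+// define 4 set to 4096 prints as "test_files:4ggg1", since leb16(4) = "4"
+// and leb16(4096) = "ggg1".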
+
+
+// a quick trie for keeping track of permutations we've seen
+typedef struct test_seen {
+ struct test_seen_branch *branches;
+ size_t branch_count;
+ size_t branch_capacity;
+} test_seen_t;
+
+struct test_seen_branch {
+ intmax_t define;
+ struct test_seen branch;
+};
+
+bool test_seen_insert(
+ test_seen_t *seen,
+ const struct test_suite *suite,
+ const struct test_case *case_) {
+ (void)case_;
+ bool was_seen = true;
+
+ // use the currently set defines
+ for (size_t d = 0;
+ d < lfs_max(
+ suite->define_count,
+ TEST_IMPLICIT_DEFINE_COUNT);
+ d++) {
+ // treat unpermuted defines the same as 0
+ intmax_t define = test_define_ispermutation(d) ? TEST_DEFINE(d) : 0;
+
+ // already seen?
+ struct test_seen_branch *branch = NULL;
+ for (size_t i = 0; i < seen->branch_count; i++) {
+ if (seen->branches[i].define == define) {
+ branch = &seen->branches[i];
+ break;
+ }
+ }
+
+ // need to create a new node
+ if (!branch) {
+ was_seen = false;
+ branch = mappend(
+ (void**)&seen->branches,
+ sizeof(struct test_seen_branch),
+ &seen->branch_count,
+ &seen->branch_capacity);
+ branch->define = define;
+ branch->branch = (test_seen_t){NULL, 0, 0};
+ }
+
+ seen = &branch->branch;
+ }
+
+ return was_seen;
+}
+
+void test_seen_cleanup(test_seen_t *seen) {
+ for (size_t i = 0; i < seen->branch_count; i++) {
+ test_seen_cleanup(&seen->branches[i].branch);
+ }
+ free(seen->branches);
+}
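+
+// The trie has one level per define: an insert walks the current define
+// values root-to-leaf, adding a branch for each value it hasn't seen, and
+// reports whether the complete path already existed.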
+
+static void run_powerloss_none(
+ const lfs_emubd_powercycles_t *cycles,
+ size_t cycle_count,
+ const struct test_suite *suite,
+ const struct test_case *case_);
+static void run_powerloss_cycles(
+ const lfs_emubd_powercycles_t *cycles,
+ size_t cycle_count,
+ const struct test_suite *suite,
+ const struct test_case *case_);
+
+// iterate through permutations in a test case
+static void case_forperm(
+ const struct test_suite *suite,
+ const struct test_case *case_,
+ const test_define_t *defines,
+ size_t define_count,
+ const lfs_emubd_powercycles_t *cycles,
+ size_t cycle_count,
+ void (*cb)(
+ void *data,
+ const struct test_suite *suite,
+ const struct test_case *case_,
+ const test_powerloss_t *powerloss),
+ void *data) {
+ // explicit permutation?
+ if (defines) {
+ test_define_explicit(defines, define_count);
+
+ for (size_t v = 0; v < test_override_define_permutations; v++) {
+ // define override permutation
+ test_define_override(v);
+ test_define_flush();
+
+ // explicit powerloss cycles?
+ if (cycles) {
+ cb(data, suite, case_, &(test_powerloss_t){
+ .run=run_powerloss_cycles,
+ .cycles=cycles,
+ .cycle_count=cycle_count});
+ } else {
+ for (size_t p = 0; p < test_powerloss_count; p++) {
+ // skip non-reentrant tests when powerloss testing
+ if (test_powerlosses[p].run != run_powerloss_none
+ && !(case_->flags & TEST_REENTRANT)) {
+ continue;
+ }
+
+ cb(data, suite, case_, &test_powerlosses[p]);
+ }
+ }
+ }
+
+ return;
+ }
+
+ test_seen_t seen = {NULL, 0, 0};
+
+ for (size_t k = 0; k < case_->permutations; k++) {
+ // define permutation
+ test_define_perm(suite, case_, k);
+
+ for (size_t v = 0; v < test_override_define_permutations; v++) {
+ // define override permutation
+ test_define_override(v);
+
+ for (size_t g = 0; g < test_geometry_count; g++) {
+ // define geometry
+ test_define_geometry(&test_geometries[g]);
+ test_define_flush();
+
+ // have we seen this permutation before?
+ bool was_seen = test_seen_insert(&seen, suite, case_);
+ if (!(k == 0 && v == 0 && g == 0) && was_seen) {
+ continue;
+ }
+
+ if (cycles) {
+ cb(data, suite, case_, &(test_powerloss_t){
+ .run=run_powerloss_cycles,
+ .cycles=cycles,
+ .cycle_count=cycle_count});
+ } else {
+ for (size_t p = 0; p < test_powerloss_count; p++) {
+ // skip non-reentrant tests when powerloss testing
+ if (test_powerlosses[p].run != run_powerloss_none
+ && !(case_->flags & TEST_REENTRANT)) {
+ continue;
+ }
+
+ cb(data, suite, case_, &test_powerlosses[p]);
+ }
+ }
+ }
+ }
+ }
+
+ test_seen_cleanup(&seen);
+}
+
+
+// how many permutations are there actually in a test case
+struct perm_count_state {
+ size_t total;
+ size_t filtered;
+};
+
+void perm_count(
+ void *data,
+ const struct test_suite *suite,
+ const struct test_case *case_,
+ const test_powerloss_t *powerloss) {
+ struct perm_count_state *state = data;
+ (void)suite;
+ (void)case_;
+ (void)powerloss;
+
+ state->total += 1;
+
+ if (case_->filter && !case_->filter()) {
+ return;
+ }
+
+ state->filtered += 1;
+}
+
+
+// operations we can do
+static void summary(void) {
+ printf("%-23s %7s %7s %7s %11s\n",
+ "", "flags", "suites", "cases", "perms");
+ size_t suites = 0;
+ size_t cases = 0;
+ test_flags_t flags = 0;
+ struct perm_count_state perms = {0, 0};
+
+ for (size_t t = 0; t < test_id_count; t++) {
+ for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
+ test_define_suite(&test_suites[i]);
+
+ for (size_t j = 0; j < test_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (test_ids[t].name && !(
+ strcmp(test_ids[t].name,
+ test_suites[i].name) == 0
+ || strcmp(test_ids[t].name,
+ test_suites[i].cases[j].name) == 0)) {
+ continue;
+ }
+
+ cases += 1;
+ case_forperm(
+ &test_suites[i],
+ &test_suites[i].cases[j],
+ test_ids[t].defines,
+ test_ids[t].define_count,
+ test_ids[t].cycles,
+ test_ids[t].cycle_count,
+ perm_count,
+ &perms);
+ }
+
+ suites += 1;
+ flags |= test_suites[i].flags;
+ }
+ }
+
+ char perm_buf[64];
+ sprintf(perm_buf, "%zu/%zu", perms.filtered, perms.total);
+ char flag_buf[64];
+ sprintf(flag_buf, "%s%s",
+ (flags & TEST_REENTRANT) ? "r" : "",
+ (!flags) ? "-" : "");
+ printf("%-23s %7s %7zu %7zu %11s\n",
+ "TOTAL",
+ flag_buf,
+ suites,
+ cases,
+ perm_buf);
+}
+
+static void list_suites(void) {
+    // size the name column to fit the longest name, at minimum 23 chars
+ unsigned name_width = 23;
+ for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
+ size_t len = strlen(test_suites[i].name);
+ if (len > name_width) {
+ name_width = len;
+ }
+ }
+ name_width = 4*((name_width+1+4-1)/4)-1;
+
+ printf("%-*s %7s %7s %11s\n",
+ name_width, "suite", "flags", "cases", "perms");
+ for (size_t t = 0; t < test_id_count; t++) {
+ for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
+ test_define_suite(&test_suites[i]);
+
+ size_t cases = 0;
+ struct perm_count_state perms = {0, 0};
+
+ for (size_t j = 0; j < test_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (test_ids[t].name && !(
+ strcmp(test_ids[t].name,
+ test_suites[i].name) == 0
+ || strcmp(test_ids[t].name,
+ test_suites[i].cases[j].name) == 0)) {
+ continue;
+ }
+
+ cases += 1;
+ case_forperm(
+ &test_suites[i],
+ &test_suites[i].cases[j],
+ test_ids[t].defines,
+ test_ids[t].define_count,
+ test_ids[t].cycles,
+ test_ids[t].cycle_count,
+ perm_count,
+ &perms);
+ }
+
+ // no tests found?
+ if (!cases) {
+ continue;
+ }
+
+ char perm_buf[64];
+ sprintf(perm_buf, "%zu/%zu", perms.filtered, perms.total);
+ char flag_buf[64];
+ sprintf(flag_buf, "%s%s",
+ (test_suites[i].flags & TEST_REENTRANT) ? "r" : "",
+ (!test_suites[i].flags) ? "-" : "");
+ printf("%-*s %7s %7zu %11s\n",
+ name_width,
+ test_suites[i].name,
+ flag_buf,
+ cases,
+ perm_buf);
+ }
+ }
+}
+
+static void list_cases(void) {
+    // size the name column to fit the longest name, at minimum 23 chars
+ unsigned name_width = 23;
+ for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
+ for (size_t j = 0; j < test_suites[i].case_count; j++) {
+ size_t len = strlen(test_suites[i].cases[j].name);
+ if (len > name_width) {
+ name_width = len;
+ }
+ }
+ }
+ name_width = 4*((name_width+1+4-1)/4)-1;
+
+ printf("%-*s %7s %11s\n", name_width, "case", "flags", "perms");
+ for (size_t t = 0; t < test_id_count; t++) {
+ for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
+ test_define_suite(&test_suites[i]);
+
+ for (size_t j = 0; j < test_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (test_ids[t].name && !(
+ strcmp(test_ids[t].name,
+ test_suites[i].name) == 0
+ || strcmp(test_ids[t].name,
+ test_suites[i].cases[j].name) == 0)) {
+ continue;
+ }
+
+ struct perm_count_state perms = {0, 0};
+ case_forperm(
+ &test_suites[i],
+ &test_suites[i].cases[j],
+ test_ids[t].defines,
+ test_ids[t].define_count,
+ test_ids[t].cycles,
+ test_ids[t].cycle_count,
+ perm_count,
+ &perms);
+
+ char perm_buf[64];
+ sprintf(perm_buf, "%zu/%zu", perms.filtered, perms.total);
+ char flag_buf[64];
+ sprintf(flag_buf, "%s%s",
+ (test_suites[i].cases[j].flags & TEST_REENTRANT)
+ ? "r" : "",
+ (!test_suites[i].cases[j].flags)
+ ? "-" : "");
+ printf("%-*s %7s %11s\n",
+ name_width,
+ test_suites[i].cases[j].name,
+ flag_buf,
+ perm_buf);
+ }
+ }
+ }
+}
+
+static void list_suite_paths(void) {
+    // size the name column to fit the longest name, at minimum 23 chars
+ unsigned name_width = 23;
+ for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
+ size_t len = strlen(test_suites[i].name);
+ if (len > name_width) {
+ name_width = len;
+ }
+ }
+ name_width = 4*((name_width+1+4-1)/4)-1;
+
+ printf("%-*s %s\n", name_width, "suite", "path");
+ for (size_t t = 0; t < test_id_count; t++) {
+ for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
+ size_t cases = 0;
+
+ for (size_t j = 0; j < test_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (test_ids[t].name && !(
+ strcmp(test_ids[t].name,
+ test_suites[i].name) == 0
+ || strcmp(test_ids[t].name,
+ test_suites[i].cases[j].name) == 0)) {
+ continue;
+ }
+
+ cases += 1;
+ }
+
+ // no tests found?
+ if (!cases) {
+ continue;
+ }
+
+ printf("%-*s %s\n",
+ name_width,
+ test_suites[i].name,
+ test_suites[i].path);
+ }
+ }
+}
+
+static void list_case_paths(void) {
+    // size the name column to fit the longest name, at minimum 23 chars
+ unsigned name_width = 23;
+ for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
+ for (size_t j = 0; j < test_suites[i].case_count; j++) {
+ size_t len = strlen(test_suites[i].cases[j].name);
+ if (len > name_width) {
+ name_width = len;
+ }
+ }
+ }
+ name_width = 4*((name_width+1+4-1)/4)-1;
+
+ printf("%-*s %s\n", name_width, "case", "path");
+ for (size_t t = 0; t < test_id_count; t++) {
+ for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
+ for (size_t j = 0; j < test_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (test_ids[t].name && !(
+ strcmp(test_ids[t].name,
+ test_suites[i].name) == 0
+ || strcmp(test_ids[t].name,
+ test_suites[i].cases[j].name) == 0)) {
+ continue;
+ }
+
+ printf("%-*s %s\n",
+ name_width,
+ test_suites[i].cases[j].name,
+ test_suites[i].cases[j].path);
+ }
+ }
+ }
+}
+
+struct list_defines_define {
+ const char *name;
+ intmax_t *values;
+ size_t value_count;
+ size_t value_capacity;
+};
+
+struct list_defines_defines {
+ struct list_defines_define *defines;
+ size_t define_count;
+ size_t define_capacity;
+};
+
+static void list_defines_add(
+ struct list_defines_defines *defines,
+ size_t d) {
+ const char *name = test_define_name(d);
+ intmax_t value = TEST_DEFINE(d);
+
+ // define already in defines?
+ for (size_t i = 0; i < defines->define_count; i++) {
+ if (strcmp(defines->defines[i].name, name) == 0) {
+ // value already in values?
+ for (size_t j = 0; j < defines->defines[i].value_count; j++) {
+ if (defines->defines[i].values[j] == value) {
+ return;
+ }
+ }
+
+ *(intmax_t*)mappend(
+ (void**)&defines->defines[i].values,
+ sizeof(intmax_t),
+ &defines->defines[i].value_count,
+ &defines->defines[i].value_capacity) = value;
+
+ return;
+ }
+ }
+
+ // new define?
+ struct list_defines_define *define = mappend(
+ (void**)&defines->defines,
+ sizeof(struct list_defines_define),
+ &defines->define_count,
+ &defines->define_capacity);
+ define->name = name;
+ define->values = malloc(sizeof(intmax_t));
+ define->values[0] = value;
+ define->value_count = 1;
+ define->value_capacity = 1;
+}
+
+void perm_list_defines(
+ void *data,
+ const struct test_suite *suite,
+ const struct test_case *case_,
+ const test_powerloss_t *powerloss) {
+ struct list_defines_defines *defines = data;
+ (void)suite;
+ (void)case_;
+ (void)powerloss;
+
+ // collect defines
+ for (size_t d = 0;
+ d < lfs_max(suite->define_count,
+ TEST_IMPLICIT_DEFINE_COUNT);
+ d++) {
+ if (d < TEST_IMPLICIT_DEFINE_COUNT
+ || test_define_ispermutation(d)) {
+ list_defines_add(defines, d);
+ }
+ }
+}
+
+void perm_list_permutation_defines(
+ void *data,
+ const struct test_suite *suite,
+ const struct test_case *case_,
+ const test_powerloss_t *powerloss) {
+ struct list_defines_defines *defines = data;
+ (void)suite;
+ (void)case_;
+ (void)powerloss;
+
+ // collect permutation_defines
+ for (size_t d = 0;
+ d < lfs_max(suite->define_count,
+ TEST_IMPLICIT_DEFINE_COUNT);
+ d++) {
+ if (test_define_ispermutation(d)) {
+ list_defines_add(defines, d);
+ }
+ }
+}
+
+extern const test_geometry_t builtin_geometries[];
+
+static void list_defines(void) {
+ struct list_defines_defines defines = {NULL, 0, 0};
+
+ // add defines
+ for (size_t t = 0; t < test_id_count; t++) {
+ for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
+ test_define_suite(&test_suites[i]);
+
+ for (size_t j = 0; j < test_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (test_ids[t].name && !(
+ strcmp(test_ids[t].name,
+ test_suites[i].name) == 0
+ || strcmp(test_ids[t].name,
+ test_suites[i].cases[j].name) == 0)) {
+ continue;
+ }
+
+ case_forperm(
+ &test_suites[i],
+ &test_suites[i].cases[j],
+ test_ids[t].defines,
+ test_ids[t].define_count,
+ test_ids[t].cycles,
+ test_ids[t].cycle_count,
+ perm_list_defines,
+ &defines);
+ }
+ }
+ }
+
+ for (size_t i = 0; i < defines.define_count; i++) {
+ printf("%s=", defines.defines[i].name);
+ for (size_t j = 0; j < defines.defines[i].value_count; j++) {
+ printf("%jd", defines.defines[i].values[j]);
+ if (j != defines.defines[i].value_count-1) {
+ printf(",");
+ }
+ }
+ printf("\n");
+ }
+
+ for (size_t i = 0; i < defines.define_count; i++) {
+ free(defines.defines[i].values);
+ }
+ free(defines.defines);
+}
+
+static void list_permutation_defines(void) {
+ struct list_defines_defines defines = {NULL, 0, 0};
+
+ // add permutation defines
+ for (size_t t = 0; t < test_id_count; t++) {
+ for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
+ test_define_suite(&test_suites[i]);
+
+ for (size_t j = 0; j < test_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (test_ids[t].name && !(
+ strcmp(test_ids[t].name,
+ test_suites[i].name) == 0
+ || strcmp(test_ids[t].name,
+ test_suites[i].cases[j].name) == 0)) {
+ continue;
+ }
+
+ case_forperm(
+ &test_suites[i],
+ &test_suites[i].cases[j],
+ test_ids[t].defines,
+ test_ids[t].define_count,
+ test_ids[t].cycles,
+ test_ids[t].cycle_count,
+ perm_list_permutation_defines,
+ &defines);
+ }
+ }
+ }
+
+ for (size_t i = 0; i < defines.define_count; i++) {
+ printf("%s=", defines.defines[i].name);
+ for (size_t j = 0; j < defines.defines[i].value_count; j++) {
+ printf("%jd", defines.defines[i].values[j]);
+ if (j != defines.defines[i].value_count-1) {
+ printf(",");
+ }
+ }
+ printf("\n");
+ }
+
+ for (size_t i = 0; i < defines.define_count; i++) {
+ free(defines.defines[i].values);
+ }
+ free(defines.defines);
+}
+
+static void list_implicit_defines(void) {
+ struct list_defines_defines defines = {NULL, 0, 0};
+
+    // yes we do need to define a suite; this does a bit of bookkeeping,
+    // such as setting up the define cache
+ test_define_suite(&(const struct test_suite){0});
+
+ // make sure to include builtin geometries here
+ extern const test_geometry_t builtin_geometries[];
+ for (size_t g = 0; builtin_geometries[g].name; g++) {
+ test_define_geometry(&builtin_geometries[g]);
+ test_define_flush();
+
+ // add implicit defines
+ for (size_t d = 0; d < TEST_IMPLICIT_DEFINE_COUNT; d++) {
+ list_defines_add(&defines, d);
+ }
+ }
+
+ for (size_t i = 0; i < defines.define_count; i++) {
+ printf("%s=", defines.defines[i].name);
+ for (size_t j = 0; j < defines.defines[i].value_count; j++) {
+ printf("%jd", defines.defines[i].values[j]);
+ if (j != defines.defines[i].value_count-1) {
+ printf(",");
+ }
+ }
+ printf("\n");
+ }
+
+ for (size_t i = 0; i < defines.define_count; i++) {
+ free(defines.defines[i].values);
+ }
+ free(defines.defines);
+}
+
+
+
+// geometries to test
+
+const test_geometry_t builtin_geometries[] = {
+ {"default", {{0}, TEST_CONST(16), TEST_CONST(512), {0}}},
+ {"eeprom", {{0}, TEST_CONST(1), TEST_CONST(512), {0}}},
+ {"emmc", {{0}, {0}, TEST_CONST(512), {0}}},
+ {"nor", {{0}, TEST_CONST(1), TEST_CONST(4096), {0}}},
+ {"nand", {{0}, TEST_CONST(4096), TEST_CONST(32768), {0}}},
+ {NULL, {{0}, {0}, {0}, {0}}},
+};
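+
+// Each geometry fills the first four defines {READ_SIZE, PROG_SIZE,
+// ERASE_SIZE, ERASE_COUNT}; a {0} entry leaves that define unset so it falls
+// through to the runner's implicit default (e.g. READ_SIZE defaulting to
+// PROG_SIZE).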
+
+const test_geometry_t *test_geometries = builtin_geometries;
+size_t test_geometry_count = 5;
+
+static void list_geometries(void) {
+    // size the name column to fit the longest name, at minimum 23 chars
+ unsigned name_width = 23;
+ for (size_t g = 0; builtin_geometries[g].name; g++) {
+ size_t len = strlen(builtin_geometries[g].name);
+ if (len > name_width) {
+ name_width = len;
+ }
+ }
+ name_width = 4*((name_width+1+4-1)/4)-1;
+
+    // yes we do need to define a suite; this does a bit of bookkeeping,
+    // such as setting up the define cache
+ test_define_suite(&(const struct test_suite){0});
+
+ printf("%-*s %7s %7s %7s %7s %11s\n",
+ name_width, "geometry", "read", "prog", "erase", "count", "size");
+ for (size_t g = 0; builtin_geometries[g].name; g++) {
+ test_define_geometry(&builtin_geometries[g]);
+ test_define_flush();
+ printf("%-*s %7ju %7ju %7ju %7ju %11ju\n",
+ name_width,
+ builtin_geometries[g].name,
+ READ_SIZE,
+ PROG_SIZE,
+ ERASE_SIZE,
+ ERASE_COUNT,
+ ERASE_SIZE*ERASE_COUNT);
+ }
+}
+
+
+// scenarios to run tests under power-loss
+
+static void run_powerloss_none(
+ const lfs_emubd_powercycles_t *cycles,
+ size_t cycle_count,
+ const struct test_suite *suite,
+ const struct test_case *case_) {
+ (void)cycles;
+ (void)cycle_count;
+ (void)suite;
+
+ // create block device and configuration
+ lfs_emubd_t bd;
+
+ struct lfs_config cfg = {
+ .context = &bd,
+ .read = lfs_emubd_read,
+ .prog = lfs_emubd_prog,
+ .erase = lfs_emubd_erase,
+ .sync = lfs_emubd_sync,
+ .read_size = READ_SIZE,
+ .prog_size = PROG_SIZE,
+ .block_size = BLOCK_SIZE,
+ .block_count = BLOCK_COUNT,
+ .block_cycles = BLOCK_CYCLES,
+ .cache_size = CACHE_SIZE,
+ .lookahead_size = LOOKAHEAD_SIZE,
+ .compact_thresh = COMPACT_THRESH,
+ .metadata_max = METADATA_MAX,
+ .inline_max = INLINE_MAX,
+ #ifdef LFS_MULTIVERSION
+ .disk_version = DISK_VERSION,
+ #endif
+ };
+
+ struct lfs_emubd_config bdcfg = {
+ .read_size = READ_SIZE,
+ .prog_size = PROG_SIZE,
+ .erase_size = ERASE_SIZE,
+ .erase_count = ERASE_COUNT,
+ .erase_value = ERASE_VALUE,
+ .erase_cycles = ERASE_CYCLES,
+ .badblock_behavior = BADBLOCK_BEHAVIOR,
+ .disk_path = test_disk_path,
+ .read_sleep = test_read_sleep,
+ .prog_sleep = test_prog_sleep,
+ .erase_sleep = test_erase_sleep,
+ };
+
+ int err = lfs_emubd_create(&cfg, &bdcfg);
+ if (err) {
+ fprintf(stderr, "error: could not create block device: %d\n", err);
+ exit(-1);
+ }
+
+ // run the test
+ printf("running ");
+ perm_printid(suite, case_, NULL, 0);
+ printf("\n");
+
+ case_->run(&cfg);
+
+ printf("finished ");
+ perm_printid(suite, case_, NULL, 0);
+ printf("\n");
+
+ // cleanup
+ err = lfs_emubd_destroy(&cfg);
+ if (err) {
+ fprintf(stderr, "error: could not destroy block device: %d\n", err);
+ exit(-1);
+ }
+}
+
+static void powerloss_longjmp(void *c) {
+ jmp_buf *powerloss_jmp = c;
+ longjmp(*powerloss_jmp, 1);
+}
+
+static void run_powerloss_linear(
+ const lfs_emubd_powercycles_t *cycles,
+ size_t cycle_count,
+ const struct test_suite *suite,
+ const struct test_case *case_) {
+ (void)cycles;
+ (void)cycle_count;
+ (void)suite;
+
+ // create block device and configuration
+ lfs_emubd_t bd;
+ jmp_buf powerloss_jmp;
+ volatile lfs_emubd_powercycles_t i = 1;
+
+ struct lfs_config cfg = {
+ .context = &bd,
+ .read = lfs_emubd_read,
+ .prog = lfs_emubd_prog,
+ .erase = lfs_emubd_erase,
+ .sync = lfs_emubd_sync,
+ .read_size = READ_SIZE,
+ .prog_size = PROG_SIZE,
+ .block_size = BLOCK_SIZE,
+ .block_count = BLOCK_COUNT,
+ .block_cycles = BLOCK_CYCLES,
+ .cache_size = CACHE_SIZE,
+ .lookahead_size = LOOKAHEAD_SIZE,
+ .compact_thresh = COMPACT_THRESH,
+ .metadata_max = METADATA_MAX,
+ .inline_max = INLINE_MAX,
+ #ifdef LFS_MULTIVERSION
+ .disk_version = DISK_VERSION,
+ #endif
+ };
+
+ struct lfs_emubd_config bdcfg = {
+ .read_size = READ_SIZE,
+ .prog_size = PROG_SIZE,
+ .erase_size = ERASE_SIZE,
+ .erase_count = ERASE_COUNT,
+ .erase_value = ERASE_VALUE,
+ .erase_cycles = ERASE_CYCLES,
+ .badblock_behavior = BADBLOCK_BEHAVIOR,
+ .disk_path = test_disk_path,
+ .read_sleep = test_read_sleep,
+ .prog_sleep = test_prog_sleep,
+ .erase_sleep = test_erase_sleep,
+ .power_cycles = i,
+ .powerloss_behavior = POWERLOSS_BEHAVIOR,
+ .powerloss_cb = powerloss_longjmp,
+ .powerloss_data = &powerloss_jmp,
+ };
+
+ int err = lfs_emubd_create(&cfg, &bdcfg);
+ if (err) {
+ fprintf(stderr, "error: could not create block device: %d\n", err);
+ exit(-1);
+ }
+
+ // run the test, increasing power-cycles as power-loss events occur
+ printf("running ");
+ perm_printid(suite, case_, NULL, 0);
+ printf("\n");
+
+ while (true) {
+ if (!setjmp(powerloss_jmp)) {
+ // run the test
+ case_->run(&cfg);
+ break;
+ }
+
+ // power-loss!
+ printf("powerloss ");
+ perm_printid(suite, case_, NULL, 0);
+ printf(":");
+ for (lfs_emubd_powercycles_t j = 1; j <= i; j++) {
+ leb16_print(j);
+ }
+ printf("\n");
+
+ i += 1;
+ lfs_emubd_setpowercycles(&cfg, i);
+ }
+
+ printf("finished ");
+ perm_printid(suite, case_, NULL, 0);
+ printf("\n");
+
+ // cleanup
+ err = lfs_emubd_destroy(&cfg);
+ if (err) {
+ fprintf(stderr, "error: could not destroy block device: %d\n", err);
+ exit(-1);
+ }
+}
+
+static void run_powerloss_log(
+ const lfs_emubd_powercycles_t *cycles,
+ size_t cycle_count,
+ const struct test_suite *suite,
+ const struct test_case *case_) {
+ (void)cycles;
+ (void)cycle_count;
+ (void)suite;
+
+ // create block device and configuration
+ lfs_emubd_t bd;
+ jmp_buf powerloss_jmp;
+ volatile lfs_emubd_powercycles_t i = 1;
+
+ struct lfs_config cfg = {
+ .context = &bd,
+ .read = lfs_emubd_read,
+ .prog = lfs_emubd_prog,
+ .erase = lfs_emubd_erase,
+ .sync = lfs_emubd_sync,
+ .read_size = READ_SIZE,
+ .prog_size = PROG_SIZE,
+ .block_size = BLOCK_SIZE,
+ .block_count = BLOCK_COUNT,
+ .block_cycles = BLOCK_CYCLES,
+ .cache_size = CACHE_SIZE,
+ .lookahead_size = LOOKAHEAD_SIZE,
+ .compact_thresh = COMPACT_THRESH,
+ .metadata_max = METADATA_MAX,
+ .inline_max = INLINE_MAX,
+ #ifdef LFS_MULTIVERSION
+ .disk_version = DISK_VERSION,
+ #endif
+ };
+
+ struct lfs_emubd_config bdcfg = {
+ .read_size = READ_SIZE,
+ .prog_size = PROG_SIZE,
+ .erase_size = ERASE_SIZE,
+ .erase_count = ERASE_COUNT,
+ .erase_value = ERASE_VALUE,
+ .erase_cycles = ERASE_CYCLES,
+ .badblock_behavior = BADBLOCK_BEHAVIOR,
+ .disk_path = test_disk_path,
+ .read_sleep = test_read_sleep,
+ .prog_sleep = test_prog_sleep,
+ .erase_sleep = test_erase_sleep,
+ .power_cycles = i,
+ .powerloss_behavior = POWERLOSS_BEHAVIOR,
+ .powerloss_cb = powerloss_longjmp,
+ .powerloss_data = &powerloss_jmp,
+ };
+
+ int err = lfs_emubd_create(&cfg, &bdcfg);
+ if (err) {
+ fprintf(stderr, "error: could not create block device: %d\n", err);
+ exit(-1);
+ }
+
+ // run the test, increasing power-cycles as power-loss events occur
+ printf("running ");
+ perm_printid(suite, case_, NULL, 0);
+ printf("\n");
+
+ while (true) {
+ if (!setjmp(powerloss_jmp)) {
+ // run the test
+ case_->run(&cfg);
+ break;
+ }
+
+ // power-loss!
+ printf("powerloss ");
+ perm_printid(suite, case_, NULL, 0);
+ printf(":");
+ for (lfs_emubd_powercycles_t j = 1; j <= i; j *= 2) {
+ leb16_print(j);
+ }
+ printf("\n");
+
+ i *= 2;
+ lfs_emubd_setpowercycles(&cfg, i);
+ }
+
+ printf("finished ");
+ perm_printid(suite, case_, NULL, 0);
+ printf("\n");
+
+ // cleanup
+ err = lfs_emubd_destroy(&cfg);
+ if (err) {
+ fprintf(stderr, "error: could not destroy block device: %d\n", err);
+ exit(-1);
+ }
+}
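+
+// run_powerloss_linear and run_powerloss_log differ only in how the
+// power-cycle budget grows after each power-loss: linear increments it by
+// one, log doubles it, trading coverage for runtime.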
+
+static void run_powerloss_cycles(
+ const lfs_emubd_powercycles_t *cycles,
+ size_t cycle_count,
+ const struct test_suite *suite,
+ const struct test_case *case_) {
+ (void)suite;
+
+ // create block device and configuration
+ lfs_emubd_t bd;
+ jmp_buf powerloss_jmp;
+ volatile size_t i = 0;
+
+ struct lfs_config cfg = {
+ .context = &bd,
+ .read = lfs_emubd_read,
+ .prog = lfs_emubd_prog,
+ .erase = lfs_emubd_erase,
+ .sync = lfs_emubd_sync,
+ .read_size = READ_SIZE,
+ .prog_size = PROG_SIZE,
+ .block_size = BLOCK_SIZE,
+ .block_count = BLOCK_COUNT,
+ .block_cycles = BLOCK_CYCLES,
+ .cache_size = CACHE_SIZE,
+ .lookahead_size = LOOKAHEAD_SIZE,
+ .compact_thresh = COMPACT_THRESH,
+ .metadata_max = METADATA_MAX,
+ .inline_max = INLINE_MAX,
+ #ifdef LFS_MULTIVERSION
+ .disk_version = DISK_VERSION,
+ #endif
+ };
+
+ struct lfs_emubd_config bdcfg = {
+ .read_size = READ_SIZE,
+ .prog_size = PROG_SIZE,
+ .erase_size = ERASE_SIZE,
+ .erase_count = ERASE_COUNT,
+ .erase_value = ERASE_VALUE,
+ .erase_cycles = ERASE_CYCLES,
+ .badblock_behavior = BADBLOCK_BEHAVIOR,
+ .disk_path = test_disk_path,
+ .read_sleep = test_read_sleep,
+ .prog_sleep = test_prog_sleep,
+ .erase_sleep = test_erase_sleep,
+ .power_cycles = (i < cycle_count) ? cycles[i] : 0,
+ .powerloss_behavior = POWERLOSS_BEHAVIOR,
+ .powerloss_cb = powerloss_longjmp,
+ .powerloss_data = &powerloss_jmp,
+ };
+
+ int err = lfs_emubd_create(&cfg, &bdcfg);
+ if (err) {
+ fprintf(stderr, "error: could not create block device: %d\n", err);
+ exit(-1);
+ }
+
+ // run the test, increasing power-cycles as power-loss events occur
+ printf("running ");
+ perm_printid(suite, case_, NULL, 0);
+ printf("\n");
+
+ while (true) {
+ if (!setjmp(powerloss_jmp)) {
+ // run the test
+ case_->run(&cfg);
+ break;
+ }
+
+ // power-loss!
+ assert(i <= cycle_count);
+ printf("powerloss ");
+ perm_printid(suite, case_, cycles, i+1);
+ printf("\n");
+
+ i += 1;
+ lfs_emubd_setpowercycles(&cfg,
+ (i < cycle_count) ? cycles[i] : 0);
+ }
+
+ printf("finished ");
+ perm_printid(suite, case_, NULL, 0);
+ printf("\n");
+
+ // cleanup
+ err = lfs_emubd_destroy(&cfg);
+ if (err) {
+ fprintf(stderr, "error: could not destroy block device: %d\n", err);
+ exit(-1);
+ }
+}
+
+struct powerloss_exhaustive_state {
+ struct lfs_config *cfg;
+
+ lfs_emubd_t *branches;
+ size_t branch_count;
+ size_t branch_capacity;
+};
+
+struct powerloss_exhaustive_cycles {
+ lfs_emubd_powercycles_t *cycles;
+ size_t cycle_count;
+ size_t cycle_capacity;
+};
+
+static void powerloss_exhaustive_branch(void *c) {
+ struct powerloss_exhaustive_state *state = c;
+ // append to branches
+ lfs_emubd_t *branch = mappend(
+ (void**)&state->branches,
+ sizeof(lfs_emubd_t),
+ &state->branch_count,
+ &state->branch_capacity);
+ if (!branch) {
+ fprintf(stderr, "error: exhaustive: out of memory\n");
+ exit(-1);
+ }
+
+ // create copy-on-write copy
+ int err = lfs_emubd_copy(state->cfg, branch);
+ if (err) {
+ fprintf(stderr, "error: exhaustive: could not create bd copy\n");
+ exit(-1);
+ }
+
+ // also trigger on next power cycle
+ lfs_emubd_setpowercycles(state->cfg, 1);
+}
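+
+// On each emulated power-loss this callback snapshots the block device via
+// copy-on-write, queues the snapshot as a branch to recurse into, and re-arms
+// the emulator to lose power again on the very next cycle; together with
+// run_powerloss_exhaustive_layer below this walks the full tree of possible
+// power-loss points.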
+
+static void run_powerloss_exhaustive_layer(
+ struct powerloss_exhaustive_cycles *cycles,
+ const struct test_suite *suite,
+ const struct test_case *case_,
+ struct lfs_config *cfg,
+ struct lfs_emubd_config *bdcfg,
+ size_t depth) {
+ (void)suite;
+
+ struct powerloss_exhaustive_state state = {
+ .cfg = cfg,
+ .branches = NULL,
+ .branch_count = 0,
+ .branch_capacity = 0,
+ };
+
+ // run through the test without additional powerlosses, collecting possible
+ // branches as we do so
+ lfs_emubd_setpowercycles(state.cfg, depth > 0 ? 1 : 0);
+ bdcfg->powerloss_data = &state;
+
+ // run the tests
+ case_->run(cfg);
+
+ // aggressively clean up memory here to try to keep our memory usage low
+ int err = lfs_emubd_destroy(cfg);
+ if (err) {
+ fprintf(stderr, "error: could not destroy block device: %d\n", err);
+ exit(-1);
+ }
+
+ // recurse into each branch
+ for (size_t i = 0; i < state.branch_count; i++) {
+ // first push and print the branch
+ lfs_emubd_powercycles_t *cycle = mappend(
+ (void**)&cycles->cycles,
+ sizeof(lfs_emubd_powercycles_t),
+ &cycles->cycle_count,
+ &cycles->cycle_capacity);
+ if (!cycle) {
+ fprintf(stderr, "error: exhaustive: out of memory\n");
+ exit(-1);
+ }
+ *cycle = i+1;
+
+ printf("powerloss ");
+ perm_printid(suite, case_, cycles->cycles, cycles->cycle_count);
+ printf("\n");
+
+ // now recurse
+ cfg->context = &state.branches[i];
+ run_powerloss_exhaustive_layer(cycles,
+ suite, case_,
+ cfg, bdcfg, depth-1);
+
+ // pop the cycle
+ cycles->cycle_count -= 1;
+ }
+
+ // clean up memory
+ free(state.branches);
+}
+
+static void run_powerloss_exhaustive(
+ const lfs_emubd_powercycles_t *cycles,
+ size_t cycle_count,
+ const struct test_suite *suite,
+ const struct test_case *case_) {
+ (void)cycles;
+ (void)suite;
+
+ // create block device and configuration
+ lfs_emubd_t bd;
+
+ struct lfs_config cfg = {
+ .context = &bd,
+ .read = lfs_emubd_read,
+ .prog = lfs_emubd_prog,
+ .erase = lfs_emubd_erase,
+ .sync = lfs_emubd_sync,
+ .read_size = READ_SIZE,
+ .prog_size = PROG_SIZE,
+ .block_size = BLOCK_SIZE,
+ .block_count = BLOCK_COUNT,
+ .block_cycles = BLOCK_CYCLES,
+ .cache_size = CACHE_SIZE,
+ .lookahead_size = LOOKAHEAD_SIZE,
+ .compact_thresh = COMPACT_THRESH,
+ .metadata_max = METADATA_MAX,
+ .inline_max = INLINE_MAX,
+ #ifdef LFS_MULTIVERSION
+ .disk_version = DISK_VERSION,
+ #endif
+ };
+
+ struct lfs_emubd_config bdcfg = {
+ .read_size = READ_SIZE,
+ .prog_size = PROG_SIZE,
+ .erase_size = ERASE_SIZE,
+ .erase_count = ERASE_COUNT,
+ .erase_value = ERASE_VALUE,
+ .erase_cycles = ERASE_CYCLES,
+ .badblock_behavior = BADBLOCK_BEHAVIOR,
+ .disk_path = test_disk_path,
+ .read_sleep = test_read_sleep,
+ .prog_sleep = test_prog_sleep,
+ .erase_sleep = test_erase_sleep,
+ .powerloss_behavior = POWERLOSS_BEHAVIOR,
+ .powerloss_cb = powerloss_exhaustive_branch,
+ .powerloss_data = NULL,
+ };
+
+ int err = lfs_emubd_create(&cfg, &bdcfg);
+ if (err) {
+ fprintf(stderr, "error: could not create block device: %d\n", err);
+ exit(-1);
+ }
+
+ // run the test, increasing power-cycles as power-loss events occur
+ printf("running ");
+ perm_printid(suite, case_, NULL, 0);
+ printf("\n");
+
+ // recursively exhaust each layer of powerlosses
+ run_powerloss_exhaustive_layer(
+ &(struct powerloss_exhaustive_cycles){NULL, 0, 0},
+ suite, case_,
+ &cfg, &bdcfg, cycle_count);
+
+ printf("finished ");
+ perm_printid(suite, case_, NULL, 0);
+ printf("\n");
+}
+
+
+const test_powerloss_t builtin_powerlosses[] = {
+ {"none", run_powerloss_none, NULL, 0},
+ {"log", run_powerloss_log, NULL, 0},
+ {"linear", run_powerloss_linear, NULL, 0},
+ {"exhaustive", run_powerloss_exhaustive, NULL, SIZE_MAX},
+ {NULL, NULL, NULL, 0},
+};
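+
+// Note the exhaustive scenario's cycle_count of SIZE_MAX is reused as the
+// recursion depth passed to run_powerloss_exhaustive, i.e. effectively
+// unbounded.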
+
+const char *const builtin_powerlosses_help[] = {
+ "Run with no power-losses.",
+ "Run with exponentially-decreasing power-losses.",
+ "Run with linearly-decreasing power-losses.",
+ "Run a all permutations of power-losses, this may take a while.",
+ "Run a all permutations of n power-losses.",
+ "Run a custom comma-separated set of power-losses.",
+ "Run a custom leb16-encoded set of power-losses.",
+};
+
+// default to -Pnone,linear, which provides a good heuristic while still
+// running quickly
+const test_powerloss_t *test_powerlosses = (const test_powerloss_t[]){
+ {"none", run_powerloss_none, NULL, 0},
+ {"linear", run_powerloss_linear, NULL, 0},
+};
+size_t test_powerloss_count = 2;
+
+static void list_powerlosses(void) {
+    // size the name column to fit the longest name, at minimum 23 chars
+ unsigned name_width = 23;
+ for (size_t i = 0; builtin_powerlosses[i].name; i++) {
+ size_t len = strlen(builtin_powerlosses[i].name);
+ if (len > name_width) {
+ name_width = len;
+ }
+ }
+ name_width = 4*((name_width+1+4-1)/4)-1;
+
+ printf("%-*s %s\n", name_width, "scenario", "description");
+ size_t i = 0;
+ for (; builtin_powerlosses[i].name; i++) {
+ printf("%-*s %s\n",
+ name_width,
+ builtin_powerlosses[i].name,
+ builtin_powerlosses_help[i]);
+ }
+
+ // a couple more options with special parsing
+ printf("%-*s %s\n", name_width, "1,2,3", builtin_powerlosses_help[i+0]);
+ printf("%-*s %s\n", name_width, "{1,2,3}", builtin_powerlosses_help[i+1]);
+ printf("%-*s %s\n", name_width, ":1248g1", builtin_powerlosses_help[i+2]);
+}
+
+
+// global test step count
+size_t test_step = 0;
+
+void perm_run(
+ void *data,
+ const struct test_suite *suite,
+ const struct test_case *case_,
+ const test_powerloss_t *powerloss) {
+ (void)data;
+
+ // skip this step?
+ if (!(test_step >= test_step_start
+ && test_step < test_step_stop
+ && (test_step-test_step_start) % test_step_step == 0)) {
+ test_step += 1;
+ return;
+ }
+ test_step += 1;
+
+ // filter?
+ if (case_->filter && !case_->filter()) {
+ printf("skipped ");
+ perm_printid(suite, case_, NULL, 0);
+ printf("\n");
+ return;
+ }
+
+ powerloss->run(
+ powerloss->cycles, powerloss->cycle_count,
+ suite, case_);
+}
+
+static void run(void) {
+ // ignore disconnected pipes
+ signal(SIGPIPE, SIG_IGN);
+
+ for (size_t t = 0; t < test_id_count; t++) {
+ for (size_t i = 0; i < TEST_SUITE_COUNT; i++) {
+ test_define_suite(&test_suites[i]);
+
+ for (size_t j = 0; j < test_suites[i].case_count; j++) {
+ // does neither suite nor case name match?
+ if (test_ids[t].name && !(
+ strcmp(test_ids[t].name,
+ test_suites[i].name) == 0
+ || strcmp(test_ids[t].name,
+ test_suites[i].cases[j].name) == 0)) {
+ continue;
+ }
+
+ case_forperm(
+ &test_suites[i],
+ &test_suites[i].cases[j],
+ test_ids[t].defines,
+ test_ids[t].define_count,
+ test_ids[t].cycles,
+ test_ids[t].cycle_count,
+ perm_run,
+ NULL);
+ }
+ }
+ }
+}
+
+
+
+// option handling
+enum opt_flags {
+ OPT_HELP = 'h',
+ OPT_SUMMARY = 'Y',
+ OPT_LIST_SUITES = 'l',
+ OPT_LIST_CASES = 'L',
+ OPT_LIST_SUITE_PATHS = 1,
+ OPT_LIST_CASE_PATHS = 2,
+ OPT_LIST_DEFINES = 3,
+ OPT_LIST_PERMUTATION_DEFINES = 4,
+ OPT_LIST_IMPLICIT_DEFINES = 5,
+ OPT_LIST_GEOMETRIES = 6,
+ OPT_LIST_POWERLOSSES = 7,
+ OPT_DEFINE = 'D',
+ OPT_GEOMETRY = 'G',
+ OPT_POWERLOSS = 'P',
+ OPT_STEP = 's',
+ OPT_DISK = 'd',
+ OPT_TRACE = 't',
+ OPT_TRACE_BACKTRACE = 8,
+ OPT_TRACE_PERIOD = 9,
+ OPT_TRACE_FREQ = 10,
+ OPT_READ_SLEEP = 11,
+ OPT_PROG_SLEEP = 12,
+ OPT_ERASE_SLEEP = 13,
+};
+
+const char *short_opts = "hYlLD:G:P:s:d:t:";
+
+const struct option long_opts[] = {
+ {"help", no_argument, NULL, OPT_HELP},
+ {"summary", no_argument, NULL, OPT_SUMMARY},
+ {"list-suites", no_argument, NULL, OPT_LIST_SUITES},
+ {"list-cases", no_argument, NULL, OPT_LIST_CASES},
+ {"list-suite-paths", no_argument, NULL, OPT_LIST_SUITE_PATHS},
+ {"list-case-paths", no_argument, NULL, OPT_LIST_CASE_PATHS},
+ {"list-defines", no_argument, NULL, OPT_LIST_DEFINES},
+ {"list-permutation-defines",
+ no_argument, NULL, OPT_LIST_PERMUTATION_DEFINES},
+ {"list-implicit-defines",
+ no_argument, NULL, OPT_LIST_IMPLICIT_DEFINES},
+ {"list-geometries", no_argument, NULL, OPT_LIST_GEOMETRIES},
+ {"list-powerlosses", no_argument, NULL, OPT_LIST_POWERLOSSES},
+ {"define", required_argument, NULL, OPT_DEFINE},
+ {"geometry", required_argument, NULL, OPT_GEOMETRY},
+ {"powerloss", required_argument, NULL, OPT_POWERLOSS},
+ {"step", required_argument, NULL, OPT_STEP},
+ {"disk", required_argument, NULL, OPT_DISK},
+ {"trace", required_argument, NULL, OPT_TRACE},
+ {"trace-backtrace", no_argument, NULL, OPT_TRACE_BACKTRACE},
+ {"trace-period", required_argument, NULL, OPT_TRACE_PERIOD},
+ {"trace-freq", required_argument, NULL, OPT_TRACE_FREQ},
+ {"read-sleep", required_argument, NULL, OPT_READ_SLEEP},
+ {"prog-sleep", required_argument, NULL, OPT_PROG_SLEEP},
+ {"erase-sleep", required_argument, NULL, OPT_ERASE_SLEEP},
+ {NULL, 0, NULL, 0},
+};
+
+const char *const help_text[] = {
+ "Show this help message.",
+ "Show quick summary.",
+ "List test suites.",
+ "List test cases.",
+ "List the path for each test suite.",
+ "List the path and line number for each test case.",
+ "List all defines in this test-runner.",
+ "List explicit defines in this test-runner.",
+ "List implicit defines in this test-runner.",
+ "List the available disk geometries.",
+ "List the available power-loss scenarios.",
+ "Override a test define.",
+ "Comma-separated list of disk geometries to test.",
+ "Comma-separated list of power-loss scenarios to test.",
+ "Comma-separated range of test permutations to run (start,stop,step).",
+ "Direct block device operations to this file.",
+ "Direct trace output to this file.",
+ "Include a backtrace with every trace statement.",
+ "Sample trace output at this period in cycles.",
+ "Sample trace output at this frequency in hz.",
+ "Artificial read delay in seconds.",
+ "Artificial prog delay in seconds.",
+ "Artificial erase delay in seconds.",
+};
+
+int main(int argc, char **argv) {
+ void (*op)(void) = run;
+
+ size_t test_override_capacity = 0;
+ size_t test_geometry_capacity = 0;
+ size_t test_powerloss_capacity = 0;
+ size_t test_id_capacity = 0;
+
+ // parse options
+ while (true) {
+ int c = getopt_long(argc, argv, short_opts, long_opts, NULL);
+ switch (c) {
+ // generate help message
+ case OPT_HELP: {
+ printf("usage: %s [options] [test_id]\n", argv[0]);
+ printf("\n");
+
+ printf("options:\n");
+ size_t i = 0;
+ while (long_opts[i].name) {
+ size_t indent;
+ if (long_opts[i].has_arg == no_argument) {
+ if (long_opts[i].val >= '0' && long_opts[i].val < 'z') {
+ indent = printf(" -%c, --%s ",
+ long_opts[i].val,
+ long_opts[i].name);
+ } else {
+ indent = printf(" --%s ",
+ long_opts[i].name);
+ }
+ } else {
+ if (long_opts[i].val >= '0' && long_opts[i].val < 'z') {
+ indent = printf(" -%c %s, --%s %s ",
+ long_opts[i].val,
+ long_opts[i].name,
+ long_opts[i].name,
+ long_opts[i].name);
+ } else {
+ indent = printf(" --%s %s ",
+ long_opts[i].name,
+ long_opts[i].name);
+ }
+ }
+
+ // a quick, hacky, byte-level method for text wrapping
+ size_t len = strlen(help_text[i]);
+ size_t j = 0;
+ if (indent < 24) {
+ printf("%*s %.80s\n",
+ (int)(24-1-indent),
+ "",
+ &help_text[i][j]);
+ j += 80;
+ } else {
+ printf("\n");
+ }
+
+ while (j < len) {
+ printf("%24s%.80s\n", "", &help_text[i][j]);
+ j += 80;
+ }
+
+ i += 1;
+ }
+
+ printf("\n");
+ exit(0);
+ }
+ // summary/list flags
+ case OPT_SUMMARY:
+ op = summary;
+ break;
+ case OPT_LIST_SUITES:
+ op = list_suites;
+ break;
+ case OPT_LIST_CASES:
+ op = list_cases;
+ break;
+ case OPT_LIST_SUITE_PATHS:
+ op = list_suite_paths;
+ break;
+ case OPT_LIST_CASE_PATHS:
+ op = list_case_paths;
+ break;
+ case OPT_LIST_DEFINES:
+ op = list_defines;
+ break;
+ case OPT_LIST_PERMUTATION_DEFINES:
+ op = list_permutation_defines;
+ break;
+ case OPT_LIST_IMPLICIT_DEFINES:
+ op = list_implicit_defines;
+ break;
+ case OPT_LIST_GEOMETRIES:
+ op = list_geometries;
+ break;
+ case OPT_LIST_POWERLOSSES:
+ op = list_powerlosses;
+ break;
+ // configuration
+ case OPT_DEFINE: {
+ // allocate space
+ test_override_t *override = mappend(
+ (void**)&test_overrides,
+ sizeof(test_override_t),
+ &test_override_count,
+ &test_override_capacity);
+
+ // parse into string key/intmax_t value, cannibalizing the
+ // arg in the process
+ char *sep = strchr(optarg, '=');
+ char *parsed = NULL;
+ if (!sep) {
+ goto invalid_define;
+ }
+ *sep = '\0';
+ override->name = optarg;
+ optarg = sep+1;
+
+ // parse comma-separated permutations
+ {
+ override->defines = NULL;
+ override->permutations = 0;
+ size_t override_capacity = 0;
+ while (true) {
+ optarg += strspn(optarg, " ");
+
+ if (strncmp(optarg, "range", strlen("range")) == 0) {
+ // range of values
+ optarg += strlen("range");
+ optarg += strspn(optarg, " ");
+ if (*optarg != '(') {
+ goto invalid_define;
+ }
+ optarg += 1;
+
+ intmax_t start = strtoumax(optarg, &parsed, 0);
+ intmax_t stop = -1;
+ intmax_t step = 1;
+ // allow empty string for start=0
+ if (parsed == optarg) {
+ start = 0;
+ }
+ optarg = parsed + strspn(parsed, " ");
+
+ if (*optarg != ',' && *optarg != ')') {
+ goto invalid_define;
+ }
+
+ if (*optarg == ',') {
+ optarg += 1;
+ stop = strtoumax(optarg, &parsed, 0);
+ // allow empty string for stop=end
+ if (parsed == optarg) {
+ stop = -1;
+ }
+ optarg = parsed + strspn(parsed, " ");
+
+ if (*optarg != ',' && *optarg != ')') {
+ goto invalid_define;
+ }
+
+ if (*optarg == ',') {
+ optarg += 1;
+ step = strtoumax(optarg, &parsed, 0);
+                                // allow empty string for step=1
+ if (parsed == optarg) {
+ step = 1;
+ }
+ optarg = parsed + strspn(parsed, " ");
+
+ if (*optarg != ')') {
+ goto invalid_define;
+ }
+ }
+ } else {
+ // single value = stop only
+ stop = start;
+ start = 0;
+ }
+
+ if (*optarg != ')') {
+ goto invalid_define;
+ }
+ optarg += 1;
+
+ // calculate the range of values
+ assert(step != 0);
+ for (intmax_t i = start;
+ (step < 0)
+ ? i > stop
+ : (uintmax_t)i < (uintmax_t)stop;
+ i += step) {
+ *(intmax_t*)mappend(
+ (void**)&override->defines,
+ sizeof(intmax_t),
+ &override->permutations,
+ &override_capacity) = i;
+ }
+ } else if (*optarg != '\0') {
+ // single value
+ intmax_t define = strtoimax(optarg, &parsed, 0);
+ if (parsed == optarg) {
+ goto invalid_define;
+ }
+ optarg = parsed + strspn(parsed, " ");
+ *(intmax_t*)mappend(
+ (void**)&override->defines,
+ sizeof(intmax_t),
+ &override->permutations,
+ &override_capacity) = define;
+ } else {
+ break;
+ }
+
+ if (*optarg == ',') {
+ optarg += 1;
+ }
+ }
+ }
+ assert(override->permutations > 0);
+ break;
+
+invalid_define:
+ fprintf(stderr, "error: invalid define: %s\n", optarg);
+ exit(-1);
+ }
+ case OPT_GEOMETRY: {
+ // reset our geometry scenarios
+ if (test_geometry_capacity > 0) {
+ free((test_geometry_t*)test_geometries);
+ }
+ test_geometries = NULL;
+ test_geometry_count = 0;
+ test_geometry_capacity = 0;
+
+ // parse the comma separated list of disk geometries
+ while (*optarg) {
+ // allocate space
+ test_geometry_t *geometry = mappend(
+ (void**)&test_geometries,
+ sizeof(test_geometry_t),
+ &test_geometry_count,
+ &test_geometry_capacity);
+
+ // parse the disk geometry
+ optarg += strspn(optarg, " ");
+
+ // named disk geometry
+ size_t len = strcspn(optarg, " ,");
+ for (size_t i = 0; builtin_geometries[i].name; i++) {
+ if (len == strlen(builtin_geometries[i].name)
+ && memcmp(optarg,
+ builtin_geometries[i].name,
+ len) == 0) {
+ *geometry = builtin_geometries[i];
+ optarg += len;
+ goto geometry_next;
+ }
+ }
+
+ // comma-separated read/prog/erase/count
+ if (*optarg == '{') {
+ lfs_size_t sizes[4];
+ size_t count = 0;
+
+ char *s = optarg + 1;
+ while (count < 4) {
+ char *parsed = NULL;
+ sizes[count] = strtoumax(s, &parsed, 0);
+ count += 1;
+
+ s = parsed + strspn(parsed, " ");
+ if (*s == ',') {
+ s += 1;
+ continue;
+ } else if (*s == '}') {
+ s += 1;
+ break;
+ } else {
+ goto geometry_unknown;
+ }
+ }
+
+ // allow implicit r=p and p=e for common geometries
+ memset(geometry, 0, sizeof(test_geometry_t));
+ if (count >= 3) {
+ geometry->defines[READ_SIZE_i]
+ = TEST_LIT(sizes[0]);
+ geometry->defines[PROG_SIZE_i]
+ = TEST_LIT(sizes[1]);
+ geometry->defines[ERASE_SIZE_i]
+ = TEST_LIT(sizes[2]);
+ } else if (count >= 2) {
+ geometry->defines[PROG_SIZE_i]
+ = TEST_LIT(sizes[0]);
+ geometry->defines[ERASE_SIZE_i]
+ = TEST_LIT(sizes[1]);
+ } else {
+ geometry->defines[ERASE_SIZE_i]
+ = TEST_LIT(sizes[0]);
+ }
+ if (count >= 4) {
+ geometry->defines[ERASE_COUNT_i]
+ = TEST_LIT(sizes[3]);
+ }
+ optarg = s;
+ goto geometry_next;
+ }
+
+ // leb16-encoded read/prog/erase/count
+ if (*optarg == ':') {
+ lfs_size_t sizes[4];
+ size_t count = 0;
+
+ char *s = optarg + 1;
+ while (true) {
+ char *parsed = NULL;
+ uintmax_t x = leb16_parse(s, &parsed);
+ if (parsed == s || count >= 4) {
+ break;
+ }
+
+ sizes[count] = x;
+ count += 1;
+ s = parsed;
+ }
+
+ // allow implicit r=p and p=e for common geometries
+ memset(geometry, 0, sizeof(test_geometry_t));
+ if (count >= 3) {
+ geometry->defines[READ_SIZE_i]
+ = TEST_LIT(sizes[0]);
+ geometry->defines[PROG_SIZE_i]
+ = TEST_LIT(sizes[1]);
+ geometry->defines[ERASE_SIZE_i]
+ = TEST_LIT(sizes[2]);
+ } else if (count >= 2) {
+ geometry->defines[PROG_SIZE_i]
+ = TEST_LIT(sizes[0]);
+ geometry->defines[ERASE_SIZE_i]
+ = TEST_LIT(sizes[1]);
+ } else {
+ geometry->defines[ERASE_SIZE_i]
+ = TEST_LIT(sizes[0]);
+ }
+ if (count >= 4) {
+ geometry->defines[ERASE_COUNT_i]
+ = TEST_LIT(sizes[3]);
+ }
+ optarg = s;
+ goto geometry_next;
+ }
+
+geometry_unknown:
+ // unknown geometry?
+ fprintf(stderr, "error: unknown disk geometry: %s\n",
+ optarg);
+ exit(-1);
+
+geometry_next:
+ optarg += strspn(optarg, " ");
+ if (*optarg == ',') {
+ optarg += 1;
+ } else if (*optarg == '\0') {
+ break;
+ } else {
+ goto geometry_unknown;
+ }
+ }
+ break;
+ }
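+ // For reference, geometries can be given as (illustrative, inferred
+ // from the parsing logic above):
+ //
+ // --geometry=<name> a named builtin_geometries entry
+ // --geometry={4,16,4096,256} explicit read/prog/erase/count
+ // --geometry=:<leb16> leb16-encoded, with implicit r=p, p=e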
+ case OPT_POWERLOSS: {
+ // reset our powerloss scenarios
+ if (test_powerloss_capacity > 0) {
+ free((test_powerloss_t*)test_powerlosses);
+ }
+ test_powerlosses = NULL;
+ test_powerloss_count = 0;
+ test_powerloss_capacity = 0;
+
+ // parse the comma separated list of power-loss scenarios
+ while (*optarg) {
+ // allocate space
+ test_powerloss_t *powerloss = mappend(
+ (void**)&test_powerlosses,
+ sizeof(test_powerloss_t),
+ &test_powerloss_count,
+ &test_powerloss_capacity);
+
+ // parse the power-loss scenario
+ optarg += strspn(optarg, " ");
+
+ // named power-loss scenario
+ size_t len = strcspn(optarg, " ,");
+ for (size_t i = 0; builtin_powerlosses[i].name; i++) {
+ if (len == strlen(builtin_powerlosses[i].name)
+ && memcmp(optarg,
+ builtin_powerlosses[i].name,
+ len) == 0) {
+ *powerloss = builtin_powerlosses[i];
+ optarg += len;
+ goto powerloss_next;
+ }
+ }
+
+ // comma-separated permutation
+ if (*optarg == '{') {
+ lfs_emubd_powercycles_t *cycles = NULL;
+ size_t cycle_count = 0;
+ size_t cycle_capacity = 0;
+
+ char *s = optarg + 1;
+ while (true) {
+ char *parsed = NULL;
+ *(lfs_emubd_powercycles_t*)mappend(
+ (void**)&cycles,
+ sizeof(lfs_emubd_powercycles_t),
+ &cycle_count,
+ &cycle_capacity)
+ = strtoumax(s, &parsed, 0);
+
+ s = parsed + strspn(parsed, " ");
+ if (*s == ',') {
+ s += 1;
+ continue;
+ } else if (*s == '}') {
+ s += 1;
+ break;
+ } else {
+ goto powerloss_unknown;
+ }
+ }
+
+ *powerloss = (test_powerloss_t){
+ .run = run_powerloss_cycles,
+ .cycles = cycles,
+ .cycle_count = cycle_count,
+ };
+ optarg = s;
+ goto powerloss_next;
+ }
+
+ // leb16-encoded permutation
+ if (*optarg == ':') {
+ lfs_emubd_powercycles_t *cycles = NULL;
+ size_t cycle_count = 0;
+ size_t cycle_capacity = 0;
+
+ char *s = optarg + 1;
+ while (true) {
+ char *parsed = NULL;
+ uintmax_t x = leb16_parse(s, &parsed);
+ if (parsed == s) {
+ break;
+ }
+
+ *(lfs_emubd_powercycles_t*)mappend(
+ (void**)&cycles,
+ sizeof(lfs_emubd_powercycles_t),
+ &cycle_count,
+ &cycle_capacity) = x;
+ s = parsed;
+ }
+
+ *powerloss = (test_powerloss_t){
+ .run = run_powerloss_cycles,
+ .cycles = cycles,
+ .cycle_count = cycle_count,
+ };
+ optarg = s;
+ goto powerloss_next;
+ }
+
+ // exhaustive permutations
+ {
+ char *parsed = NULL;
+ size_t count = strtoumax(optarg, &parsed, 0);
+ if (parsed == optarg) {
+ goto powerloss_unknown;
+ }
+ *powerloss = (test_powerloss_t){
+ .run = run_powerloss_exhaustive,
+ .cycles = NULL,
+ .cycle_count = count,
+ };
+ optarg = (char*)parsed;
+ goto powerloss_next;
+ }
+
+powerloss_unknown:
+ // unknown scenario?
+ fprintf(stderr, "error: unknown power-loss scenario: %s\n",
+ optarg);
+ exit(-1);
+
+powerloss_next:
+ optarg += strspn(optarg, " ");
+ if (*optarg == ',') {
+ optarg += 1;
+ } else if (*optarg == '\0') {
+ break;
+ } else {
+ goto powerloss_unknown;
+ }
+ }
+ break;
+ }
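+ // For reference, power-loss scenarios can be given as (illustrative,
+ // inferred from the parsing logic above):
+ //
+ // --powerloss=<name> a named builtin_powerlosses entry
+ // --powerloss={1,3,5} explicit power-cycle list
+ // --powerloss=:<leb16> leb16-encoded power-cycle list
+ // --powerloss=2 exhaustive to a depth of 2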
+ case OPT_STEP: {
+ char *parsed = NULL;
+ test_step_start = strtoumax(optarg, &parsed, 0);
+ test_step_stop = -1;
+ test_step_step = 1;
+ // allow empty string for start=0
+ if (parsed == optarg) {
+ test_step_start = 0;
+ }
+ optarg = parsed + strspn(parsed, " ");
+
+ if (*optarg != ',' && *optarg != '\0') {
+ goto step_unknown;
+ }
+
+ if (*optarg == ',') {
+ optarg += 1;
+ test_step_stop = strtoumax(optarg, &parsed, 0);
+ // allow empty string for stop=end
+ if (parsed == optarg) {
+ test_step_stop = -1;
+ }
+ optarg = parsed + strspn(parsed, " ");
+
+ if (*optarg != ',' && *optarg != '\0') {
+ goto step_unknown;
+ }
+
+ if (*optarg == ',') {
+ optarg += 1;
+ test_step_step = strtoumax(optarg, &parsed, 0);
+ // allow empty string for step=1
+ if (parsed == optarg) {
+ test_step_step = 1;
+ }
+ optarg = parsed + strspn(parsed, " ");
+
+ if (*optarg != '\0') {
+ goto step_unknown;
+ }
+ }
+ } else {
+ // single value = stop only
+ test_step_stop = test_step_start;
+ test_step_start = 0;
+ }
+
+ break;
+step_unknown:
+ fprintf(stderr, "error: invalid step: %s\n", optarg);
+ exit(-1);
+ }
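+ // For reference (assuming OPT_STEP is wired to --step), this takes
+ // python-style slice bounds over the test steps: "10" runs the first
+ // 10 steps, "5," skips the first 5, and ",,2" runs every other step.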
+ case OPT_DISK:
+ test_disk_path = optarg;
+ break;
+ case OPT_TRACE:
+ test_trace_path = optarg;
+ break;
+ case OPT_TRACE_BACKTRACE:
+ test_trace_backtrace = true;
+ break;
+ case OPT_TRACE_PERIOD: {
+ char *parsed = NULL;
+ test_trace_period = strtoumax(optarg, &parsed, 0);
+ if (parsed == optarg) {
+ fprintf(stderr, "error: invalid trace-period: %s\n", optarg);
+ exit(-1);
+ }
+ break;
+ }
+ case OPT_TRACE_FREQ: {
+ char *parsed = NULL;
+ test_trace_freq = strtoumax(optarg, &parsed, 0);
+ if (parsed == optarg) {
+ fprintf(stderr, "error: invalid trace-freq: %s\n", optarg);
+ exit(-1);
+ }
+ break;
+ }
+ case OPT_READ_SLEEP: {
+ char *parsed = NULL;
+ double read_sleep = strtod(optarg, &parsed);
+ if (parsed == optarg) {
+ fprintf(stderr, "error: invalid read-sleep: %s\n", optarg);
+ exit(-1);
+ }
+ test_read_sleep = read_sleep*1.0e9;
+ break;
+ }
+ case OPT_PROG_SLEEP: {
+ char *parsed = NULL;
+ double prog_sleep = strtod(optarg, &parsed);
+ if (parsed == optarg) {
+ fprintf(stderr, "error: invalid prog-sleep: %s\n", optarg);
+ exit(-1);
+ }
+ test_prog_sleep = prog_sleep*1.0e9;
+ break;
+ }
+ case OPT_ERASE_SLEEP: {
+ char *parsed = NULL;
+ double erase_sleep = strtod(optarg, &parsed);
+ if (parsed == optarg) {
+ fprintf(stderr, "error: invalid erase-sleep: %s\n", optarg);
+ exit(-1);
+ }
+ test_erase_sleep = erase_sleep*1.0e9;
+ break;
+ }
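+ // (the *1.0e9 conversions above turn user-supplied seconds into the
+ // nanoseconds that the emulated block device's sleep options
+ // presumably expect)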
+ // done parsing
+ case -1:
+ goto getopt_done;
+ // unknown arg, getopt prints a message for us
+ default:
+ exit(-1);
+ }
+ }
+getopt_done: ;
+
+ if (argc > optind) {
+ // reset our test identifier list
+ test_ids = NULL;
+ test_id_count = 0;
+ test_id_capacity = 0;
+ }
+
+ // parse test identifiers, if any, cannibalizing the args in the process
+ for (; argc > optind; optind++) {
+ test_define_t *defines = NULL;
+ size_t define_count = 0;
+ lfs_emubd_powercycles_t *cycles = NULL;
+ size_t cycle_count = 0;
+
+ // parse name, can be suite or case
+ char *name = argv[optind];
+ char *defines_ = strchr(name, ':');
+ if (defines_) {
+ *defines_ = '\0';
+ defines_ += 1;
+ }
+
+ // remove optional path and .toml suffix
+ char *slash = strrchr(name, '/');
+ if (slash) {
+ name = slash+1;
+ }
+
+ size_t name_len = strlen(name);
+ if (name_len > 5 && strcmp(&name[name_len-5], ".toml") == 0) {
+ name[name_len-5] = '\0';
+ }
+
+ if (defines_) {
+ // parse defines
+ char *cycles_ = strchr(defines_, ':');
+ if (cycles_) {
+ *cycles_ = '\0';
+ cycles_ += 1;
+ }
+
+ while (true) {
+ char *parsed;
+ size_t d = leb16_parse(defines_, &parsed);
+ intmax_t v = leb16_parse(parsed, &parsed);
+ if (parsed == defines_) {
+ break;
+ }
+ defines_ = parsed;
+
+ if (d >= define_count) {
+ // align to power of two to avoid any superlinear growth
+ size_t ncount = 1 << lfs_npw2(d+1);
+ defines = realloc(defines,
+ ncount*sizeof(test_define_t));
+ memset(defines+define_count, 0,
+ (ncount-define_count)*sizeof(test_define_t));
+ define_count = ncount;
+ }
+ defines[d] = TEST_LIT(v);
+ }
+
+ if (cycles_) {
+ // parse power cycles
+ size_t cycle_capacity = 0;
+ while (*cycles_ != '\0') {
+ char *parsed = NULL;
+ *(lfs_emubd_powercycles_t*)mappend(
+ (void**)&cycles,
+ sizeof(lfs_emubd_powercycles_t),
+ &cycle_count,
+ &cycle_capacity)
+ = leb16_parse(cycles_, &parsed);
+ if (parsed == cycles_) {
+ fprintf(stderr, "error: "
+ "could not parse test cycles: %s\n",
+ cycles_);
+ exit(-1);
+ }
+ cycles_ = parsed;
+ }
+ }
+ }
+
+ // append to identifier list
+ *(test_id_t*)mappend(
+ (void**)&test_ids,
+ sizeof(test_id_t),
+ &test_id_count,
+ &test_id_capacity) = (test_id_t){
+ .name = name,
+ .defines = defines,
+ .define_count = define_count,
+ .cycles = cycles,
+ .cycle_count = cycle_count,
+ };
+ }
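+ // For reference, a test identifier has the rough shape
+ // [path/]suite_or_case[.toml][:<leb16 defines>[:<leb16 cycles>]],
+ // matching what the parsing above strips apart; encoded defines are
+ // (index,value) pairs and cycles are a power-cycle list.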
+
+ // do the thing
+ op();
+
+ // cleanup (needs to be done for valgrind testing)
+ test_define_cleanup();
+ if (test_overrides) {
+ for (size_t i = 0; i < test_override_count; i++) {
+ free((void*)test_overrides[i].defines);
+ }
+ free((void*)test_overrides);
+ }
+ if (test_geometry_capacity) {
+ free((void*)test_geometries);
+ }
+ if (test_powerloss_capacity) {
+ for (size_t i = 0; i < test_powerloss_count; i++) {
+ free((void*)test_powerlosses[i].cycles);
+ }
+ free((void*)test_powerlosses);
+ }
+ if (test_id_capacity) {
+ for (size_t i = 0; i < test_id_count; i++) {
+ free((void*)test_ids[i].defines);
+ free((void*)test_ids[i].cycles);
+ }
+ free((void*)test_ids);
+ }
+}
diff --git a/packages/littlefs-v2.11.2/runners/test_runner.h b/packages/littlefs-v2.11.2/runners/test_runner.h
new file mode 100644
index 0000000..ecdf9c1
--- /dev/null
+++ b/packages/littlefs-v2.11.2/runners/test_runner.h
@@ -0,0 +1,142 @@
+/*
+ * Runner for littlefs tests
+ *
+ * Copyright (c) 2022, The littlefs authors.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef TEST_RUNNER_H
+#define TEST_RUNNER_H
+
+
+// override LFS_TRACE
+void test_trace(const char *fmt, ...);
+
+#define LFS_TRACE_(fmt, ...) \
+ test_trace("%s:%d:trace: " fmt "%s\n", \
+ __FILE__, \
+ __LINE__, \
+ __VA_ARGS__)
+#define LFS_TRACE(...) LFS_TRACE_(__VA_ARGS__, "")
+#define LFS_EMUBD_TRACE(...) LFS_TRACE_(__VA_ARGS__, "")
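+// note the trailing "%s", "" pair: LFS_TRACE("hi") expands to
+// LFS_TRACE_("hi", ""), so the format string always has at least one
+// trailing argument and no ##__VA_ARGS__ extension is needed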
+
+
+// note these are indirectly included in any generated files
+#include "bd/lfs_emubd.h"
+#include <stdio.h>
+
+// give source a chance to define feature macros
+#undef _FEATURES_H
+#undef _STDIO_H
+
+
+// generated test configurations
+struct lfs_config;
+
+enum test_flags {
+ TEST_REENTRANT = 0x1,
+};
+typedef uint8_t test_flags_t;
+
+typedef struct test_define {
+ intmax_t (*cb)(void *data);
+ void *data;
+} test_define_t;
+
+struct test_case {
+ const char *name;
+ const char *path;
+ test_flags_t flags;
+ size_t permutations;
+
+ const test_define_t *defines;
+
+ bool (*filter)(void);
+ void (*run)(struct lfs_config *cfg);
+};
+
+struct test_suite {
+ const char *name;
+ const char *path;
+ test_flags_t flags;
+
+ const char *const *define_names;
+ size_t define_count;
+
+ const struct test_case *cases;
+ size_t case_count;
+};
+
+
+// deterministic prng for pseudo-randomness in tests
+uint32_t test_prng(uint32_t *state);
+
+#define TEST_PRNG(state) test_prng(state)
+
+
+// access generated test defines
+intmax_t test_define(size_t define);
+
+#define TEST_DEFINE(i) test_define(i)
+
+// a few preconfigured defines that control how tests run
+
+#define READ_SIZE_i 0
+#define PROG_SIZE_i 1
+#define ERASE_SIZE_i 2
+#define ERASE_COUNT_i 3
+#define BLOCK_SIZE_i 4
+#define BLOCK_COUNT_i 5
+#define CACHE_SIZE_i 6
+#define LOOKAHEAD_SIZE_i 7
+#define COMPACT_THRESH_i 8
+#define METADATA_MAX_i 9
+#define INLINE_MAX_i 10
+#define BLOCK_CYCLES_i 11
+#define ERASE_VALUE_i 12
+#define ERASE_CYCLES_i 13
+#define BADBLOCK_BEHAVIOR_i 14
+#define POWERLOSS_BEHAVIOR_i 15
+#define DISK_VERSION_i 16
+
+#define READ_SIZE TEST_DEFINE(READ_SIZE_i)
+#define PROG_SIZE TEST_DEFINE(PROG_SIZE_i)
+#define ERASE_SIZE TEST_DEFINE(ERASE_SIZE_i)
+#define ERASE_COUNT TEST_DEFINE(ERASE_COUNT_i)
+#define BLOCK_SIZE TEST_DEFINE(BLOCK_SIZE_i)
+#define BLOCK_COUNT TEST_DEFINE(BLOCK_COUNT_i)
+#define CACHE_SIZE TEST_DEFINE(CACHE_SIZE_i)
+#define LOOKAHEAD_SIZE TEST_DEFINE(LOOKAHEAD_SIZE_i)
+#define COMPACT_THRESH TEST_DEFINE(COMPACT_THRESH_i)
+#define METADATA_MAX TEST_DEFINE(METADATA_MAX_i)
+#define INLINE_MAX TEST_DEFINE(INLINE_MAX_i)
+#define BLOCK_CYCLES TEST_DEFINE(BLOCK_CYCLES_i)
+#define ERASE_VALUE TEST_DEFINE(ERASE_VALUE_i)
+#define ERASE_CYCLES TEST_DEFINE(ERASE_CYCLES_i)
+#define BADBLOCK_BEHAVIOR TEST_DEFINE(BADBLOCK_BEHAVIOR_i)
+#define POWERLOSS_BEHAVIOR TEST_DEFINE(POWERLOSS_BEHAVIOR_i)
+#define DISK_VERSION TEST_DEFINE(DISK_VERSION_i)
+
+#define TEST_IMPLICIT_DEFINES \
+ TEST_DEF(READ_SIZE, PROG_SIZE) \
+ TEST_DEF(PROG_SIZE, ERASE_SIZE) \
+ TEST_DEF(ERASE_SIZE, 0) \
+ TEST_DEF(ERASE_COUNT, (1024*1024)/ERASE_SIZE) \
+ TEST_DEF(BLOCK_SIZE, ERASE_SIZE) \
+ TEST_DEF(BLOCK_COUNT, ERASE_COUNT/lfs_max(BLOCK_SIZE/ERASE_SIZE,1)) \
+ TEST_DEF(CACHE_SIZE, lfs_max(64,lfs_max(READ_SIZE,PROG_SIZE))) \
+ TEST_DEF(LOOKAHEAD_SIZE, 16) \
+ TEST_DEF(COMPACT_THRESH, 0) \
+ TEST_DEF(METADATA_MAX, 0) \
+ TEST_DEF(INLINE_MAX, 0) \
+ TEST_DEF(BLOCK_CYCLES, -1) \
+ TEST_DEF(ERASE_VALUE, 0xff) \
+ TEST_DEF(ERASE_CYCLES, 0) \
+ TEST_DEF(BADBLOCK_BEHAVIOR, LFS_EMUBD_BADBLOCK_PROGERROR) \
+ TEST_DEF(POWERLOSS_BEHAVIOR, LFS_EMUBD_POWERLOSS_NOOP) \
+ TEST_DEF(DISK_VERSION, 0)
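+
+// note the defaults cascade: overriding only ERASE_SIZE=4096 gives
+// PROG_SIZE=READ_SIZE=4096, ERASE_COUNT=(1024*1024)/4096=256,
+// BLOCK_SIZE=4096, BLOCK_COUNT=256, and CACHE_SIZE=lfs_max(64,4096)=4096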
+
+#define TEST_GEOMETRY_DEFINE_COUNT 4
+#define TEST_IMPLICIT_DEFINE_COUNT 17
+
+
+#endif
diff --git a/packages/packages.dbsqlite b/packages/packages.dbsqlite
index 8be942c..2c0b57e 100644
Binary files a/packages/packages.dbsqlite and b/packages/packages.dbsqlite differ
diff --git a/packages/pkgs.json b/packages/pkgs.json
index d0e9680..6542332 100644
--- a/packages/pkgs.json
+++ b/packages/pkgs.json
@@ -8,5 +8,10 @@
"path": "/packages/system/sqlite",
"ver": "v3.19.3",
"name": "SQLITE"
+ },
+ {
+ "path": "/packages/system/littlefs",
+ "ver": "v2.11.2",
+ "name": "LITTLEFS"
}
]
\ No newline at end of file
diff --git a/rt-thread/components/fal/src/fal.c b/rt-thread/components/fal/src/fal.c
index 292b492..98672ab 100644
--- a/rt-thread/components/fal/src/fal.c
+++ b/rt-thread/components/fal/src/fal.c
@@ -40,12 +40,12 @@ __exit:
if ((result > 0) && (!init_ok))
{
init_ok = 1;
- log_i("RT-Thread Flash Abstraction Layer initialize success.");
+ log_i("Flash Abstraction Layer initialize success.");
}
else if(result <= 0)
{
init_ok = 0;
- log_e("RT-Thread Flash Abstraction Layer initialize failed.");
+ log_e("Flash Abstraction Layer initialize failed.");
}
return result;
diff --git a/rtconfig.h b/rtconfig.h
index d12218a..656f9fc 100644
--- a/rtconfig.h
+++ b/rtconfig.h
@@ -88,7 +88,7 @@
#define DFS_USING_WORKDIR
#define DFS_FD_MAX 16
#define RT_USING_DFS_V1
-#define DFS_FILESYSTEMS_MAX 4
+#define DFS_FILESYSTEMS_MAX 6
#define DFS_FILESYSTEM_TYPES_MAX 4
#define RT_USING_DFS_ELMFAT
@@ -111,6 +111,11 @@
#define RT_USING_DFS_DEVFS
#define RT_USING_DFS_TMPFS
/* end of DFS: device virtual file system */
+#define RT_USING_FAL
+#define FAL_DEBUG 0
+#define FAL_PART_HAS_TABLE_CFG
+#define FAL_USING_SFUD_PORT
+#define FAL_USING_NOR_FLASH_DEV_NAME "W25Q128"
/* Device Drivers */
@@ -119,6 +124,7 @@
#define RT_USING_SERIAL
#define RT_USING_SERIAL_V1
#define RT_SERIAL_RB_BUFSZ 512
+#define RT_USING_MTD_NOR
#define RT_USING_RTC
#define RT_USING_SOFT_RTC
#define RT_USING_SDIO
@@ -130,6 +136,11 @@
#define RT_USING_SPI
#define RT_USING_QSPI
#define RT_USING_SPI_MSD
+#define RT_USING_SFUD
+#define RT_SFUD_USING_SFDP
+#define RT_SFUD_USING_FLASH_INFO_TABLE
+#define RT_SFUD_USING_QSPI
+#define RT_SFUD_SPI_MAX_HZ 50000000
#define RT_USING_DEV_BUS
#define RT_USING_PIN
@@ -263,6 +274,16 @@
#define PKG_SQLITE_SQL_MAX_LEN 1024
#define PKG_SQLITE_DB_NAME_MAX_LEN 64
#define PKG_USING_SQLITE_V3193
+#define PKG_USING_LITTLEFS
+#define PKG_USING_LITTLEFS_V2112
+#define LFS_READ_SIZE 256
+#define LFS_PROG_SIZE 256
+#define LFS_BLOCK_SIZE 4096
+#define LFS_CACHE_SIZE 256
+#define LFS_BLOCK_CYCLES 500
+#define LFS_THREADSAFE
+#define LFS_LOOKAHEAD_MAX 128
+#define RT_DEF_LFS_DRIVERS 2
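+/* These LFS_* values presumably feed the lfs_config fields (read_size,
+ * prog_size, block_size, cache_size, block_cycles) used by the RT-Thread
+ * littlefs DFS port, and RT_DEF_LFS_DRIVERS likely bounds how many
+ * littlefs instances can be mounted at once. */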
/* end of system packages */
/* peripheral libraries and drivers */
diff --git a/rtconfig_preinc.h b/rtconfig_preinc.h
index 59f0fe9..906ba7a 100644
--- a/rtconfig_preinc.h
+++ b/rtconfig_preinc.h
@@ -5,6 +5,7 @@
/* Automatically generated file; DO NOT EDIT. */
/* RT-Thread pre-include file */
+#define LFS_CONFIG lfs_config.h
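+/* littlefs stringizes LFS_CONFIG in lfs_util.h and #includes it, so this
+ * substitutes a project-local lfs_config.h for the default LFS_* utility
+ * definitions */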
#define RT_USING_LIBC
#define RT_USING_NEWLIBC
#define _POSIX_C_SOURCE 1