From 427291efbd5b67f82b53f6f09bad18c009eae42e Mon Sep 17 00:00:00 2001 From: BBsan2k Date: Fri, 12 Apr 2024 12:57:58 +0200 Subject: [PATCH 1/7] Add KunaiGC Support --- cube/swiss/Makefile | 1 + cube/swiss/source/devices/deviceHandler.h | 6 +- .../devices/kunaigc/deviceHandler-KunaiGC.c | 250 + .../devices/kunaigc/deviceHandler-KunaiGC.h | 15 + cube/swiss/source/devices/kunaigc/kunaigc.c | 142 + cube/swiss/source/devices/kunaigc/kunaigc.h | 36 + cube/swiss/source/devices/kunaigc/lfs.c | 5819 +++++++++++++++++ cube/swiss/source/devices/kunaigc/lfs.h | 703 ++ cube/swiss/source/devices/kunaigc/lfs_util.c | 34 + cube/swiss/source/devices/kunaigc/lfs_util.h | 245 + cube/swiss/source/devices/kunaigc/spiflash.c | 218 + cube/swiss/source/devices/kunaigc/spiflash.h | 139 + cube/swiss/source/gui/FrameBufferMagic.c | 5 + cube/swiss/source/gui/FrameBufferMagic.h | 3 +- cube/swiss/source/images/images.scf | 1 + cube/swiss/source/images/kunaigc.tif | Bin 0 -> 7628 bytes cube/swiss/source/main.c | 1 + 17 files changed, 7615 insertions(+), 3 deletions(-) mode change 100644 => 100755 cube/swiss/Makefile create mode 100755 cube/swiss/source/devices/kunaigc/deviceHandler-KunaiGC.c create mode 100755 cube/swiss/source/devices/kunaigc/deviceHandler-KunaiGC.h create mode 100755 cube/swiss/source/devices/kunaigc/kunaigc.c create mode 100755 cube/swiss/source/devices/kunaigc/kunaigc.h create mode 100755 cube/swiss/source/devices/kunaigc/lfs.c create mode 100755 cube/swiss/source/devices/kunaigc/lfs.h create mode 100755 cube/swiss/source/devices/kunaigc/lfs_util.c create mode 100755 cube/swiss/source/devices/kunaigc/lfs_util.h create mode 100755 cube/swiss/source/devices/kunaigc/spiflash.c create mode 100755 cube/swiss/source/devices/kunaigc/spiflash.h create mode 100644 cube/swiss/source/images/kunaigc.tif mode change 100644 => 100755 cube/swiss/source/main.c diff --git a/cube/swiss/Makefile b/cube/swiss/Makefile old mode 100644 new mode 100755 index d15fc64a..bf30666c --- 
a/cube/swiss/Makefile +++ b/cube/swiss/Makefile @@ -37,6 +37,7 @@ SOURCES := source \ source/devices/fsp \ source/devices/ftp \ source/devices/gcloader \ + source/devices/kunaigc \ source/devices/memcard \ source/devices/qoob \ source/devices/smb \ diff --git a/cube/swiss/source/devices/deviceHandler.h b/cube/swiss/source/devices/deviceHandler.h index 17e3c764..b847b154 100644 --- a/cube/swiss/source/devices/deviceHandler.h +++ b/cube/swiss/source/devices/deviceHandler.h @@ -153,7 +153,8 @@ typedef char* (* _fn_status)(file_handle*); #define DEVICE_ID_G 0x10 #define DEVICE_ID_H 0x11 #define DEVICE_ID_I 0x12 -#define DEVICE_ID_MAX DEVICE_ID_I +#define DEVICE_ID_J 0x13 +#define DEVICE_ID_MAX DEVICE_ID_J #define DEVICE_ID_UNK (DEVICE_ID_MAX + 1) typedef struct DEVICEHANDLER_STRUCT DEVICEHANDLER_INTERFACE; @@ -216,6 +217,7 @@ enum DEV_ERRORS { #include "devices/fsp/deviceHandler-FSP.h" #include "devices/gcloader/deviceHandler-gcloader.h" #include "devices/aram/deviceHandler-ARAM.h" +#include "devices/kunaigc/deviceHandler-KunaiGC.h" extern void deviceHandler_setStatEnabled(int enable); extern int deviceHandler_getStatEnabled(); @@ -223,7 +225,7 @@ extern bool deviceHandler_getDeviceAvailable(DEVICEHANDLER_INTERFACE *dev); extern void deviceHandler_setDeviceAvailable(DEVICEHANDLER_INTERFACE *dev, bool availability); extern void deviceHandler_setAllDevicesAvailable(); -#define MAX_DEVICES 20 +#define MAX_DEVICES 21 extern DEVICEHANDLER_INTERFACE* allDevices[MAX_DEVICES]; extern DEVICEHANDLER_INTERFACE* devices[MAX_DEVICE_SLOTS]; diff --git a/cube/swiss/source/devices/kunaigc/deviceHandler-KunaiGC.c b/cube/swiss/source/devices/kunaigc/deviceHandler-KunaiGC.c new file mode 100755 index 00000000..1c3e1a7b --- /dev/null +++ b/cube/swiss/source/devices/kunaigc/deviceHandler-KunaiGC.c @@ -0,0 +1,250 @@ +/* deviceHandler-KunaiGC.c + - device implementation for KunaiGC Flash + by bbsan2k + */ + +#include +#include +#include +#include +#include +#include +#include +#include 
+#include "deviceHandler.h" +#include "gui/FrameBufferMagic.h" +#include "gui/IPLFontWrite.h" +#include "swiss.h" +#include "main.h" + +#include "lfs.h" +#include "kunaigc.h" +#include "spiflash.h" +#include "util.h" + + +// variables used by the filesystem +lfs_t lfs; +lfs_file_t lfs_file; + +// configuration of the filesystem is provided by this struct +struct lfs_config cfg = { + // block device operations + .read = kunai_read, + .prog = kunai_write, + .erase = kunai_erase, + .sync = kunai_sync, + + // block device configuration + .read_size = 4, + .prog_size = W25Q80BV_PAGE_SIZE, + .block_size = 4096, + .block_count = 4032, + .cache_size = W25Q80BV_PAGE_SIZE*8, + .lookahead_size = 16, + .block_cycles = 500, +}; + + + +file_handle initial_KunaiGC = + { "/", // directory + 0ULL, // fileBase (u64) + 0, // offset + 0, // size + IS_DIR, + 0 + }; + +device_info initial_KunaiGC_info = { + 0x200000, + 0x200000, + false +}; + +static void readLFSInfo(void) { + initial_KunaiGC_info.totalSpace = (cfg.block_size * cfg.block_count); + initial_KunaiGC_info.freeSpace = initial_KunaiGC_info.totalSpace - (lfs_fs_size(&lfs) * cfg.block_size); +} + +device_info* deviceHandler_KunaiGC_info(file_handle* file) { + return &initial_KunaiGC_info; +} + +s32 deviceHandler_KunaiGC_makeDir(file_handle* file) { + return lfs_mkdir(&lfs, file->name); +} + +s32 deviceHandler_KunaiGC_readDir(file_handle* ffile, file_handle** dir, u32 type) { + lfs_dir_t lfsdir; + struct lfs_info info; + + int err = lfs_dir_open(&lfs, &lfsdir, ffile->name); + + if (err < 0) { + return -1; + } + + // Set everything up to read + int num_entries = 1, i = 1; + *dir = calloc(num_entries, sizeof(file_handle)); + concat_path((*dir)[0].name, ffile->name, ".."); + (*dir)[0].fileAttrib = IS_SPECIAL; + + // Read each entry of the directory + while (lfs_dir_read(&lfs, &lfsdir, &info) > 0) { + errno = 0; + + if(!strcmp(info.name, ".") || !strcmp(info.name, "..")) { + continue; + } + // Do we want this one? 
+ if((type == -1 || ((info.type == LFS_TYPE_DIR) ? (type==IS_DIR) : (type==IS_FILE)))) { + struct lfs_info fstat; + + if(info.type == LFS_TYPE_REG) { + if(!checkExtension(info.name)) continue; + } + // Make sure we have room for this one + if(i == num_entries){ + ++num_entries; + *dir = reallocarray(*dir, num_entries, sizeof(file_handle)); + } + memset(&(*dir)[i], 0, sizeof(file_handle)); + if(concat_path((*dir)[i].name, ffile->name, info.name) < PATHNAME_MAX + && !lfs_stat(&lfs, (*dir)[i].name, &fstat) && fstat.size <= UINT32_MAX) { + (*dir)[i].size = fstat.size; + (*dir)[i].fileAttrib = (fstat.type == LFS_TYPE_DIR) ? IS_DIR : IS_FILE; + ++i; + } + } + }; + + lfs_dir_close(&lfs, &lfsdir); + + return i; +} + +s64 deviceHandler_KunaiGC_seekFile(file_handle* file, s64 where, u32 type) { + if(type == DEVICE_HANDLER_SEEK_SET) file->offset = where; + else if(type == DEVICE_HANDLER_SEEK_CUR) file->offset = file->offset + where; + else if(type == DEVICE_HANDLER_SEEK_END) file->offset = file->size + where; + return file->offset; +} + +s32 deviceHandler_KunaiGC_readFile(file_handle* file, void* buffer, u32 length) { + if (!file->fp) { + if (lfs_file_open(&lfs, &lfs_file, file->name, LFS_O_RDONLY) < 0) return -1; + file->fp = &lfs_file; + } + if(file->size <= 0) { + lfs_file_seek(&lfs, (lfs_file_t*)file->fp, 0, LFS_SEEK_END); + file->size = lfs_file_tell(&lfs, (lfs_file_t*)file->fp); + } + lfs_file_seek(&lfs, (lfs_file_t*)file->fp, file->offset, LFS_SEEK_SET); + size_t bytes_read = lfs_file_read(&lfs, (lfs_file_t*)file->fp, buffer, length); + file->offset = lfs_file_tell(&lfs, (lfs_file_t*)file->fp); + + return bytes_read; +} + +// Assumes a single call to write a file. 
+s32 deviceHandler_KunaiGC_writeFile(file_handle* file, const void* buffer, u32 length) { + if (!file->fp || !(((lfs_file_t*)file->fp)->flags & LFS_O_WRONLY)) { + if (lfs_file_open(&lfs, &lfs_file, file->name, LFS_O_RDWR | LFS_O_CREAT) < 0) return -1; + file->fp = (void*)&lfs_file; + } + lfs_file_seek(&lfs, (lfs_file_t *) file->fp, file->offset, LFS_SEEK_SET); + + lfs_ssize_t bytes_written = lfs_file_write(&lfs, (lfs_file_t *) file->fp, buffer, length); + file->offset = lfs_file_tell(&lfs, (lfs_file_t *) file->fp); + readLFSInfo(); + return bytes_written; +} + +s32 deviceHandler_KunaiGC_deleteFile(file_handle* file) { + if (file->fp ) { + if (file->fileAttrib == IS_DIR) + lfs_dir_close(&lfs, (lfs_dir_t*)file->fp); + else + lfs_file_close(&lfs, (lfs_file_t*)file->fp); + } + + lfs_remove(&lfs, file->name); + readLFSInfo(); + return 0; +} + +s32 deviceHandler_KunaiGC_renameFile(file_handle* file, char* name) { + return lfs_rename(&lfs, file->name, name); +} + +bool deviceHandler_KunaiGC_test() { + u32 jedec_id = kunai_get_jedecID(); + cfg.block_count = (((1 << (jedec_id & 0xFFUL)) - KUNAI_OFFS) / cfg.block_size); + + return (jedec_id != 0x0) && (jedec_id != UINT32_MAX); +} + + +s32 deviceHandler_KunaiGC_init(file_handle* file) { + if(!deviceHandler_KunaiGC_test()) { + return ENODEV; + } + + if (lfs_mount(&lfs, &cfg) < 0) { + file->status = E_CONNECTFAIL; + return EFAULT; + } + + readLFSInfo(); + + + return 0; +} + +s32 deviceHandler_KunaiGC_closeFile(file_handle* file) { + int ret = 0; + if(file && file->fp) { + ret = lfs_file_close(&lfs, (lfs_file_t*)file->fp); + + file->fp = NULL; + } + return ret; +} + +s32 deviceHandler_KunaiGC_deinit(file_handle* file) { + deviceHandler_KunaiGC_closeFile(file); + initial_KunaiGC_info.freeSpace = 0LL; + initial_KunaiGC_info.totalSpace = 0LL; + + return 0; +} + +char* deviceHandler_KunaiGC_status(file_handle* file) { + return NULL; +} + +DEVICEHANDLER_INTERFACE __device_kunaigc = { + .deviceUniqueId = DEVICE_ID_J, + .hwName = 
"KunaiGC IPL", + .deviceName = "KunaiGC", + .deviceDescription = "KunaiGC File System", + .deviceTexture = {TEX_KUNAIGC, 112, 57, 112, 57}, + .features = FEAT_READ|FEAT_WRITE|FEAT_CONFIG_DEVICE|FEAT_BOOT_DEVICE|FEAT_AUTOLOAD_DOL, + .location = LOC_SYSTEM, + .initial = &initial_KunaiGC, + .test = deviceHandler_KunaiGC_test, + .info = deviceHandler_KunaiGC_info, + .init = deviceHandler_KunaiGC_init, + .makeDir = deviceHandler_KunaiGC_makeDir, + .readDir = deviceHandler_KunaiGC_readDir, + .seekFile = deviceHandler_KunaiGC_seekFile, + .readFile = deviceHandler_KunaiGC_readFile, + .writeFile = deviceHandler_KunaiGC_writeFile, + .closeFile = deviceHandler_KunaiGC_closeFile, + .deleteFile = deviceHandler_KunaiGC_deleteFile, + .renameFile = deviceHandler_KunaiGC_renameFile, + .deinit = deviceHandler_KunaiGC_deinit, + .status = deviceHandler_KunaiGC_status, +}; diff --git a/cube/swiss/source/devices/kunaigc/deviceHandler-KunaiGC.h b/cube/swiss/source/devices/kunaigc/deviceHandler-KunaiGC.h new file mode 100755 index 00000000..c1d038fb --- /dev/null +++ b/cube/swiss/source/devices/kunaigc/deviceHandler-KunaiGC.h @@ -0,0 +1,15 @@ +/* deviceHandler-KunaiGC.h + - device interface for KunaiGC Flash + based on deviceHandler-Qoob.h by emu_kidid + */ + + +#ifndef DEVICE_HANDLER_KUNAIGC_H +#define DEVICE_HANDLER_KUNAIGC_H + +#include "../deviceHandler.h" + +extern DEVICEHANDLER_INTERFACE __device_kunaigc; + +#endif + diff --git a/cube/swiss/source/devices/kunaigc/kunaigc.c b/cube/swiss/source/devices/kunaigc/kunaigc.c new file mode 100755 index 00000000..02f5762a --- /dev/null +++ b/cube/swiss/source/devices/kunaigc/kunaigc.c @@ -0,0 +1,142 @@ +/* + * kunaigc.c + * + * Created on: Jul 11, 2022 + * Author: mancloud + */ + + +#include "kunaigc.h" +#include + +//wait for "WIP" flag being unset +void kunai_wait() { + kunai_enable_passthrough(); + spiflash_wait(); + kunai_disable_passthrough(); +} + +void kunai_disable_passthrough(void) { + EXI_Deselect(EXI_CHANNEL_0); + EXI_Unlock(EXI_CHANNEL_0); +} + +void 
kunai_enable_passthrough(void) { + s32 retVal = 0; + uint8_t repetitions = 3; + do { + u32 addr = 0x80000000; //for passthrough we need to send one '1' and 31 '0' and afterwards whatever we want + EXI_Lock(EXI_CHANNEL_0, EXI_DEVICE_1, NULL); + EXI_Select(EXI_CHANNEL_0, EXI_DEVICE_1, EXI_SPEED32MHZ); + EXI_Imm(EXI_CHANNEL_0, &addr, 4, EXI_WRITE, NULL); + retVal = EXI_Sync(EXI_CHANNEL_0); + } while(retVal <= 0 && --repetitions); +} + + +uint32_t kunai_get_jedecID(void) { + uint32_t jedecID = 0; + kunai_reenable(); + kunai_enable_passthrough(); + jedecID = spiflash_jedec_id(); + kunai_disable_passthrough(); + kunai_disable(); + return jedecID; +} + +int kunai_read(const struct lfs_config *c, lfs_block_t block, lfs_off_t off, void *buffer, lfs_size_t size) { + int retVal = 0; + if(size) { + uint32_t * p_data = buffer; + kunai_reenable(); + kunai_enable_passthrough(); + spiflash_read_start_fast((block * c->block_size) + off + KUNAI_OFFS); + for(lfs_size_t i = size; i > 0; i-=(c->read_size)){ + *(p_data++) = spiflash_read_uint32(); + } + kunai_disable_passthrough(); + kunai_disable(); + } else { + retVal = LFS_ERR_IO; + } + + return retVal; +} + +int kunai_write(const struct lfs_config *c, lfs_block_t block, lfs_off_t off, const void *buffer, lfs_size_t size) { + int retVal = 0; + if(size) { + uint32_t * p_data = (uint32_t *) buffer; + kunai_reenable(); + + for(lfs_size_t i = size; i > 0; i -= c->prog_size) { + + kunai_enable_passthrough(); + spiflash_write_enable(); + kunai_disable_passthrough(); + + kunai_enable_passthrough(); + spiflash_cmd_addr_start(W25Q80BV_CMD_PAGE_PROG, (block * c->block_size) + KUNAI_OFFS + off); + for(uint8_t ii = 0; ii < (W25Q80BV_PAGE_SIZE/4); ii++) { + + spiflash_write_uint32(*p_data); + p_data++; + } + kunai_disable_passthrough(); + kunai_wait(); + off += c->prog_size; + } + + kunai_disable(); + } else { + retVal = LFS_ERR_IO; + } + return retVal; +} + +int kunai_erase(const struct lfs_config *c, lfs_block_t block) { + int retVal = 0; + 
kunai_reenable(); + kunai_sector_erase(block * c->block_size + KUNAI_OFFS); + kunai_disable(); + return retVal; +} + +int kunai_sync(const struct lfs_config *c) { return 0;} + +void kunai_disable(void) { + u32 addr = 0xc0000000; + u32 data = 6 << 24; + EXI_Lock(EXI_CHANNEL_0, EXI_DEVICE_1, NULL); + EXI_Select(EXI_CHANNEL_0, EXI_DEVICE_1, EXI_SPEED8MHZ); + EXI_Imm(EXI_CHANNEL_0, &addr, 4, EXI_WRITE, NULL); + EXI_Sync(EXI_CHANNEL_0); + EXI_Imm(EXI_CHANNEL_0, &data, 4, EXI_WRITE, NULL); + EXI_Sync(EXI_CHANNEL_0); + EXI_Deselect(EXI_CHANNEL_0); + EXI_Unlock(EXI_CHANNEL_0); +} + +void kunai_reenable(void) { + u32 addr = 0xc0000000; + u32 data = 1 << 24; + EXI_Lock(EXI_CHANNEL_0, EXI_DEVICE_1, NULL); + EXI_Select(EXI_CHANNEL_0, EXI_DEVICE_1, EXI_SPEED8MHZ); + EXI_Imm(EXI_CHANNEL_0, &addr, 4, EXI_WRITE, NULL); + EXI_Sync(EXI_CHANNEL_0); + EXI_Imm(EXI_CHANNEL_0, &data, 4, EXI_WRITE, NULL); + EXI_Sync(EXI_CHANNEL_0); + EXI_Deselect(EXI_CHANNEL_0); + EXI_Unlock(EXI_CHANNEL_0); +} + +void kunai_sector_erase(uint32_t addr) { + kunai_enable_passthrough(); + spiflash_write_enable(); + kunai_disable_passthrough(); + kunai_enable_passthrough(); + spiflash_cmd_addr_start(W25Q80BV_CMD_ERASE_4K, addr); + kunai_disable_passthrough(); + kunai_wait(); +} + diff --git a/cube/swiss/source/devices/kunaigc/kunaigc.h b/cube/swiss/source/devices/kunaigc/kunaigc.h new file mode 100755 index 00000000..ff530355 --- /dev/null +++ b/cube/swiss/source/devices/kunaigc/kunaigc.h @@ -0,0 +1,36 @@ +/* + * kunaigc.h + * + * Created on: Jul 11, 2022 + * Author: mancloud + */ + +#ifndef KUNAIGC_H_ +#define KUNAIGC_H_ + +#include +#include + + +#include "spiflash.h" +#include "lfs.h" + +#define KUNAI_OFFS (256*1024) //first 256KiB are reserved for loader + recovery + +void kunai_sector_erase(uint32_t addr); +void kunai_disable_passthrough(void); +void kunai_enable_passthrough(void); +uint32_t kunai_get_jedecID(void); +uint32_t kunai_read_32bit(uint32_t addr); + +int kunai_read(const struct lfs_config *c, 
lfs_block_t block, lfs_off_t off, void *buffer, lfs_size_t size); +int kunai_write(const struct lfs_config *c, lfs_block_t block, lfs_off_t off, const void *buffer, lfs_size_t size); +int kunai_erase(const struct lfs_config *c, lfs_block_t block); +int kunai_sync(const struct lfs_config *c); + +void kunai_write_32bit(uint32_t data, uint32_t addr); +int8_t kunai_write_page(uint32_t * data, uint32_t addr, bool verify); +void kunai_disable(void); +void kunai_reenable(void); + +#endif /* KUNAIGC_H_ */ diff --git a/cube/swiss/source/devices/kunaigc/lfs.c b/cube/swiss/source/devices/kunaigc/lfs.c new file mode 100755 index 00000000..117595e0 --- /dev/null +++ b/cube/swiss/source/devices/kunaigc/lfs.c @@ -0,0 +1,5819 @@ +/* + * The little filesystem + * + * Copyright (c) 2022, The littlefs authors. + * Copyright (c) 2017, Arm Limited. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + */ +#include "lfs.h" +#include "lfs_util.h" + + +// some constants used throughout the code +#define LFS_BLOCK_NULL ((lfs_block_t)-1) +#define LFS_BLOCK_INLINE ((lfs_block_t)-2) + +enum { + LFS_OK_RELOCATED = 1, + LFS_OK_DROPPED = 2, + LFS_OK_ORPHANED = 3, +}; + +enum { + LFS_CMP_EQ = 0, + LFS_CMP_LT = 1, + LFS_CMP_GT = 2, +}; + + +/// Caching block device operations /// + +static inline void lfs_cache_drop(lfs_t *lfs, lfs_cache_t *rcache) { + // do not zero, cheaper if cache is readonly or only going to be + // written with identical data (during relocates) + (void)lfs; + rcache->block = LFS_BLOCK_NULL; +} + +static inline void lfs_cache_zero(lfs_t *lfs, lfs_cache_t *pcache) { + // zero to avoid information leak + memset(pcache->buffer, 0xff, lfs->cfg->cache_size); + pcache->block = LFS_BLOCK_NULL; +} + +static int lfs_bd_read(lfs_t *lfs, + const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint, + lfs_block_t block, lfs_off_t off, + void *buffer, lfs_size_t size) { + uint8_t *data = buffer; + if (block >= lfs->cfg->block_count || + off+size > lfs->cfg->block_size) { + 
return LFS_ERR_CORRUPT; + } + + while (size > 0) { + lfs_size_t diff = size; + + if (pcache && block == pcache->block && + off < pcache->off + pcache->size) { + if (off >= pcache->off) { + // is already in pcache? + diff = lfs_min(diff, pcache->size - (off-pcache->off)); + memcpy(data, &pcache->buffer[off-pcache->off], diff); + + data += diff; + off += diff; + size -= diff; + continue; + } + + // pcache takes priority + diff = lfs_min(diff, pcache->off-off); + } + + if (block == rcache->block && + off < rcache->off + rcache->size) { + if (off >= rcache->off) { + // is already in rcache? + diff = lfs_min(diff, rcache->size - (off-rcache->off)); + memcpy(data, &rcache->buffer[off-rcache->off], diff); + + data += diff; + off += diff; + size -= diff; + continue; + } + + // rcache takes priority + diff = lfs_min(diff, rcache->off-off); + } + + if (size >= hint && off % lfs->cfg->read_size == 0 && + size >= lfs->cfg->read_size) { + // bypass cache? + diff = lfs_aligndown(diff, lfs->cfg->read_size); + int err = lfs->cfg->read(lfs->cfg, block, off, data, diff); + if (err) { + return err; + } + + data += diff; + off += diff; + size -= diff; + continue; + } + + // load to cache, first condition can no longer fail + LFS_ASSERT(block < lfs->cfg->block_count); + rcache->block = block; + rcache->off = lfs_aligndown(off, lfs->cfg->read_size); + rcache->size = lfs_min( + lfs_min( + lfs_alignup(off+hint, lfs->cfg->read_size), + lfs->cfg->block_size) + - rcache->off, + lfs->cfg->cache_size); + int err = lfs->cfg->read(lfs->cfg, rcache->block, + rcache->off, rcache->buffer, rcache->size); + LFS_ASSERT(err <= 0); + if (err) { + return err; + } + } + + return 0; +} + +static int lfs_bd_cmp(lfs_t *lfs, + const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint, + lfs_block_t block, lfs_off_t off, + const void *buffer, lfs_size_t size) { + const uint8_t *data = buffer; + lfs_size_t diff = 0; + + for (lfs_off_t i = 0; i < size; i += diff) { + uint8_t dat[8]; + + diff = 
lfs_min(size-i, sizeof(dat)); + int res = lfs_bd_read(lfs, + pcache, rcache, hint-i, + block, off+i, &dat, diff); + if (res) { + return res; + } + + res = memcmp(dat, data + i, diff); + if (res) { + return res < 0 ? LFS_CMP_LT : LFS_CMP_GT; + } + } + + return LFS_CMP_EQ; +} + +#ifndef LFS_READONLY +static int lfs_bd_flush(lfs_t *lfs, + lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate) { + if (pcache->block != LFS_BLOCK_NULL && pcache->block != LFS_BLOCK_INLINE) { + LFS_ASSERT(pcache->block < lfs->cfg->block_count); + lfs_size_t diff = lfs_alignup(pcache->size, lfs->cfg->prog_size); + int err = lfs->cfg->prog(lfs->cfg, pcache->block, + pcache->off, pcache->buffer, diff); + LFS_ASSERT(err <= 0); + if (err) { + return err; + } + + if (validate) { + // check data on disk + lfs_cache_drop(lfs, rcache); + int res = lfs_bd_cmp(lfs, + NULL, rcache, diff, + pcache->block, pcache->off, pcache->buffer, diff); + if (res < 0) { + return res; + } + + if (res != LFS_CMP_EQ) { + return LFS_ERR_CORRUPT; + } + } + + lfs_cache_zero(lfs, pcache); + } + + return 0; +} +#endif + +#ifndef LFS_READONLY +static int lfs_bd_sync(lfs_t *lfs, + lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate) { + lfs_cache_drop(lfs, rcache); + + int err = lfs_bd_flush(lfs, pcache, rcache, validate); + if (err) { + return err; + } + + err = lfs->cfg->sync(lfs->cfg); + LFS_ASSERT(err <= 0); + return err; +} +#endif + +#ifndef LFS_READONLY +static int lfs_bd_prog(lfs_t *lfs, + lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate, + lfs_block_t block, lfs_off_t off, + const void *buffer, lfs_size_t size) { + const uint8_t *data = buffer; + LFS_ASSERT(block == LFS_BLOCK_INLINE || block < lfs->cfg->block_count); + LFS_ASSERT(off + size <= lfs->cfg->block_size); + + while (size > 0) { + if (block == pcache->block && + off >= pcache->off && + off < pcache->off + lfs->cfg->cache_size) { + // already fits in pcache? 
+ lfs_size_t diff = lfs_min(size, + lfs->cfg->cache_size - (off-pcache->off)); + memcpy(&pcache->buffer[off-pcache->off], data, diff); + + data += diff; + off += diff; + size -= diff; + + pcache->size = lfs_max(pcache->size, off - pcache->off); + if (pcache->size == lfs->cfg->cache_size) { + // eagerly flush out pcache if we fill up + int err = lfs_bd_flush(lfs, pcache, rcache, validate); + if (err) { + return err; + } + } + + continue; + } + + // pcache must have been flushed, either by programming and + // entire block or manually flushing the pcache + LFS_ASSERT(pcache->block == LFS_BLOCK_NULL); + + // prepare pcache, first condition can no longer fail + pcache->block = block; + pcache->off = lfs_aligndown(off, lfs->cfg->prog_size); + pcache->size = 0; + } + + return 0; +} +#endif + +#ifndef LFS_READONLY +static int lfs_bd_erase(lfs_t *lfs, lfs_block_t block) { + LFS_ASSERT(block < lfs->cfg->block_count); + int err = lfs->cfg->erase(lfs->cfg, block); + LFS_ASSERT(err <= 0); + return err; +} +#endif + + +/// Small type-level utilities /// +// operations on block pairs +static inline void lfs_pair_swap(lfs_block_t pair[2]) { + lfs_block_t t = pair[0]; + pair[0] = pair[1]; + pair[1] = t; +} + +static inline bool lfs_pair_isnull(const lfs_block_t pair[2]) { + return pair[0] == LFS_BLOCK_NULL || pair[1] == LFS_BLOCK_NULL; +} + +static inline int lfs_pair_cmp( + const lfs_block_t paira[2], + const lfs_block_t pairb[2]) { + return !(paira[0] == pairb[0] || paira[1] == pairb[1] || + paira[0] == pairb[1] || paira[1] == pairb[0]); +} + +#ifndef LFS_READONLY +static inline bool lfs_pair_sync( + const lfs_block_t paira[2], + const lfs_block_t pairb[2]) { + return (paira[0] == pairb[0] && paira[1] == pairb[1]) || + (paira[0] == pairb[1] && paira[1] == pairb[0]); +} +#endif + +static inline void lfs_pair_fromle32(lfs_block_t pair[2]) { + pair[0] = lfs_fromle32(pair[0]); + pair[1] = lfs_fromle32(pair[1]); +} + +#ifndef LFS_READONLY +static inline void 
lfs_pair_tole32(lfs_block_t pair[2]) { + pair[0] = lfs_tole32(pair[0]); + pair[1] = lfs_tole32(pair[1]); +} +#endif + +// operations on 32-bit entry tags +typedef uint32_t lfs_tag_t; +typedef int32_t lfs_stag_t; + +#define LFS_MKTAG(type, id, size) \ + (((lfs_tag_t)(type) << 20) | ((lfs_tag_t)(id) << 10) | (lfs_tag_t)(size)) + +#define LFS_MKTAG_IF(cond, type, id, size) \ + ((cond) ? LFS_MKTAG(type, id, size) : LFS_MKTAG(LFS_FROM_NOOP, 0, 0)) + +#define LFS_MKTAG_IF_ELSE(cond, type1, id1, size1, type2, id2, size2) \ + ((cond) ? LFS_MKTAG(type1, id1, size1) : LFS_MKTAG(type2, id2, size2)) + +static inline bool lfs_tag_isvalid(lfs_tag_t tag) { + return !(tag & 0x80000000); +} + +static inline bool lfs_tag_isdelete(lfs_tag_t tag) { + return ((int32_t)(tag << 22) >> 22) == -1; +} + +static inline uint16_t lfs_tag_type1(lfs_tag_t tag) { + return (tag & 0x70000000) >> 20; +} + +static inline uint16_t lfs_tag_type3(lfs_tag_t tag) { + return (tag & 0x7ff00000) >> 20; +} + +static inline uint8_t lfs_tag_chunk(lfs_tag_t tag) { + return (tag & 0x0ff00000) >> 20; +} + +static inline int8_t lfs_tag_splice(lfs_tag_t tag) { + return (int8_t)lfs_tag_chunk(tag); +} + +static inline uint16_t lfs_tag_id(lfs_tag_t tag) { + return (tag & 0x000ffc00) >> 10; +} + +static inline lfs_size_t lfs_tag_size(lfs_tag_t tag) { + return tag & 0x000003ff; +} + +static inline lfs_size_t lfs_tag_dsize(lfs_tag_t tag) { + return sizeof(tag) + lfs_tag_size(tag + lfs_tag_isdelete(tag)); +} + +// operations on attributes in attribute lists +struct lfs_mattr { + lfs_tag_t tag; + const void *buffer; +}; + +struct lfs_diskoff { + lfs_block_t block; + lfs_off_t off; +}; + +#define LFS_MKATTRS(...) 
\ + (struct lfs_mattr[]){__VA_ARGS__}, \ + sizeof((struct lfs_mattr[]){__VA_ARGS__}) / sizeof(struct lfs_mattr) + +// operations on global state +static inline void lfs_gstate_xor(lfs_gstate_t *a, const lfs_gstate_t *b) { + for (int i = 0; i < 3; i++) { + ((uint32_t*)a)[i] ^= ((const uint32_t*)b)[i]; + } +} + +static inline bool lfs_gstate_iszero(const lfs_gstate_t *a) { + for (int i = 0; i < 3; i++) { + if (((uint32_t*)a)[i] != 0) { + return false; + } + } + return true; +} + +#ifndef LFS_READONLY +static inline bool lfs_gstate_hasorphans(const lfs_gstate_t *a) { + return lfs_tag_size(a->tag); +} + +static inline uint8_t lfs_gstate_getorphans(const lfs_gstate_t *a) { + return lfs_tag_size(a->tag); +} + +static inline bool lfs_gstate_hasmove(const lfs_gstate_t *a) { + return lfs_tag_type1(a->tag); +} +#endif + +static inline bool lfs_gstate_hasmovehere(const lfs_gstate_t *a, + const lfs_block_t *pair) { + return lfs_tag_type1(a->tag) && lfs_pair_cmp(a->pair, pair) == 0; +} + +static inline void lfs_gstate_fromle32(lfs_gstate_t *a) { + a->tag = lfs_fromle32(a->tag); + a->pair[0] = lfs_fromle32(a->pair[0]); + a->pair[1] = lfs_fromle32(a->pair[1]); +} + +#ifndef LFS_READONLY +static inline void lfs_gstate_tole32(lfs_gstate_t *a) { + a->tag = lfs_tole32(a->tag); + a->pair[0] = lfs_tole32(a->pair[0]); + a->pair[1] = lfs_tole32(a->pair[1]); +} +#endif + +// other endianness operations +static void lfs_ctz_fromle32(struct lfs_ctz *ctz) { + ctz->head = lfs_fromle32(ctz->head); + ctz->size = lfs_fromle32(ctz->size); +} + +#ifndef LFS_READONLY +static void lfs_ctz_tole32(struct lfs_ctz *ctz) { + ctz->head = lfs_tole32(ctz->head); + ctz->size = lfs_tole32(ctz->size); +} +#endif + +static inline void lfs_superblock_fromle32(lfs_superblock_t *superblock) { + superblock->version = lfs_fromle32(superblock->version); + superblock->block_size = lfs_fromle32(superblock->block_size); + superblock->block_count = lfs_fromle32(superblock->block_count); + superblock->name_max = 
lfs_fromle32(superblock->name_max); + superblock->file_max = lfs_fromle32(superblock->file_max); + superblock->attr_max = lfs_fromle32(superblock->attr_max); +} + +#ifndef LFS_READONLY +static inline void lfs_superblock_tole32(lfs_superblock_t *superblock) { + superblock->version = lfs_tole32(superblock->version); + superblock->block_size = lfs_tole32(superblock->block_size); + superblock->block_count = lfs_tole32(superblock->block_count); + superblock->name_max = lfs_tole32(superblock->name_max); + superblock->file_max = lfs_tole32(superblock->file_max); + superblock->attr_max = lfs_tole32(superblock->attr_max); +} +#endif + +#ifndef LFS_NO_ASSERT +static bool lfs_mlist_isopen(struct lfs_mlist *head, + struct lfs_mlist *node) { + for (struct lfs_mlist **p = &head; *p; p = &(*p)->next) { + if (*p == (struct lfs_mlist*)node) { + return true; + } + } + + return false; +} +#endif + +static void lfs_mlist_remove(lfs_t *lfs, struct lfs_mlist *mlist) { + for (struct lfs_mlist **p = &lfs->mlist; *p; p = &(*p)->next) { + if (*p == mlist) { + *p = (*p)->next; + break; + } + } +} + +static void lfs_mlist_append(lfs_t *lfs, struct lfs_mlist *mlist) { + mlist->next = lfs->mlist; + lfs->mlist = mlist; +} + + +/// Internal operations predeclared here /// +#ifndef LFS_READONLY +static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir, + const struct lfs_mattr *attrs, int attrcount); +static int lfs_dir_compact(lfs_t *lfs, + lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount, + lfs_mdir_t *source, uint16_t begin, uint16_t end); +static lfs_ssize_t lfs_file_flushedwrite(lfs_t *lfs, lfs_file_t *file, + const void *buffer, lfs_size_t size); +static lfs_ssize_t lfs_file_rawwrite(lfs_t *lfs, lfs_file_t *file, + const void *buffer, lfs_size_t size); +static int lfs_file_rawsync(lfs_t *lfs, lfs_file_t *file); +static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file); +static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file); + +static int lfs_fs_deorphan(lfs_t *lfs, bool 
powerloss); +static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans); +static void lfs_fs_prepmove(lfs_t *lfs, + uint16_t id, const lfs_block_t pair[2]); +static int lfs_fs_pred(lfs_t *lfs, const lfs_block_t dir[2], + lfs_mdir_t *pdir); +static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t dir[2], + lfs_mdir_t *parent); +static int lfs_fs_forceconsistency(lfs_t *lfs); +#endif + +#ifdef LFS_MIGRATE +static int lfs1_traverse(lfs_t *lfs, + int (*cb)(void*, lfs_block_t), void *data); +#endif + +static int lfs_dir_rawrewind(lfs_t *lfs, lfs_dir_t *dir); + +static lfs_ssize_t lfs_file_flushedread(lfs_t *lfs, lfs_file_t *file, + void *buffer, lfs_size_t size); +static lfs_ssize_t lfs_file_rawread(lfs_t *lfs, lfs_file_t *file, + void *buffer, lfs_size_t size); +static int lfs_file_rawclose(lfs_t *lfs, lfs_file_t *file); +static lfs_soff_t lfs_file_rawsize(lfs_t *lfs, lfs_file_t *file); + +static lfs_ssize_t lfs_fs_rawsize(lfs_t *lfs); +static int lfs_fs_rawtraverse(lfs_t *lfs, + int (*cb)(void *data, lfs_block_t block), void *data, + bool includeorphans); + +static int lfs_deinit(lfs_t *lfs); +static int lfs_rawunmount(lfs_t *lfs); + + +/// Block allocator /// +#ifndef LFS_READONLY +static int lfs_alloc_lookahead(void *p, lfs_block_t block) { + lfs_t *lfs = (lfs_t*)p; + lfs_block_t off = ((block - lfs->free.off) + + lfs->cfg->block_count) % lfs->cfg->block_count; + + if (off < lfs->free.size) { + lfs->free.buffer[off / 32] |= 1U << (off % 32); + } + + return 0; +} +#endif + +// indicate allocated blocks have been committed into the filesystem, this +// is to prevent blocks from being garbage collected in the middle of a +// commit operation +static void lfs_alloc_ack(lfs_t *lfs) { + lfs->free.ack = lfs->cfg->block_count; +} + +// drop the lookahead buffer, this is done during mounting and failed +// traversals in order to avoid invalid lookahead state +static void lfs_alloc_drop(lfs_t *lfs) { + lfs->free.size = 0; + lfs->free.i = 0; + lfs_alloc_ack(lfs); +} + 
+#ifndef LFS_READONLY +static int lfs_alloc(lfs_t *lfs, lfs_block_t *block) { + while (true) { + while (lfs->free.i != lfs->free.size) { + lfs_block_t off = lfs->free.i; + lfs->free.i += 1; + lfs->free.ack -= 1; + + if (!(lfs->free.buffer[off / 32] & (1U << (off % 32)))) { + // found a free block + *block = (lfs->free.off + off) % lfs->cfg->block_count; + + // eagerly find next off so an alloc ack can + // discredit old lookahead blocks + while (lfs->free.i != lfs->free.size && + (lfs->free.buffer[lfs->free.i / 32] + & (1U << (lfs->free.i % 32)))) { + lfs->free.i += 1; + lfs->free.ack -= 1; + } + + return 0; + } + } + + // check if we have looked at all blocks since last ack + if (lfs->free.ack == 0) { + LFS_ERROR("No more free space %"PRIu32, + lfs->free.i + lfs->free.off); + return LFS_ERR_NOSPC; + } + + lfs->free.off = (lfs->free.off + lfs->free.size) + % lfs->cfg->block_count; + lfs->free.size = lfs_min(8*lfs->cfg->lookahead_size, lfs->free.ack); + lfs->free.i = 0; + + // find mask of free blocks from tree + memset(lfs->free.buffer, 0, lfs->cfg->lookahead_size); + int err = lfs_fs_rawtraverse(lfs, lfs_alloc_lookahead, lfs, true); + if (err) { + lfs_alloc_drop(lfs); + return err; + } + } +} +#endif + +/// Metadata pair and directory operations /// +static lfs_stag_t lfs_dir_getslice(lfs_t *lfs, const lfs_mdir_t *dir, + lfs_tag_t gmask, lfs_tag_t gtag, + lfs_off_t goff, void *gbuffer, lfs_size_t gsize) { + lfs_off_t off = dir->off; + lfs_tag_t ntag = dir->etag; + lfs_stag_t gdiff = 0; + + if (lfs_gstate_hasmovehere(&lfs->gdisk, dir->pair) && + lfs_tag_id(gmask) != 0 && + lfs_tag_id(lfs->gdisk.tag) <= lfs_tag_id(gtag)) { + // synthetic moves + gdiff -= LFS_MKTAG(0, 1, 0); + } + + // iterate over dir block backwards (for faster lookups) + while (off >= sizeof(lfs_tag_t) + lfs_tag_dsize(ntag)) { + off -= lfs_tag_dsize(ntag); + lfs_tag_t tag = ntag; + int err = lfs_bd_read(lfs, + NULL, &lfs->rcache, sizeof(ntag), + dir->pair[0], off, &ntag, sizeof(ntag)); + if (err) 
{ + return err; + } + + ntag = (lfs_frombe32(ntag) ^ tag) & 0x7fffffff; + + if (lfs_tag_id(gmask) != 0 && + lfs_tag_type1(tag) == LFS_TYPE_SPLICE && + lfs_tag_id(tag) <= lfs_tag_id(gtag - gdiff)) { + if (tag == (LFS_MKTAG(LFS_TYPE_CREATE, 0, 0) | + (LFS_MKTAG(0, 0x3ff, 0) & (gtag - gdiff)))) { + // found where we were created + return LFS_ERR_NOENT; + } + + // move around splices + gdiff += LFS_MKTAG(0, lfs_tag_splice(tag), 0); + } + + if ((gmask & tag) == (gmask & (gtag - gdiff))) { + if (lfs_tag_isdelete(tag)) { + return LFS_ERR_NOENT; + } + + lfs_size_t diff = lfs_min(lfs_tag_size(tag), gsize); + err = lfs_bd_read(lfs, + NULL, &lfs->rcache, diff, + dir->pair[0], off+sizeof(tag)+goff, gbuffer, diff); + if (err) { + return err; + } + + memset((uint8_t*)gbuffer + diff, 0, gsize - diff); + + return tag + gdiff; + } + } + + return LFS_ERR_NOENT; +} + +static lfs_stag_t lfs_dir_get(lfs_t *lfs, const lfs_mdir_t *dir, + lfs_tag_t gmask, lfs_tag_t gtag, void *buffer) { + return lfs_dir_getslice(lfs, dir, + gmask, gtag, + 0, buffer, lfs_tag_size(gtag)); +} + +static int lfs_dir_getread(lfs_t *lfs, const lfs_mdir_t *dir, + const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint, + lfs_tag_t gmask, lfs_tag_t gtag, + lfs_off_t off, void *buffer, lfs_size_t size) { + uint8_t *data = buffer; + if (off+size > lfs->cfg->block_size) { + return LFS_ERR_CORRUPT; + } + + while (size > 0) { + lfs_size_t diff = size; + + if (pcache && pcache->block == LFS_BLOCK_INLINE && + off < pcache->off + pcache->size) { + if (off >= pcache->off) { + // is already in pcache? + diff = lfs_min(diff, pcache->size - (off-pcache->off)); + memcpy(data, &pcache->buffer[off-pcache->off], diff); + + data += diff; + off += diff; + size -= diff; + continue; + } + + // pcache takes priority + diff = lfs_min(diff, pcache->off-off); + } + + if (rcache->block == LFS_BLOCK_INLINE && + off < rcache->off + rcache->size) { + if (off >= rcache->off) { + // is already in rcache? 
+ diff = lfs_min(diff, rcache->size - (off-rcache->off)); + memcpy(data, &rcache->buffer[off-rcache->off], diff); + + data += diff; + off += diff; + size -= diff; + continue; + } + + // rcache takes priority + diff = lfs_min(diff, rcache->off-off); + } + + // load to cache, first condition can no longer fail + rcache->block = LFS_BLOCK_INLINE; + rcache->off = lfs_aligndown(off, lfs->cfg->read_size); + rcache->size = lfs_min(lfs_alignup(off+hint, lfs->cfg->read_size), + lfs->cfg->cache_size); + int err = lfs_dir_getslice(lfs, dir, gmask, gtag, + rcache->off, rcache->buffer, rcache->size); + if (err < 0) { + return err; + } + } + + return 0; +} + +#ifndef LFS_READONLY +static int lfs_dir_traverse_filter(void *p, + lfs_tag_t tag, const void *buffer) { + lfs_tag_t *filtertag = p; + (void)buffer; + + // which mask depends on unique bit in tag structure + uint32_t mask = (tag & LFS_MKTAG(0x100, 0, 0)) + ? LFS_MKTAG(0x7ff, 0x3ff, 0) + : LFS_MKTAG(0x700, 0x3ff, 0); + + // check for redundancy + if ((mask & tag) == (mask & *filtertag) || + lfs_tag_isdelete(*filtertag) || + (LFS_MKTAG(0x7ff, 0x3ff, 0) & tag) == ( + LFS_MKTAG(LFS_TYPE_DELETE, 0, 0) | + (LFS_MKTAG(0, 0x3ff, 0) & *filtertag))) { + *filtertag = LFS_MKTAG(LFS_FROM_NOOP, 0, 0); + return true; + } + + // check if we need to adjust for created/deleted tags + if (lfs_tag_type1(tag) == LFS_TYPE_SPLICE && + lfs_tag_id(tag) <= lfs_tag_id(*filtertag)) { + *filtertag += LFS_MKTAG(0, lfs_tag_splice(tag), 0); + } + + return false; +} +#endif + +#ifndef LFS_READONLY +// maximum recursive depth of lfs_dir_traverse, the deepest call: +// +// traverse with commit +// '-> traverse with move +// '-> traverse with filter +// +#define LFS_DIR_TRAVERSE_DEPTH 3 + +struct lfs_dir_traverse { + const lfs_mdir_t *dir; + lfs_off_t off; + lfs_tag_t ptag; + const struct lfs_mattr *attrs; + int attrcount; + + lfs_tag_t tmask; + lfs_tag_t ttag; + uint16_t begin; + uint16_t end; + int16_t diff; + + int (*cb)(void *data, lfs_tag_t tag, const 
void *buffer); + void *data; + + lfs_tag_t tag; + const void *buffer; + struct lfs_diskoff disk; +}; + +static int lfs_dir_traverse(lfs_t *lfs, + const lfs_mdir_t *dir, lfs_off_t off, lfs_tag_t ptag, + const struct lfs_mattr *attrs, int attrcount, + lfs_tag_t tmask, lfs_tag_t ttag, + uint16_t begin, uint16_t end, int16_t diff, + int (*cb)(void *data, lfs_tag_t tag, const void *buffer), void *data) { + // This function in inherently recursive, but bounded. To allow tool-based + // analysis without unnecessary code-cost we use an explicit stack + struct lfs_dir_traverse stack[LFS_DIR_TRAVERSE_DEPTH-1]; + unsigned sp = 0; + int res; + + // iterate over directory and attrs + lfs_tag_t tag; + const void *buffer; + struct lfs_diskoff disk; + while (true) { + { + if (off+lfs_tag_dsize(ptag) < dir->off) { + off += lfs_tag_dsize(ptag); + int err = lfs_bd_read(lfs, + NULL, &lfs->rcache, sizeof(tag), + dir->pair[0], off, &tag, sizeof(tag)); + if (err) { + return err; + } + + tag = (lfs_frombe32(tag) ^ ptag) | 0x80000000; + disk.block = dir->pair[0]; + disk.off = off+sizeof(lfs_tag_t); + buffer = &disk; + ptag = tag; + } else if (attrcount > 0) { + tag = attrs[0].tag; + buffer = attrs[0].buffer; + attrs += 1; + attrcount -= 1; + } else { + // finished traversal, pop from stack? + res = 0; + break; + } + + // do we need to filter? 
+ lfs_tag_t mask = LFS_MKTAG(0x7ff, 0, 0); + if ((mask & tmask & tag) != (mask & tmask & ttag)) { + continue; + } + + if (lfs_tag_id(tmask) != 0) { + LFS_ASSERT(sp < LFS_DIR_TRAVERSE_DEPTH); + // recurse, scan for duplicates, and update tag based on + // creates/deletes + stack[sp] = (struct lfs_dir_traverse){ + .dir = dir, + .off = off, + .ptag = ptag, + .attrs = attrs, + .attrcount = attrcount, + .tmask = tmask, + .ttag = ttag, + .begin = begin, + .end = end, + .diff = diff, + .cb = cb, + .data = data, + .tag = tag, + .buffer = buffer, + .disk = disk, + }; + sp += 1; + + dir = dir; + off = off; + ptag = ptag; + attrs = attrs; + attrcount = attrcount; + tmask = 0; + ttag = 0; + begin = 0; + end = 0; + diff = 0; + cb = lfs_dir_traverse_filter; + data = &stack[sp-1].tag; + continue; + } + } + +popped: + // in filter range? + if (lfs_tag_id(tmask) != 0 && + !(lfs_tag_id(tag) >= begin && lfs_tag_id(tag) < end)) { + continue; + } + + // handle special cases for mcu-side operations + if (lfs_tag_type3(tag) == LFS_FROM_NOOP) { + // do nothing + } else if (lfs_tag_type3(tag) == LFS_FROM_MOVE) { + // Without this condition, lfs_dir_traverse can exhibit an + // extremely expensive O(n^3) of nested loops when renaming. + // This happens because lfs_dir_traverse tries to filter tags by + // the tags in the source directory, triggering a second + // lfs_dir_traverse with its own filter operation. + // + // traverse with commit + // '-> traverse with filter + // '-> traverse with move + // '-> traverse with filter + // + // However we don't actually care about filtering the second set of + // tags, since duplicate tags have no effect when filtering. + // + // This check skips this unnecessary recursive filtering explicitly, + // reducing this runtime from O(n^3) to O(n^2). 
+ if (cb == lfs_dir_traverse_filter) { + continue; + } + + // recurse into move + stack[sp] = (struct lfs_dir_traverse){ + .dir = dir, + .off = off, + .ptag = ptag, + .attrs = attrs, + .attrcount = attrcount, + .tmask = tmask, + .ttag = ttag, + .begin = begin, + .end = end, + .diff = diff, + .cb = cb, + .data = data, + .tag = LFS_MKTAG(LFS_FROM_NOOP, 0, 0), + }; + sp += 1; + + uint16_t fromid = lfs_tag_size(tag); + uint16_t toid = lfs_tag_id(tag); + dir = buffer; + off = 0; + ptag = 0xffffffff; + attrs = NULL; + attrcount = 0; + tmask = LFS_MKTAG(0x600, 0x3ff, 0); + ttag = LFS_MKTAG(LFS_TYPE_STRUCT, 0, 0); + begin = fromid; + end = fromid+1; + diff = toid-fromid+diff; + } else if (lfs_tag_type3(tag) == LFS_FROM_USERATTRS) { + for (unsigned i = 0; i < lfs_tag_size(tag); i++) { + const struct lfs_attr *a = buffer; + res = cb(data, LFS_MKTAG(LFS_TYPE_USERATTR + a[i].type, + lfs_tag_id(tag) + diff, a[i].size), a[i].buffer); + if (res < 0) { + return res; + } + + if (res) { + break; + } + } + } else { + res = cb(data, tag + LFS_MKTAG(0, diff, 0), buffer); + if (res < 0) { + return res; + } + + if (res) { + break; + } + } + } + + if (sp > 0) { + // pop from the stack and return, fortunately all pops share + // a destination + dir = stack[sp-1].dir; + off = stack[sp-1].off; + ptag = stack[sp-1].ptag; + attrs = stack[sp-1].attrs; + attrcount = stack[sp-1].attrcount; + tmask = stack[sp-1].tmask; + ttag = stack[sp-1].ttag; + begin = stack[sp-1].begin; + end = stack[sp-1].end; + diff = stack[sp-1].diff; + cb = stack[sp-1].cb; + data = stack[sp-1].data; + tag = stack[sp-1].tag; + buffer = stack[sp-1].buffer; + disk = stack[sp-1].disk; + sp -= 1; + goto popped; + } else { + return res; + } +} +#endif + +static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs, + lfs_mdir_t *dir, const lfs_block_t pair[2], + lfs_tag_t fmask, lfs_tag_t ftag, uint16_t *id, + int (*cb)(void *data, lfs_tag_t tag, const void *buffer), void *data) { + // we can find tag very efficiently during a fetch, since 
we're already + // scanning the entire directory + lfs_stag_t besttag = -1; + + // if either block address is invalid we return LFS_ERR_CORRUPT here, + // otherwise later writes to the pair could fail + if (pair[0] >= lfs->cfg->block_count || pair[1] >= lfs->cfg->block_count) { + return LFS_ERR_CORRUPT; + } + + // find the block with the most recent revision + uint32_t revs[2] = {0, 0}; + int r = 0; + for (int i = 0; i < 2; i++) { + int err = lfs_bd_read(lfs, + NULL, &lfs->rcache, sizeof(revs[i]), + pair[i], 0, &revs[i], sizeof(revs[i])); + revs[i] = lfs_fromle32(revs[i]); + if (err && err != LFS_ERR_CORRUPT) { + return err; + } + + if (err != LFS_ERR_CORRUPT && + lfs_scmp(revs[i], revs[(i+1)%2]) > 0) { + r = i; + } + } + + dir->pair[0] = pair[(r+0)%2]; + dir->pair[1] = pair[(r+1)%2]; + dir->rev = revs[(r+0)%2]; + dir->off = 0; // nonzero = found some commits + + // now scan tags to fetch the actual dir and find possible match + for (int i = 0; i < 2; i++) { + lfs_off_t off = 0; + lfs_tag_t ptag = 0xffffffff; + + uint16_t tempcount = 0; + lfs_block_t temptail[2] = {LFS_BLOCK_NULL, LFS_BLOCK_NULL}; + bool tempsplit = false; + lfs_stag_t tempbesttag = besttag; + + dir->rev = lfs_tole32(dir->rev); + uint32_t crc = lfs_crc(0xffffffff, &dir->rev, sizeof(dir->rev)); + dir->rev = lfs_fromle32(dir->rev); + + while (true) { + // extract next tag + lfs_tag_t tag; + off += lfs_tag_dsize(ptag); + int err = lfs_bd_read(lfs, + NULL, &lfs->rcache, lfs->cfg->block_size, + dir->pair[0], off, &tag, sizeof(tag)); + if (err) { + if (err == LFS_ERR_CORRUPT) { + // can't continue? 
+ dir->erased = false; + break; + } + return err; + } + + crc = lfs_crc(crc, &tag, sizeof(tag)); + tag = lfs_frombe32(tag) ^ ptag; + + // next commit not yet programmed or we're not in valid range + if (!lfs_tag_isvalid(tag)) { + dir->erased = (lfs_tag_type1(ptag) == LFS_TYPE_CRC && + dir->off % lfs->cfg->prog_size == 0); + break; + } else if (off + lfs_tag_dsize(tag) > lfs->cfg->block_size) { + dir->erased = false; + break; + } + + ptag = tag; + + if (lfs_tag_type1(tag) == LFS_TYPE_CRC) { + // check the crc attr + uint32_t dcrc; + err = lfs_bd_read(lfs, + NULL, &lfs->rcache, lfs->cfg->block_size, + dir->pair[0], off+sizeof(tag), &dcrc, sizeof(dcrc)); + if (err) { + if (err == LFS_ERR_CORRUPT) { + dir->erased = false; + break; + } + return err; + } + dcrc = lfs_fromle32(dcrc); + + if (crc != dcrc) { + dir->erased = false; + break; + } + + // reset the next bit if we need to + ptag ^= (lfs_tag_t)(lfs_tag_chunk(tag) & 1U) << 31; + + // toss our crc into the filesystem seed for + // pseudorandom numbers, note we use another crc here + // as a collection function because it is sufficiently + // random and convenient + lfs->seed = lfs_crc(lfs->seed, &crc, sizeof(crc)); + + // update with what's found so far + besttag = tempbesttag; + dir->off = off + lfs_tag_dsize(tag); + dir->etag = ptag; + dir->count = tempcount; + dir->tail[0] = temptail[0]; + dir->tail[1] = temptail[1]; + dir->split = tempsplit; + + // reset crc + crc = 0xffffffff; + continue; + } + + // crc the entry first, hopefully leaving it in the cache + for (lfs_off_t j = sizeof(tag); j < lfs_tag_dsize(tag); j++) { + uint8_t dat; + err = lfs_bd_read(lfs, + NULL, &lfs->rcache, lfs->cfg->block_size, + dir->pair[0], off+j, &dat, 1); + if (err) { + if (err == LFS_ERR_CORRUPT) { + dir->erased = false; + break; + } + return err; + } + + crc = lfs_crc(crc, &dat, 1); + } + + // directory modification tags? 
+ if (lfs_tag_type1(tag) == LFS_TYPE_NAME) { + // increase count of files if necessary + if (lfs_tag_id(tag) >= tempcount) { + tempcount = lfs_tag_id(tag) + 1; + } + } else if (lfs_tag_type1(tag) == LFS_TYPE_SPLICE) { + tempcount += lfs_tag_splice(tag); + + if (tag == (LFS_MKTAG(LFS_TYPE_DELETE, 0, 0) | + (LFS_MKTAG(0, 0x3ff, 0) & tempbesttag))) { + tempbesttag |= 0x80000000; + } else if (tempbesttag != -1 && + lfs_tag_id(tag) <= lfs_tag_id(tempbesttag)) { + tempbesttag += LFS_MKTAG(0, lfs_tag_splice(tag), 0); + } + } else if (lfs_tag_type1(tag) == LFS_TYPE_TAIL) { + tempsplit = (lfs_tag_chunk(tag) & 1); + + err = lfs_bd_read(lfs, + NULL, &lfs->rcache, lfs->cfg->block_size, + dir->pair[0], off+sizeof(tag), &temptail, 8); + if (err) { + if (err == LFS_ERR_CORRUPT) { + dir->erased = false; + break; + } + } + lfs_pair_fromle32(temptail); + } + + // found a match for our fetcher? + if ((fmask & tag) == (fmask & ftag)) { + int res = cb(data, tag, &(struct lfs_diskoff){ + dir->pair[0], off+sizeof(tag)}); + if (res < 0) { + if (res == LFS_ERR_CORRUPT) { + dir->erased = false; + break; + } + return res; + } + + if (res == LFS_CMP_EQ) { + // found a match + tempbesttag = tag; + } else if ((LFS_MKTAG(0x7ff, 0x3ff, 0) & tag) == + (LFS_MKTAG(0x7ff, 0x3ff, 0) & tempbesttag)) { + // found an identical tag, but contents didn't match + // this must mean that our besttag has been overwritten + tempbesttag = -1; + } else if (res == LFS_CMP_GT && + lfs_tag_id(tag) <= lfs_tag_id(tempbesttag)) { + // found a greater match, keep track to keep things sorted + tempbesttag = tag | 0x80000000; + } + } + } + + // consider what we have good enough + if (dir->off > 0) { + // synthetic move + if (lfs_gstate_hasmovehere(&lfs->gdisk, dir->pair)) { + if (lfs_tag_id(lfs->gdisk.tag) == lfs_tag_id(besttag)) { + besttag |= 0x80000000; + } else if (besttag != -1 && + lfs_tag_id(lfs->gdisk.tag) < lfs_tag_id(besttag)) { + besttag -= LFS_MKTAG(0, 1, 0); + } + } + + // found tag? or found best id? 
+ if (id) { + *id = lfs_min(lfs_tag_id(besttag), dir->count); + } + + if (lfs_tag_isvalid(besttag)) { + return besttag; + } else if (lfs_tag_id(besttag) < dir->count) { + return LFS_ERR_NOENT; + } else { + return 0; + } + } + + // failed, try the other block? + lfs_pair_swap(dir->pair); + dir->rev = revs[(r+1)%2]; + } + + LFS_ERROR("Corrupted dir pair at {0x%"PRIx32", 0x%"PRIx32"}", + dir->pair[0], dir->pair[1]); + return LFS_ERR_CORRUPT; +} + +static int lfs_dir_fetch(lfs_t *lfs, + lfs_mdir_t *dir, const lfs_block_t pair[2]) { + // note, mask=-1, tag=-1 can never match a tag since this + // pattern has the invalid bit set + return (int)lfs_dir_fetchmatch(lfs, dir, pair, + (lfs_tag_t)-1, (lfs_tag_t)-1, NULL, NULL, NULL); +} + +static int lfs_dir_getgstate(lfs_t *lfs, const lfs_mdir_t *dir, + lfs_gstate_t *gstate) { + lfs_gstate_t temp; + lfs_stag_t res = lfs_dir_get(lfs, dir, LFS_MKTAG(0x7ff, 0, 0), + LFS_MKTAG(LFS_TYPE_MOVESTATE, 0, sizeof(temp)), &temp); + if (res < 0 && res != LFS_ERR_NOENT) { + return res; + } + + if (res != LFS_ERR_NOENT) { + // xor together to find resulting gstate + lfs_gstate_fromle32(&temp); + lfs_gstate_xor(gstate, &temp); + } + + return 0; +} + +static int lfs_dir_getinfo(lfs_t *lfs, lfs_mdir_t *dir, + uint16_t id, struct lfs_info *info) { + if (id == 0x3ff) { + // special case for root + strcpy(info->name, "/"); + info->type = LFS_TYPE_DIR; + return 0; + } + + lfs_stag_t tag = lfs_dir_get(lfs, dir, LFS_MKTAG(0x780, 0x3ff, 0), + LFS_MKTAG(LFS_TYPE_NAME, id, lfs->name_max+1), info->name); + if (tag < 0) { + return (int)tag; + } + + info->type = lfs_tag_type3(tag); + + struct lfs_ctz ctz; + tag = lfs_dir_get(lfs, dir, LFS_MKTAG(0x700, 0x3ff, 0), + LFS_MKTAG(LFS_TYPE_STRUCT, id, sizeof(ctz)), &ctz); + if (tag < 0) { + return (int)tag; + } + lfs_ctz_fromle32(&ctz); + + if (lfs_tag_type3(tag) == LFS_TYPE_CTZSTRUCT) { + info->size = ctz.size; + } else if (lfs_tag_type3(tag) == LFS_TYPE_INLINESTRUCT) { + info->size = lfs_tag_size(tag); + } + + 
return 0; +} + +struct lfs_dir_find_match { + lfs_t *lfs; + const void *name; + lfs_size_t size; +}; + +static int lfs_dir_find_match(void *data, + lfs_tag_t tag, const void *buffer) { + struct lfs_dir_find_match *name = data; + lfs_t *lfs = name->lfs; + const struct lfs_diskoff *disk = buffer; + + // compare with disk + lfs_size_t diff = lfs_min(name->size, lfs_tag_size(tag)); + int res = lfs_bd_cmp(lfs, + NULL, &lfs->rcache, diff, + disk->block, disk->off, name->name, diff); + if (res != LFS_CMP_EQ) { + return res; + } + + // only equal if our size is still the same + if (name->size != lfs_tag_size(tag)) { + return (name->size < lfs_tag_size(tag)) ? LFS_CMP_LT : LFS_CMP_GT; + } + + // found a match! + return LFS_CMP_EQ; +} + +static lfs_stag_t lfs_dir_find(lfs_t *lfs, lfs_mdir_t *dir, + const char **path, uint16_t *id) { + // we reduce path to a single name if we can find it + const char *name = *path; + if (id) { + *id = 0x3ff; + } + + // default to root dir + lfs_stag_t tag = LFS_MKTAG(LFS_TYPE_DIR, 0x3ff, 0); + dir->tail[0] = lfs->root[0]; + dir->tail[1] = lfs->root[1]; + + while (true) { +nextname: + // skip slashes + name += strspn(name, "/"); + lfs_size_t namelen = strcspn(name, "/"); + + // skip '.' and root '..' + if ((namelen == 1 && memcmp(name, ".", 1) == 0) || + (namelen == 2 && memcmp(name, "..", 2) == 0)) { + name += namelen; + goto nextname; + } + + // skip if matched by '..' 
in name + const char *suffix = name + namelen; + lfs_size_t sufflen; + int depth = 1; + while (true) { + suffix += strspn(suffix, "/"); + sufflen = strcspn(suffix, "/"); + if (sufflen == 0) { + break; + } + + if (sufflen == 2 && memcmp(suffix, "..", 2) == 0) { + depth -= 1; + if (depth == 0) { + name = suffix + sufflen; + goto nextname; + } + } else { + depth += 1; + } + + suffix += sufflen; + } + + // found path + if (name[0] == '\0') { + return tag; + } + + // update what we've found so far + *path = name; + + // only continue if we hit a directory + if (lfs_tag_type3(tag) != LFS_TYPE_DIR) { + return LFS_ERR_NOTDIR; + } + + // grab the entry data + if (lfs_tag_id(tag) != 0x3ff) { + lfs_stag_t res = lfs_dir_get(lfs, dir, LFS_MKTAG(0x700, 0x3ff, 0), + LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), dir->tail); + if (res < 0) { + return res; + } + lfs_pair_fromle32(dir->tail); + } + + // find entry matching name + while (true) { + tag = lfs_dir_fetchmatch(lfs, dir, dir->tail, + LFS_MKTAG(0x780, 0, 0), + LFS_MKTAG(LFS_TYPE_NAME, 0, namelen), + // are we last name? + (strchr(name, '/') == NULL) ? 
id : NULL, + lfs_dir_find_match, &(struct lfs_dir_find_match){ + lfs, name, namelen}); + if (tag < 0) { + return tag; + } + + if (tag) { + break; + } + + if (!dir->split) { + return LFS_ERR_NOENT; + } + } + + // to next name + name += namelen; + } +} + +// commit logic +struct lfs_commit { + lfs_block_t block; + lfs_off_t off; + lfs_tag_t ptag; + uint32_t crc; + + lfs_off_t begin; + lfs_off_t end; +}; + +#ifndef LFS_READONLY +static int lfs_dir_commitprog(lfs_t *lfs, struct lfs_commit *commit, + const void *buffer, lfs_size_t size) { + int err = lfs_bd_prog(lfs, + &lfs->pcache, &lfs->rcache, false, + commit->block, commit->off , + (const uint8_t*)buffer, size); + if (err) { + return err; + } + + commit->crc = lfs_crc(commit->crc, buffer, size); + commit->off += size; + return 0; +} +#endif + +#ifndef LFS_READONLY +static int lfs_dir_commitattr(lfs_t *lfs, struct lfs_commit *commit, + lfs_tag_t tag, const void *buffer) { + // check if we fit + lfs_size_t dsize = lfs_tag_dsize(tag); + if (commit->off + dsize > commit->end) { + return LFS_ERR_NOSPC; + } + + // write out tag + lfs_tag_t ntag = lfs_tobe32((tag & 0x7fffffff) ^ commit->ptag); + int err = lfs_dir_commitprog(lfs, commit, &ntag, sizeof(ntag)); + if (err) { + return err; + } + + if (!(tag & 0x80000000)) { + // from memory + err = lfs_dir_commitprog(lfs, commit, buffer, dsize-sizeof(tag)); + if (err) { + return err; + } + } else { + // from disk + const struct lfs_diskoff *disk = buffer; + for (lfs_off_t i = 0; i < dsize-sizeof(tag); i++) { + // rely on caching to make this efficient + uint8_t dat; + err = lfs_bd_read(lfs, + NULL, &lfs->rcache, dsize-sizeof(tag)-i, + disk->block, disk->off+i, &dat, 1); + if (err) { + return err; + } + + err = lfs_dir_commitprog(lfs, commit, &dat, 1); + if (err) { + return err; + } + } + } + + commit->ptag = tag & 0x7fffffff; + return 0; +} +#endif + +#ifndef LFS_READONLY +static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) { + // align to program units + const 
lfs_off_t end = lfs_alignup(commit->off + 2*sizeof(uint32_t), + lfs->cfg->prog_size); + + lfs_off_t off1 = 0; + uint32_t crc1 = 0; + + // create crc tags to fill up remainder of commit, note that + // padding is not crced, which lets fetches skip padding but + // makes committing a bit more complicated + while (commit->off < end) { + lfs_off_t off = commit->off + sizeof(lfs_tag_t); + lfs_off_t noff = lfs_min(end - off, 0x3fe) + off; + if (noff < end) { + noff = lfs_min(noff, end - 2*sizeof(uint32_t)); + } + + // read erased state from next program unit + lfs_tag_t tag = 0xffffffff; + int err = lfs_bd_read(lfs, + NULL, &lfs->rcache, sizeof(tag), + commit->block, noff, &tag, sizeof(tag)); + if (err && err != LFS_ERR_CORRUPT) { + return err; + } + + // build crc tag + bool reset = ~lfs_frombe32(tag) >> 31; + tag = LFS_MKTAG(LFS_TYPE_CRC + reset, 0x3ff, noff - off); + + // write out crc + uint32_t footer[2]; + footer[0] = lfs_tobe32(tag ^ commit->ptag); + commit->crc = lfs_crc(commit->crc, &footer[0], sizeof(footer[0])); + footer[1] = lfs_tole32(commit->crc); + err = lfs_bd_prog(lfs, + &lfs->pcache, &lfs->rcache, false, + commit->block, commit->off, &footer, sizeof(footer)); + if (err) { + return err; + } + + // keep track of non-padding checksum to verify + if (off1 == 0) { + off1 = commit->off + sizeof(uint32_t); + crc1 = commit->crc; + } + + commit->off += sizeof(tag)+lfs_tag_size(tag); + commit->ptag = tag ^ ((lfs_tag_t)reset << 31); + commit->crc = 0xffffffff; // reset crc for next "commit" + } + + // flush buffers + int err = lfs_bd_sync(lfs, &lfs->pcache, &lfs->rcache, false); + if (err) { + return err; + } + + // successful commit, check checksums to make sure + lfs_off_t off = commit->begin; + lfs_off_t noff = off1; + while (off < end) { + uint32_t crc = 0xffffffff; + for (lfs_off_t i = off; i < noff+sizeof(uint32_t); i++) { + // check against written crc, may catch blocks that + // become readonly and match our commit size exactly + if (i == off1 && crc != 
crc1) { + return LFS_ERR_CORRUPT; + } + + // leave it up to caching to make this efficient + uint8_t dat; + err = lfs_bd_read(lfs, + NULL, &lfs->rcache, noff+sizeof(uint32_t)-i, + commit->block, i, &dat, 1); + if (err) { + return err; + } + + crc = lfs_crc(crc, &dat, 1); + } + + // detected write error? + if (crc != 0) { + return LFS_ERR_CORRUPT; + } + + // skip padding + off = lfs_min(end - noff, 0x3fe) + noff; + if (off < end) { + off = lfs_min(off, end - 2*sizeof(uint32_t)); + } + noff = off + sizeof(uint32_t); + } + + return 0; +} +#endif + +#ifndef LFS_READONLY +static int lfs_dir_alloc(lfs_t *lfs, lfs_mdir_t *dir) { + // allocate pair of dir blocks (backwards, so we write block 1 first) + for (int i = 0; i < 2; i++) { + int err = lfs_alloc(lfs, &dir->pair[(i+1)%2]); + if (err) { + return err; + } + } + + // zero for reproducibility in case initial block is unreadable + dir->rev = 0; + + // rather than clobbering one of the blocks we just pretend + // the revision may be valid + int err = lfs_bd_read(lfs, + NULL, &lfs->rcache, sizeof(dir->rev), + dir->pair[0], 0, &dir->rev, sizeof(dir->rev)); + dir->rev = lfs_fromle32(dir->rev); + if (err && err != LFS_ERR_CORRUPT) { + return err; + } + + // to make sure we don't immediately evict, align the new revision count + // to our block_cycles modulus, see lfs_dir_compact for why our modulus + // is tweaked this way + if (lfs->cfg->block_cycles > 0) { + dir->rev = lfs_alignup(dir->rev, ((lfs->cfg->block_cycles+1)|1)); + } + + // set defaults + dir->off = sizeof(dir->rev); + dir->etag = 0xffffffff; + dir->count = 0; + dir->tail[0] = LFS_BLOCK_NULL; + dir->tail[1] = LFS_BLOCK_NULL; + dir->erased = false; + dir->split = false; + + // don't write out yet, let caller take care of that + return 0; +} +#endif + +#ifndef LFS_READONLY +static int lfs_dir_drop(lfs_t *lfs, lfs_mdir_t *dir, lfs_mdir_t *tail) { + // steal state + int err = lfs_dir_getgstate(lfs, tail, &lfs->gdelta); + if (err) { + return err; + } + + // steal tail 
+ lfs_pair_tole32(tail->tail); + err = lfs_dir_commit(lfs, dir, LFS_MKATTRS( + {LFS_MKTAG(LFS_TYPE_TAIL + tail->split, 0x3ff, 8), tail->tail})); + lfs_pair_fromle32(tail->tail); + if (err) { + return err; + } + + return 0; +} +#endif + +#ifndef LFS_READONLY +static int lfs_dir_split(lfs_t *lfs, + lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount, + lfs_mdir_t *source, uint16_t split, uint16_t end) { + // create tail metadata pair + lfs_mdir_t tail; + int err = lfs_dir_alloc(lfs, &tail); + if (err) { + return err; + } + + tail.split = dir->split; + tail.tail[0] = dir->tail[0]; + tail.tail[1] = dir->tail[1]; + + // note we don't care about LFS_OK_RELOCATED + int res = lfs_dir_compact(lfs, &tail, attrs, attrcount, source, split, end); + if (res < 0) { + return res; + } + + dir->tail[0] = tail.pair[0]; + dir->tail[1] = tail.pair[1]; + dir->split = true; + + // update root if needed + if (lfs_pair_cmp(dir->pair, lfs->root) == 0 && split == 0) { + lfs->root[0] = tail.pair[0]; + lfs->root[1] = tail.pair[1]; + } + + return 0; +} +#endif + +#ifndef LFS_READONLY +static int lfs_dir_commit_size(void *p, lfs_tag_t tag, const void *buffer) { + lfs_size_t *size = p; + (void)buffer; + + *size += lfs_tag_dsize(tag); + return 0; +} +#endif + +#ifndef LFS_READONLY +struct lfs_dir_commit_commit { + lfs_t *lfs; + struct lfs_commit *commit; +}; +#endif + +#ifndef LFS_READONLY +static int lfs_dir_commit_commit(void *p, lfs_tag_t tag, const void *buffer) { + struct lfs_dir_commit_commit *commit = p; + return lfs_dir_commitattr(commit->lfs, commit->commit, tag, buffer); +} +#endif + +#ifndef LFS_READONLY +static bool lfs_dir_needsrelocation(lfs_t *lfs, lfs_mdir_t *dir) { + // If our revision count == n * block_cycles, we should force a relocation, + // this is how littlefs wear-levels at the metadata-pair level. Note that we + // actually use (block_cycles+1)|1, this is to avoid two corner cases: + // 1. 
block_cycles = 1, which would prevent relocations from terminating + // 2. block_cycles = 2n, which, due to aliasing, would only ever relocate + // one metadata block in the pair, effectively making this useless + return (lfs->cfg->block_cycles > 0 + && ((dir->rev + 1) % ((lfs->cfg->block_cycles+1)|1) == 0)); +} +#endif + +#ifndef LFS_READONLY +static int lfs_dir_compact(lfs_t *lfs, + lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount, + lfs_mdir_t *source, uint16_t begin, uint16_t end) { + // save some state in case block is bad + bool relocated = false; + bool tired = lfs_dir_needsrelocation(lfs, dir); + + // increment revision count + dir->rev += 1; + + // do not proactively relocate blocks during migrations, this + // can cause a number of failure states such: clobbering the + // v1 superblock if we relocate root, and invalidating directory + // pointers if we relocate the head of a directory. On top of + // this, relocations increase the overall complexity of + // lfs_migration, which is already a delicate operation. +#ifdef LFS_MIGRATE + if (lfs->lfs1) { + tired = false; + } +#endif + + if (tired && lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) != 0) { + // we're writing too much, time to relocate + goto relocate; + } + + // begin loop to commit compaction to blocks until a compact sticks + while (true) { + { + // setup commit state + struct lfs_commit commit = { + .block = dir->pair[1], + .off = 0, + .ptag = 0xffffffff, + .crc = 0xffffffff, + + .begin = 0, + .end = (lfs->cfg->metadata_max ? 
+ lfs->cfg->metadata_max : lfs->cfg->block_size) - 8, + }; + + // erase block to write to + int err = lfs_bd_erase(lfs, dir->pair[1]); + if (err) { + if (err == LFS_ERR_CORRUPT) { + goto relocate; + } + return err; + } + + // write out header + dir->rev = lfs_tole32(dir->rev); + err = lfs_dir_commitprog(lfs, &commit, + &dir->rev, sizeof(dir->rev)); + dir->rev = lfs_fromle32(dir->rev); + if (err) { + if (err == LFS_ERR_CORRUPT) { + goto relocate; + } + return err; + } + + // traverse the directory, this time writing out all unique tags + err = lfs_dir_traverse(lfs, + source, 0, 0xffffffff, attrs, attrcount, + LFS_MKTAG(0x400, 0x3ff, 0), + LFS_MKTAG(LFS_TYPE_NAME, 0, 0), + begin, end, -begin, + lfs_dir_commit_commit, &(struct lfs_dir_commit_commit){ + lfs, &commit}); + if (err) { + if (err == LFS_ERR_CORRUPT) { + goto relocate; + } + return err; + } + + // commit tail, which may be new after last size check + if (!lfs_pair_isnull(dir->tail)) { + lfs_pair_tole32(dir->tail); + err = lfs_dir_commitattr(lfs, &commit, + LFS_MKTAG(LFS_TYPE_TAIL + dir->split, 0x3ff, 8), + dir->tail); + lfs_pair_fromle32(dir->tail); + if (err) { + if (err == LFS_ERR_CORRUPT) { + goto relocate; + } + return err; + } + } + + // bring over gstate? 
+ lfs_gstate_t delta = {0}; + if (!relocated) { + lfs_gstate_xor(&delta, &lfs->gdisk); + lfs_gstate_xor(&delta, &lfs->gstate); + } + lfs_gstate_xor(&delta, &lfs->gdelta); + delta.tag &= ~LFS_MKTAG(0, 0, 0x3ff); + + err = lfs_dir_getgstate(lfs, dir, &delta); + if (err) { + return err; + } + + if (!lfs_gstate_iszero(&delta)) { + lfs_gstate_tole32(&delta); + err = lfs_dir_commitattr(lfs, &commit, + LFS_MKTAG(LFS_TYPE_MOVESTATE, 0x3ff, + sizeof(delta)), &delta); + if (err) { + if (err == LFS_ERR_CORRUPT) { + goto relocate; + } + return err; + } + } + + // complete commit with crc + err = lfs_dir_commitcrc(lfs, &commit); + if (err) { + if (err == LFS_ERR_CORRUPT) { + goto relocate; + } + return err; + } + + // successful compaction, swap dir pair to indicate most recent + LFS_ASSERT(commit.off % lfs->cfg->prog_size == 0); + lfs_pair_swap(dir->pair); + dir->count = end - begin; + dir->off = commit.off; + dir->etag = commit.ptag; + // update gstate + lfs->gdelta = (lfs_gstate_t){0}; + if (!relocated) { + lfs->gdisk = lfs->gstate; + } + } + break; + +relocate: + // commit was corrupted, drop caches and prepare to relocate block + relocated = true; + lfs_cache_drop(lfs, &lfs->pcache); + if (!tired) { + LFS_DEBUG("Bad block at 0x%"PRIx32, dir->pair[1]); + } + + // can't relocate superblock, filesystem is now frozen + if (lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) == 0) { + LFS_WARN("Superblock 0x%"PRIx32" has become unwritable", + dir->pair[1]); + return LFS_ERR_NOSPC; + } + + // relocate half of pair + int err = lfs_alloc(lfs, &dir->pair[1]); + if (err && (err != LFS_ERR_NOSPC || !tired)) { + return err; + } + + tired = false; + continue; + } + + return relocated ? 
LFS_OK_RELOCATED : 0; +} +#endif + +#ifndef LFS_READONLY +static int lfs_dir_splittingcompact(lfs_t *lfs, lfs_mdir_t *dir, + const struct lfs_mattr *attrs, int attrcount, + lfs_mdir_t *source, uint16_t begin, uint16_t end) { + while (true) { + // find size of first split, we do this by halving the split until + // the metadata is guaranteed to fit + // + // Note that this isn't a true binary search, we never increase the + // split size. This may result in poorly distributed metadata but isn't + // worth the extra code size or performance hit to fix. + lfs_size_t split = begin; + while (end - split > 1) { + lfs_size_t size = 0; + int err = lfs_dir_traverse(lfs, + source, 0, 0xffffffff, attrs, attrcount, + LFS_MKTAG(0x400, 0x3ff, 0), + LFS_MKTAG(LFS_TYPE_NAME, 0, 0), + split, end, -split, + lfs_dir_commit_size, &size); + if (err) { + return err; + } + + // space is complicated, we need room for tail, crc, gstate, + // cleanup delete, and we cap at half a block to give room + // for metadata updates. + if (end - split < 0xff + && size <= lfs_min(lfs->cfg->block_size - 36, + lfs_alignup( + (lfs->cfg->metadata_max + ? lfs->cfg->metadata_max + : lfs->cfg->block_size)/2, + lfs->cfg->prog_size))) { + break; + } + + split = split + ((end - split) / 2); + } + + if (split == begin) { + // no split needed + break; + } + + // split into two metadata pairs and continue + int err = lfs_dir_split(lfs, dir, attrs, attrcount, + source, split, end); + if (err && err != LFS_ERR_NOSPC) { + return err; + } + + if (err) { + // we can't allocate a new block, try to compact with degraded + // performance + LFS_WARN("Unable to split {0x%"PRIx32", 0x%"PRIx32"}", + dir->pair[0], dir->pair[1]); + break; + } else { + end = split; + } + } + + if (lfs_dir_needsrelocation(lfs, dir) + && lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) == 0) { + // oh no! we're writing too much to the superblock, + // should we expand? 
+ lfs_ssize_t size = lfs_fs_rawsize(lfs); + if (size < 0) { + return size; + } + + // do we have extra space? littlefs can't reclaim this space + // by itself, so expand cautiously + if ((lfs_size_t)size < lfs->cfg->block_count/2) { + LFS_DEBUG("Expanding superblock at rev %"PRIu32, dir->rev); + int err = lfs_dir_split(lfs, dir, attrs, attrcount, + source, begin, end); + if (err && err != LFS_ERR_NOSPC) { + return err; + } + + if (err) { + // welp, we tried, if we ran out of space there's not much + // we can do, we'll error later if we've become frozen + LFS_WARN("Unable to expand superblock"); + } else { + end = begin; + } + } + } + + return lfs_dir_compact(lfs, dir, attrs, attrcount, source, begin, end); +} +#endif + +#ifndef LFS_READONLY +static int lfs_dir_relocatingcommit(lfs_t *lfs, lfs_mdir_t *dir, + const lfs_block_t pair[2], + const struct lfs_mattr *attrs, int attrcount, + lfs_mdir_t *pdir) { + int state = 0; + + // calculate changes to the directory + bool hasdelete = false; + for (int i = 0; i < attrcount; i++) { + if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_CREATE) { + dir->count += 1; + } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE) { + LFS_ASSERT(dir->count > 0); + dir->count -= 1; + hasdelete = true; + } else if (lfs_tag_type1(attrs[i].tag) == LFS_TYPE_TAIL) { + dir->tail[0] = ((lfs_block_t*)attrs[i].buffer)[0]; + dir->tail[1] = ((lfs_block_t*)attrs[i].buffer)[1]; + dir->split = (lfs_tag_chunk(attrs[i].tag) & 1); + lfs_pair_fromle32(dir->tail); + } + } + + // should we actually drop the directory block? 
+ if (hasdelete && dir->count == 0) { + LFS_ASSERT(pdir); + int err = lfs_fs_pred(lfs, dir->pair, pdir); + if (err && err != LFS_ERR_NOENT) { + return err; + } + + if (err != LFS_ERR_NOENT && pdir->split) { + state = LFS_OK_DROPPED; + goto fixmlist; + } + } + + if (dir->erased) { + // try to commit + struct lfs_commit commit = { + .block = dir->pair[0], + .off = dir->off, + .ptag = dir->etag, + .crc = 0xffffffff, + + .begin = dir->off, + .end = (lfs->cfg->metadata_max ? + lfs->cfg->metadata_max : lfs->cfg->block_size) - 8, + }; + + // traverse attrs that need to be written out + lfs_pair_tole32(dir->tail); + int err = lfs_dir_traverse(lfs, + dir, dir->off, dir->etag, attrs, attrcount, + 0, 0, 0, 0, 0, + lfs_dir_commit_commit, &(struct lfs_dir_commit_commit){ + lfs, &commit}); + lfs_pair_fromle32(dir->tail); + if (err) { + if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) { + goto compact; + } + return err; + } + + // commit any global diffs if we have any + lfs_gstate_t delta = {0}; + lfs_gstate_xor(&delta, &lfs->gstate); + lfs_gstate_xor(&delta, &lfs->gdisk); + lfs_gstate_xor(&delta, &lfs->gdelta); + delta.tag &= ~LFS_MKTAG(0, 0, 0x3ff); + if (!lfs_gstate_iszero(&delta)) { + err = lfs_dir_getgstate(lfs, dir, &delta); + if (err) { + return err; + } + + lfs_gstate_tole32(&delta); + err = lfs_dir_commitattr(lfs, &commit, + LFS_MKTAG(LFS_TYPE_MOVESTATE, 0x3ff, + sizeof(delta)), &delta); + if (err) { + if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) { + goto compact; + } + return err; + } + } + + // finalize commit with the crc + err = lfs_dir_commitcrc(lfs, &commit); + if (err) { + if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) { + goto compact; + } + return err; + } + + // successful commit, update dir + LFS_ASSERT(commit.off % lfs->cfg->prog_size == 0); + dir->off = commit.off; + dir->etag = commit.ptag; + // and update gstate + lfs->gdisk = lfs->gstate; + lfs->gdelta = (lfs_gstate_t){0}; + + goto fixmlist; + } + +compact: + // fall back to compaction + 
lfs_cache_drop(lfs, &lfs->pcache); + + state = lfs_dir_splittingcompact(lfs, dir, attrs, attrcount, + dir, 0, dir->count); + if (state < 0) { + return state; + } + + goto fixmlist; + +fixmlist:; + // this complicated bit of logic is for fixing up any active + // metadata-pairs that we may have affected + // + // note we have to make two passes since the mdir passed to + // lfs_dir_commit could also be in this list, and even then + // we need to copy the pair so they don't get clobbered if we refetch + // our mdir. + lfs_block_t oldpair[2] = {pair[0], pair[1]}; + for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) { + if (lfs_pair_cmp(d->m.pair, oldpair) == 0) { + d->m = *dir; + if (d->m.pair != pair) { + for (int i = 0; i < attrcount; i++) { + if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE && + d->id == lfs_tag_id(attrs[i].tag)) { + d->m.pair[0] = LFS_BLOCK_NULL; + d->m.pair[1] = LFS_BLOCK_NULL; + } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE && + d->id > lfs_tag_id(attrs[i].tag)) { + d->id -= 1; + if (d->type == LFS_TYPE_DIR) { + ((lfs_dir_t*)d)->pos -= 1; + } + } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_CREATE && + d->id >= lfs_tag_id(attrs[i].tag)) { + d->id += 1; + if (d->type == LFS_TYPE_DIR) { + ((lfs_dir_t*)d)->pos += 1; + } + } + } + } + + while (d->id >= d->m.count && d->m.split) { + // we split and id is on tail now + d->id -= d->m.count; + int err = lfs_dir_fetch(lfs, &d->m, d->m.tail); + if (err) { + return err; + } + } + } + } + + return state; +} +#endif + +#ifndef LFS_READONLY +static int lfs_dir_orphaningcommit(lfs_t *lfs, lfs_mdir_t *dir, + const struct lfs_mattr *attrs, int attrcount) { + // check for any inline files that aren't RAM backed and + // forcefully evict them, needed for filesystem consistency + for (lfs_file_t *f = (lfs_file_t*)lfs->mlist; f; f = f->next) { + if (dir != &f->m && lfs_pair_cmp(f->m.pair, dir->pair) == 0 && + f->type == LFS_TYPE_REG && (f->flags & LFS_F_INLINE) && + f->ctz.size > 
lfs->cfg->cache_size) { + int err = lfs_file_outline(lfs, f); + if (err) { + return err; + } + + err = lfs_file_flush(lfs, f); + if (err) { + return err; + } + } + } + + lfs_block_t lpair[2] = {dir->pair[0], dir->pair[1]}; + lfs_mdir_t ldir = *dir; + lfs_mdir_t pdir; + int state = lfs_dir_relocatingcommit(lfs, &ldir, dir->pair, + attrs, attrcount, &pdir); + if (state < 0) { + return state; + } + + // update if we're not in mlist, note we may have already been + // updated if we are in mlist + if (lfs_pair_cmp(dir->pair, lpair) == 0) { + *dir = ldir; + } + + // commit was successful, but may require other changes in the + // filesystem, these would normally be tail recursive, but we have + // flattened them here avoid unbounded stack usage + + // need to drop? + if (state == LFS_OK_DROPPED) { + // steal state + int err = lfs_dir_getgstate(lfs, dir, &lfs->gdelta); + if (err) { + return err; + } + + // steal tail, note that this can't create a recursive drop + lpair[0] = pdir.pair[0]; + lpair[1] = pdir.pair[1]; + lfs_pair_tole32(dir->tail); + state = lfs_dir_relocatingcommit(lfs, &pdir, lpair, LFS_MKATTRS( + {LFS_MKTAG(LFS_TYPE_TAIL + dir->split, 0x3ff, 8), + dir->tail}), + NULL); + lfs_pair_fromle32(dir->tail); + if (state < 0) { + return state; + } + + ldir = pdir; + } + + // need to relocate? 
+ bool orphans = false; + while (state == LFS_OK_RELOCATED) { + LFS_DEBUG("Relocating {0x%"PRIx32", 0x%"PRIx32"} " + "-> {0x%"PRIx32", 0x%"PRIx32"}", + lpair[0], lpair[1], ldir.pair[0], ldir.pair[1]); + state = 0; + + // update internal root + if (lfs_pair_cmp(lpair, lfs->root) == 0) { + lfs->root[0] = ldir.pair[0]; + lfs->root[1] = ldir.pair[1]; + } + + // update internally tracked dirs + for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) { + if (lfs_pair_cmp(lpair, d->m.pair) == 0) { + d->m.pair[0] = ldir.pair[0]; + d->m.pair[1] = ldir.pair[1]; + } + + if (d->type == LFS_TYPE_DIR && + lfs_pair_cmp(lpair, ((lfs_dir_t*)d)->head) == 0) { + ((lfs_dir_t*)d)->head[0] = ldir.pair[0]; + ((lfs_dir_t*)d)->head[1] = ldir.pair[1]; + } + } + + // find parent + lfs_stag_t tag = lfs_fs_parent(lfs, lpair, &pdir); + if (tag < 0 && tag != LFS_ERR_NOENT) { + return tag; + } + + bool hasparent = (tag != LFS_ERR_NOENT); + if (tag != LFS_ERR_NOENT) { + // note that if we have a parent, we must have a pred, so this will + // always create an orphan + int err = lfs_fs_preporphans(lfs, +1); + if (err) { + return err; + } + + // fix pending move in this pair? this looks like an optimization but + // is in fact _required_ since relocating may outdate the move. 
+ uint16_t moveid = 0x3ff; + if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) { + moveid = lfs_tag_id(lfs->gstate.tag); + LFS_DEBUG("Fixing move while relocating " + "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n", + pdir.pair[0], pdir.pair[1], moveid); + lfs_fs_prepmove(lfs, 0x3ff, NULL); + if (moveid < lfs_tag_id(tag)) { + tag -= LFS_MKTAG(0, 1, 0); + } + } + + lfs_block_t ppair[2] = {pdir.pair[0], pdir.pair[1]}; + lfs_pair_tole32(ldir.pair); + state = lfs_dir_relocatingcommit(lfs, &pdir, ppair, LFS_MKATTRS( + {LFS_MKTAG_IF(moveid != 0x3ff, + LFS_TYPE_DELETE, moveid, 0), NULL}, + {tag, ldir.pair}), + NULL); + lfs_pair_fromle32(ldir.pair); + if (state < 0) { + return state; + } + + if (state == LFS_OK_RELOCATED) { + lpair[0] = ppair[0]; + lpair[1] = ppair[1]; + ldir = pdir; + orphans = true; + continue; + } + } + + // find pred + int err = lfs_fs_pred(lfs, lpair, &pdir); + if (err && err != LFS_ERR_NOENT) { + return err; + } + LFS_ASSERT(!(hasparent && err == LFS_ERR_NOENT)); + + // if we can't find dir, it must be new + if (err != LFS_ERR_NOENT) { + if (lfs_gstate_hasorphans(&lfs->gstate)) { + // next step, clean up orphans + err = lfs_fs_preporphans(lfs, -hasparent); + if (err) { + return err; + } + } + + // fix pending move in this pair? this looks like an optimization + // but is in fact _required_ since relocating may outdate the move. 
+ uint16_t moveid = 0x3ff; + if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) { + moveid = lfs_tag_id(lfs->gstate.tag); + LFS_DEBUG("Fixing move while relocating " + "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n", + pdir.pair[0], pdir.pair[1], moveid); + lfs_fs_prepmove(lfs, 0x3ff, NULL); + } + + // replace bad pair, either we clean up desync, or no desync occured + lpair[0] = pdir.pair[0]; + lpair[1] = pdir.pair[1]; + lfs_pair_tole32(ldir.pair); + state = lfs_dir_relocatingcommit(lfs, &pdir, lpair, LFS_MKATTRS( + {LFS_MKTAG_IF(moveid != 0x3ff, + LFS_TYPE_DELETE, moveid, 0), NULL}, + {LFS_MKTAG(LFS_TYPE_TAIL + pdir.split, 0x3ff, 8), + ldir.pair}), + NULL); + lfs_pair_fromle32(ldir.pair); + if (state < 0) { + return state; + } + + ldir = pdir; + } + } + + return orphans ? LFS_OK_ORPHANED : 0; +} +#endif + +#ifndef LFS_READONLY +static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir, + const struct lfs_mattr *attrs, int attrcount) { + int orphans = lfs_dir_orphaningcommit(lfs, dir, attrs, attrcount); + if (orphans < 0) { + return orphans; + } + + if (orphans) { + // make sure we've removed all orphans, this is a noop if there + // are none, but if we had nested blocks failures we may have + // created some + int err = lfs_fs_deorphan(lfs, false); + if (err) { + return err; + } + } + + return 0; +} +#endif + + +/// Top level directory operations /// +#ifndef LFS_READONLY +static int lfs_rawmkdir(lfs_t *lfs, const char *path) { + // deorphan if we haven't yet, needed at most once after poweron + int err = lfs_fs_forceconsistency(lfs); + if (err) { + return err; + } + + struct lfs_mlist cwd; + cwd.next = lfs->mlist; + uint16_t id; + err = lfs_dir_find(lfs, &cwd.m, &path, &id); + if (!(err == LFS_ERR_NOENT && id != 0x3ff)) { + return (err < 0) ? 
err : LFS_ERR_EXIST; + } + + // check that name fits + lfs_size_t nlen = strlen(path); + if (nlen > lfs->name_max) { + return LFS_ERR_NAMETOOLONG; + } + + // build up new directory + lfs_alloc_ack(lfs); + lfs_mdir_t dir; + err = lfs_dir_alloc(lfs, &dir); + if (err) { + return err; + } + + // find end of list + lfs_mdir_t pred = cwd.m; + while (pred.split) { + err = lfs_dir_fetch(lfs, &pred, pred.tail); + if (err) { + return err; + } + } + + // setup dir + lfs_pair_tole32(pred.tail); + err = lfs_dir_commit(lfs, &dir, LFS_MKATTRS( + {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), pred.tail})); + lfs_pair_fromle32(pred.tail); + if (err) { + return err; + } + + // current block not end of list? + if (cwd.m.split) { + // update tails, this creates a desync + err = lfs_fs_preporphans(lfs, +1); + if (err) { + return err; + } + + // it's possible our predecessor has to be relocated, and if + // our parent is our predecessor's predecessor, this could have + // caused our parent to go out of date, fortunately we can hook + // ourselves into littlefs to catch this + cwd.type = 0; + cwd.id = 0; + lfs->mlist = &cwd; + + lfs_pair_tole32(dir.pair); + err = lfs_dir_commit(lfs, &pred, LFS_MKATTRS( + {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir.pair})); + lfs_pair_fromle32(dir.pair); + if (err) { + lfs->mlist = cwd.next; + return err; + } + + lfs->mlist = cwd.next; + err = lfs_fs_preporphans(lfs, -1); + if (err) { + return err; + } + } + + // now insert into our parent block + lfs_pair_tole32(dir.pair); + err = lfs_dir_commit(lfs, &cwd.m, LFS_MKATTRS( + {LFS_MKTAG(LFS_TYPE_CREATE, id, 0), NULL}, + {LFS_MKTAG(LFS_TYPE_DIR, id, nlen), path}, + {LFS_MKTAG(LFS_TYPE_DIRSTRUCT, id, 8), dir.pair}, + {LFS_MKTAG_IF(!cwd.m.split, + LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir.pair})); + lfs_pair_fromle32(dir.pair); + if (err) { + return err; + } + + return 0; +} +#endif + +static int lfs_dir_rawopen(lfs_t *lfs, lfs_dir_t *dir, const char *path) { + lfs_stag_t tag = lfs_dir_find(lfs, &dir->m, &path, NULL); + if 
(tag < 0) { + return tag; + } + + if (lfs_tag_type3(tag) != LFS_TYPE_DIR) { + return LFS_ERR_NOTDIR; + } + + lfs_block_t pair[2]; + if (lfs_tag_id(tag) == 0x3ff) { + // handle root dir separately + pair[0] = lfs->root[0]; + pair[1] = lfs->root[1]; + } else { + // get dir pair from parent + lfs_stag_t res = lfs_dir_get(lfs, &dir->m, LFS_MKTAG(0x700, 0x3ff, 0), + LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), pair); + if (res < 0) { + return res; + } + lfs_pair_fromle32(pair); + } + + // fetch first pair + int err = lfs_dir_fetch(lfs, &dir->m, pair); + if (err) { + return err; + } + + // setup entry + dir->head[0] = dir->m.pair[0]; + dir->head[1] = dir->m.pair[1]; + dir->id = 0; + dir->pos = 0; + + // add to list of mdirs + dir->type = LFS_TYPE_DIR; + lfs_mlist_append(lfs, (struct lfs_mlist *)dir); + + return 0; +} + +static int lfs_dir_rawclose(lfs_t *lfs, lfs_dir_t *dir) { + // remove from list of mdirs + lfs_mlist_remove(lfs, (struct lfs_mlist *)dir); + + return 0; +} + +static int lfs_dir_rawread(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info) { + memset(info, 0, sizeof(*info)); + + // special offset for '.' and '..' + if (dir->pos == 0) { + info->type = LFS_TYPE_DIR; + strcpy(info->name, "."); + dir->pos += 1; + return true; + } else if (dir->pos == 1) { + info->type = LFS_TYPE_DIR; + strcpy(info->name, ".."); + dir->pos += 1; + return true; + } + + while (true) { + if (dir->id == dir->m.count) { + if (!dir->m.split) { + return false; + } + + int err = lfs_dir_fetch(lfs, &dir->m, dir->m.tail); + if (err) { + return err; + } + + dir->id = 0; + } + + int err = lfs_dir_getinfo(lfs, &dir->m, dir->id, info); + if (err && err != LFS_ERR_NOENT) { + return err; + } + + dir->id += 1; + if (err != LFS_ERR_NOENT) { + break; + } + } + + dir->pos += 1; + return true; +} + +static int lfs_dir_rawseek(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off) { + // simply walk from head dir + int err = lfs_dir_rawrewind(lfs, dir); + if (err) { + return err; + } + + // first two for ./.. 
+ dir->pos = lfs_min(2, off); + off -= dir->pos; + + // skip superblock entry + dir->id = (off > 0 && lfs_pair_cmp(dir->head, lfs->root) == 0); + + while (off > 0) { + int diff = lfs_min(dir->m.count - dir->id, off); + dir->id += diff; + dir->pos += diff; + off -= diff; + + if (dir->id == dir->m.count) { + if (!dir->m.split) { + return LFS_ERR_INVAL; + } + + err = lfs_dir_fetch(lfs, &dir->m, dir->m.tail); + if (err) { + return err; + } + + dir->id = 0; + } + } + + return 0; +} + +static lfs_soff_t lfs_dir_rawtell(lfs_t *lfs, lfs_dir_t *dir) { + (void)lfs; + return dir->pos; +} + +static int lfs_dir_rawrewind(lfs_t *lfs, lfs_dir_t *dir) { + // reload the head dir + int err = lfs_dir_fetch(lfs, &dir->m, dir->head); + if (err) { + return err; + } + + dir->id = 0; + dir->pos = 0; + return 0; +} + + +/// File index list operations /// +static int lfs_ctz_index(lfs_t *lfs, lfs_off_t *off) { + lfs_off_t size = *off; + lfs_off_t b = lfs->cfg->block_size - 2*4; + lfs_off_t i = size / b; + if (i == 0) { + return 0; + } + + i = (size - 4*(lfs_popc(i-1)+2)) / b; + *off = size - b*i - 4*lfs_popc(i); + return i; +} + +static int lfs_ctz_find(lfs_t *lfs, + const lfs_cache_t *pcache, lfs_cache_t *rcache, + lfs_block_t head, lfs_size_t size, + lfs_size_t pos, lfs_block_t *block, lfs_off_t *off) { + if (size == 0) { + *block = LFS_BLOCK_NULL; + *off = 0; + return 0; + } + + lfs_off_t current = lfs_ctz_index(lfs, &(lfs_off_t){size-1}); + lfs_off_t target = lfs_ctz_index(lfs, &pos); + + while (current > target) { + lfs_size_t skip = lfs_min( + lfs_npw2(current-target+1) - 1, + lfs_ctz(current)); + + int err = lfs_bd_read(lfs, + pcache, rcache, sizeof(head), + head, 4*skip, &head, sizeof(head)); + head = lfs_fromle32(head); + if (err) { + return err; + } + + current -= 1 << skip; + } + + *block = head; + *off = pos; + return 0; +} + +#ifndef LFS_READONLY +static int lfs_ctz_extend(lfs_t *lfs, + lfs_cache_t *pcache, lfs_cache_t *rcache, + lfs_block_t head, lfs_size_t size, + lfs_block_t 
*block, lfs_off_t *off) { + while (true) { + // go ahead and grab a block + lfs_block_t nblock; + int err = lfs_alloc(lfs, &nblock); + if (err) { + return err; + } + + { + err = lfs_bd_erase(lfs, nblock); + if (err) { + if (err == LFS_ERR_CORRUPT) { + goto relocate; + } + return err; + } + + if (size == 0) { + *block = nblock; + *off = 0; + return 0; + } + + lfs_size_t noff = size - 1; + lfs_off_t index = lfs_ctz_index(lfs, &noff); + noff = noff + 1; + + // just copy out the last block if it is incomplete + if (noff != lfs->cfg->block_size) { + for (lfs_off_t i = 0; i < noff; i++) { + uint8_t data; + err = lfs_bd_read(lfs, + NULL, rcache, noff-i, + head, i, &data, 1); + if (err) { + return err; + } + + err = lfs_bd_prog(lfs, + pcache, rcache, true, + nblock, i, &data, 1); + if (err) { + if (err == LFS_ERR_CORRUPT) { + goto relocate; + } + return err; + } + } + + *block = nblock; + *off = noff; + return 0; + } + + // append block + index += 1; + lfs_size_t skips = lfs_ctz(index) + 1; + lfs_block_t nhead = head; + for (lfs_off_t i = 0; i < skips; i++) { + nhead = lfs_tole32(nhead); + err = lfs_bd_prog(lfs, pcache, rcache, true, + nblock, 4*i, &nhead, 4); + nhead = lfs_fromle32(nhead); + if (err) { + if (err == LFS_ERR_CORRUPT) { + goto relocate; + } + return err; + } + + if (i != skips-1) { + err = lfs_bd_read(lfs, + NULL, rcache, sizeof(nhead), + nhead, 4*i, &nhead, sizeof(nhead)); + nhead = lfs_fromle32(nhead); + if (err) { + return err; + } + } + } + + *block = nblock; + *off = 4*skips; + return 0; + } + +relocate: + LFS_DEBUG("Bad block at 0x%"PRIx32, nblock); + + // just clear cache and try a new block + lfs_cache_drop(lfs, pcache); + } +} +#endif + +static int lfs_ctz_traverse(lfs_t *lfs, + const lfs_cache_t *pcache, lfs_cache_t *rcache, + lfs_block_t head, lfs_size_t size, + int (*cb)(void*, lfs_block_t), void *data) { + if (size == 0) { + return 0; + } + + lfs_off_t index = lfs_ctz_index(lfs, &(lfs_off_t){size-1}); + + while (true) { + int err = cb(data, 
head); + if (err) { + return err; + } + + if (index == 0) { + return 0; + } + + lfs_block_t heads[2]; + int count = 2 - (index & 1); + err = lfs_bd_read(lfs, + pcache, rcache, count*sizeof(head), + head, 0, &heads, count*sizeof(head)); + heads[0] = lfs_fromle32(heads[0]); + heads[1] = lfs_fromle32(heads[1]); + if (err) { + return err; + } + + for (int i = 0; i < count-1; i++) { + err = cb(data, heads[i]); + if (err) { + return err; + } + } + + head = heads[count-1]; + index -= count; + } +} + + +/// Top level file operations /// +static int lfs_file_rawopencfg(lfs_t *lfs, lfs_file_t *file, + const char *path, int flags, + const struct lfs_file_config *cfg) { +#ifndef LFS_READONLY + // deorphan if we haven't yet, needed at most once after poweron + if ((flags & LFS_O_WRONLY) == LFS_O_WRONLY) { + int err = lfs_fs_forceconsistency(lfs); + if (err) { + return err; + } + } +#else + LFS_ASSERT((flags & LFS_O_RDONLY) == LFS_O_RDONLY); +#endif + + // setup simple file details + int err; + file->cfg = cfg; + file->flags = flags; + file->pos = 0; + file->off = 0; + file->cache.buffer = NULL; + + // allocate entry for file if it doesn't exist + lfs_stag_t tag = lfs_dir_find(lfs, &file->m, &path, &file->id); + if (tag < 0 && !(tag == LFS_ERR_NOENT && file->id != 0x3ff)) { + err = tag; + goto cleanup; + } + + // get id, add to list of mdirs to catch update changes + file->type = LFS_TYPE_REG; + lfs_mlist_append(lfs, (struct lfs_mlist *)file); + +#ifdef LFS_READONLY + if (tag == LFS_ERR_NOENT) { + err = LFS_ERR_NOENT; + goto cleanup; +#else + if (tag == LFS_ERR_NOENT) { + if (!(flags & LFS_O_CREAT)) { + err = LFS_ERR_NOENT; + goto cleanup; + } + + // check that name fits + lfs_size_t nlen = strlen(path); + if (nlen > lfs->name_max) { + err = LFS_ERR_NAMETOOLONG; + goto cleanup; + } + + // get next slot and create entry to remember name + err = lfs_dir_commit(lfs, &file->m, LFS_MKATTRS( + {LFS_MKTAG(LFS_TYPE_CREATE, file->id, 0), NULL}, + {LFS_MKTAG(LFS_TYPE_REG, file->id, nlen), 
path}, + {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0), NULL})); + + // it may happen that the file name doesn't fit in the metadata blocks, e.g., a 256 byte file name will + // not fit in a 128 byte block. + err = (err == LFS_ERR_NOSPC) ? LFS_ERR_NAMETOOLONG : err; + if (err) { + goto cleanup; + } + + tag = LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, 0); + } else if (flags & LFS_O_EXCL) { + err = LFS_ERR_EXIST; + goto cleanup; +#endif + } else if (lfs_tag_type3(tag) != LFS_TYPE_REG) { + err = LFS_ERR_ISDIR; + goto cleanup; +#ifndef LFS_READONLY + } else if (flags & LFS_O_TRUNC) { + // truncate if requested + tag = LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0); + file->flags |= LFS_F_DIRTY; +#endif + } else { + // try to load what's on disk, if it's inlined we'll fix it later + tag = lfs_dir_get(lfs, &file->m, LFS_MKTAG(0x700, 0x3ff, 0), + LFS_MKTAG(LFS_TYPE_STRUCT, file->id, 8), &file->ctz); + if (tag < 0) { + err = tag; + goto cleanup; + } + lfs_ctz_fromle32(&file->ctz); + } + + // fetch attrs + for (unsigned i = 0; i < file->cfg->attr_count; i++) { + // if opened for read / read-write operations + if ((file->flags & LFS_O_RDONLY) == LFS_O_RDONLY) { + lfs_stag_t res = lfs_dir_get(lfs, &file->m, + LFS_MKTAG(0x7ff, 0x3ff, 0), + LFS_MKTAG(LFS_TYPE_USERATTR + file->cfg->attrs[i].type, + file->id, file->cfg->attrs[i].size), + file->cfg->attrs[i].buffer); + if (res < 0 && res != LFS_ERR_NOENT) { + err = res; + goto cleanup; + } + } + +#ifndef LFS_READONLY + // if opened for write / read-write operations + if ((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY) { + if (file->cfg->attrs[i].size > lfs->attr_max) { + err = LFS_ERR_NOSPC; + goto cleanup; + } + + file->flags |= LFS_F_DIRTY; + } +#endif + } + + // allocate buffer if needed + if (file->cfg->buffer) { + file->cache.buffer = file->cfg->buffer; + } else { + file->cache.buffer = lfs_malloc(lfs->cfg->cache_size); + if (!file->cache.buffer) { + err = LFS_ERR_NOMEM; + goto cleanup; + } + } + + // zero to avoid information leak + 
lfs_cache_zero(lfs, &file->cache); + + if (lfs_tag_type3(tag) == LFS_TYPE_INLINESTRUCT) { + // load inline files + file->ctz.head = LFS_BLOCK_INLINE; + file->ctz.size = lfs_tag_size(tag); + file->flags |= LFS_F_INLINE; + file->cache.block = file->ctz.head; + file->cache.off = 0; + file->cache.size = lfs->cfg->cache_size; + + // don't always read (may be new/trunc file) + if (file->ctz.size > 0) { + lfs_stag_t res = lfs_dir_get(lfs, &file->m, + LFS_MKTAG(0x700, 0x3ff, 0), + LFS_MKTAG(LFS_TYPE_STRUCT, file->id, + lfs_min(file->cache.size, 0x3fe)), + file->cache.buffer); + if (res < 0) { + err = res; + goto cleanup; + } + } + } + + return 0; + +cleanup: + // clean up lingering resources +#ifndef LFS_READONLY + file->flags |= LFS_F_ERRED; +#endif + lfs_file_rawclose(lfs, file); + return err; +} + +static int lfs_file_rawopen(lfs_t *lfs, lfs_file_t *file, + const char *path, int flags) { + static const struct lfs_file_config defaults = {0}; + int err = lfs_file_rawopencfg(lfs, file, path, flags, &defaults); + return err; +} + +static int lfs_file_rawclose(lfs_t *lfs, lfs_file_t *file) { +#ifndef LFS_READONLY + int err = lfs_file_rawsync(lfs, file); +#else + int err = 0; +#endif + + // remove from list of mdirs + lfs_mlist_remove(lfs, (struct lfs_mlist*)file); + + // clean up memory + if (!file->cfg->buffer) { + lfs_free(file->cache.buffer); + } + + return err; +} + + +#ifndef LFS_READONLY +static int lfs_file_relocate(lfs_t *lfs, lfs_file_t *file) { + while (true) { + // just relocate what exists into new block + lfs_block_t nblock; + int err = lfs_alloc(lfs, &nblock); + if (err) { + return err; + } + + err = lfs_bd_erase(lfs, nblock); + if (err) { + if (err == LFS_ERR_CORRUPT) { + goto relocate; + } + return err; + } + + // either read from dirty cache or disk + for (lfs_off_t i = 0; i < file->off; i++) { + uint8_t data; + if (file->flags & LFS_F_INLINE) { + err = lfs_dir_getread(lfs, &file->m, + // note we evict inline files before they can be dirty + NULL, 
&file->cache, file->off-i, + LFS_MKTAG(0xfff, 0x1ff, 0), + LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0), + i, &data, 1); + if (err) { + return err; + } + } else { + err = lfs_bd_read(lfs, + &file->cache, &lfs->rcache, file->off-i, + file->block, i, &data, 1); + if (err) { + return err; + } + } + + err = lfs_bd_prog(lfs, + &lfs->pcache, &lfs->rcache, true, + nblock, i, &data, 1); + if (err) { + if (err == LFS_ERR_CORRUPT) { + goto relocate; + } + return err; + } + } + + // copy over new state of file + memcpy(file->cache.buffer, lfs->pcache.buffer, lfs->cfg->cache_size); + file->cache.block = lfs->pcache.block; + file->cache.off = lfs->pcache.off; + file->cache.size = lfs->pcache.size; + lfs_cache_zero(lfs, &lfs->pcache); + + file->block = nblock; + file->flags |= LFS_F_WRITING; + return 0; + +relocate: + LFS_DEBUG("Bad block at 0x%"PRIx32, nblock); + + // just clear cache and try a new block + lfs_cache_drop(lfs, &lfs->pcache); + } +} +#endif + +#ifndef LFS_READONLY +static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file) { + file->off = file->pos; + lfs_alloc_ack(lfs); + int err = lfs_file_relocate(lfs, file); + if (err) { + return err; + } + + file->flags &= ~LFS_F_INLINE; + return 0; +} +#endif + +static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file) { + if (file->flags & LFS_F_READING) { + if (!(file->flags & LFS_F_INLINE)) { + lfs_cache_drop(lfs, &file->cache); + } + file->flags &= ~LFS_F_READING; + } + +#ifndef LFS_READONLY + if (file->flags & LFS_F_WRITING) { + lfs_off_t pos = file->pos; + + if (!(file->flags & LFS_F_INLINE)) { + // copy over anything after current branch + lfs_file_t orig = { + .ctz.head = file->ctz.head, + .ctz.size = file->ctz.size, + .flags = LFS_O_RDONLY, + .pos = file->pos, + .cache = lfs->rcache, + }; + lfs_cache_drop(lfs, &lfs->rcache); + + while (file->pos < file->ctz.size) { + // copy over a byte at a time, leave it up to caching + // to make this efficient + uint8_t data; + lfs_ssize_t res = lfs_file_flushedread(lfs, &orig, 
&data, 1); + if (res < 0) { + return res; + } + + res = lfs_file_flushedwrite(lfs, file, &data, 1); + if (res < 0) { + return res; + } + + // keep our reference to the rcache in sync + if (lfs->rcache.block != LFS_BLOCK_NULL) { + lfs_cache_drop(lfs, &orig.cache); + lfs_cache_drop(lfs, &lfs->rcache); + } + } + + // write out what we have + while (true) { + int err = lfs_bd_flush(lfs, &file->cache, &lfs->rcache, true); + if (err) { + if (err == LFS_ERR_CORRUPT) { + goto relocate; + } + return err; + } + + break; + +relocate: + LFS_DEBUG("Bad block at 0x%"PRIx32, file->block); + err = lfs_file_relocate(lfs, file); + if (err) { + return err; + } + } + } else { + file->pos = lfs_max(file->pos, file->ctz.size); + } + + // actual file updates + file->ctz.head = file->block; + file->ctz.size = file->pos; + file->flags &= ~LFS_F_WRITING; + file->flags |= LFS_F_DIRTY; + + file->pos = pos; + } +#endif + + return 0; +} + +#ifndef LFS_READONLY +static int lfs_file_rawsync(lfs_t *lfs, lfs_file_t *file) { + if (file->flags & LFS_F_ERRED) { + // it's not safe to do anything if our file errored + return 0; + } + + int err = lfs_file_flush(lfs, file); + if (err) { + file->flags |= LFS_F_ERRED; + return err; + } + + + if ((file->flags & LFS_F_DIRTY) && + !lfs_pair_isnull(file->m.pair)) { + // update dir entry + uint16_t type; + const void *buffer; + lfs_size_t size; + struct lfs_ctz ctz; + if (file->flags & LFS_F_INLINE) { + // inline the whole file + type = LFS_TYPE_INLINESTRUCT; + buffer = file->cache.buffer; + size = file->ctz.size; + } else { + // update the ctz reference + type = LFS_TYPE_CTZSTRUCT; + // copy ctz so alloc will work during a relocate + ctz = file->ctz; + lfs_ctz_tole32(&ctz); + buffer = &ctz; + size = sizeof(ctz); + } + + // commit file data and attributes + err = lfs_dir_commit(lfs, &file->m, LFS_MKATTRS( + {LFS_MKTAG(type, file->id, size), buffer}, + {LFS_MKTAG(LFS_FROM_USERATTRS, file->id, + file->cfg->attr_count), file->cfg->attrs})); + if (err) { + 
file->flags |= LFS_F_ERRED; + return err; + } + + file->flags &= ~LFS_F_DIRTY; + } + + return 0; +} +#endif + +static lfs_ssize_t lfs_file_flushedread(lfs_t *lfs, lfs_file_t *file, + void *buffer, lfs_size_t size) { + uint8_t *data = buffer; + lfs_size_t nsize = size; + + if (file->pos >= file->ctz.size) { + // eof if past end + return 0; + } + + size = lfs_min(size, file->ctz.size - file->pos); + nsize = size; + + while (nsize > 0) { + // check if we need a new block + if (!(file->flags & LFS_F_READING) || + file->off == lfs->cfg->block_size) { + if (!(file->flags & LFS_F_INLINE)) { + int err = lfs_ctz_find(lfs, NULL, &file->cache, + file->ctz.head, file->ctz.size, + file->pos, &file->block, &file->off); + if (err) { + return err; + } + } else { + file->block = LFS_BLOCK_INLINE; + file->off = file->pos; + } + + file->flags |= LFS_F_READING; + } + + // read as much as we can in current block + lfs_size_t diff = lfs_min(nsize, lfs->cfg->block_size - file->off); + if (file->flags & LFS_F_INLINE) { + int err = lfs_dir_getread(lfs, &file->m, + NULL, &file->cache, lfs->cfg->block_size, + LFS_MKTAG(0xfff, 0x1ff, 0), + LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0), + file->off, data, diff); + if (err) { + return err; + } + } else { + int err = lfs_bd_read(lfs, + NULL, &file->cache, lfs->cfg->block_size, + file->block, file->off, data, diff); + if (err) { + return err; + } + } + + file->pos += diff; + file->off += diff; + data += diff; + nsize -= diff; + } + + return size; +} + +static lfs_ssize_t lfs_file_rawread(lfs_t *lfs, lfs_file_t *file, + void *buffer, lfs_size_t size) { + LFS_ASSERT((file->flags & LFS_O_RDONLY) == LFS_O_RDONLY); + +#ifndef LFS_READONLY + if (file->flags & LFS_F_WRITING) { + // flush out any writes + int err = lfs_file_flush(lfs, file); + if (err) { + return err; + } + } +#endif + + return lfs_file_flushedread(lfs, file, buffer, size); +} + + +#ifndef LFS_READONLY +static lfs_ssize_t lfs_file_flushedwrite(lfs_t *lfs, lfs_file_t *file, + const void 
*buffer, lfs_size_t size) { + const uint8_t *data = buffer; + lfs_size_t nsize = size; + + if ((file->flags & LFS_F_INLINE) && + lfs_max(file->pos+nsize, file->ctz.size) > + lfs_min(0x3fe, lfs_min( + lfs->cfg->cache_size, + (lfs->cfg->metadata_max ? + lfs->cfg->metadata_max : lfs->cfg->block_size) / 8))) { + // inline file doesn't fit anymore + int err = lfs_file_outline(lfs, file); + if (err) { + file->flags |= LFS_F_ERRED; + return err; + } + } + + while (nsize > 0) { + // check if we need a new block + if (!(file->flags & LFS_F_WRITING) || + file->off == lfs->cfg->block_size) { + if (!(file->flags & LFS_F_INLINE)) { + if (!(file->flags & LFS_F_WRITING) && file->pos > 0) { + // find out which block we're extending from + int err = lfs_ctz_find(lfs, NULL, &file->cache, + file->ctz.head, file->ctz.size, + file->pos-1, &file->block, &file->off); + if (err) { + file->flags |= LFS_F_ERRED; + return err; + } + + // mark cache as dirty since we may have read data into it + lfs_cache_zero(lfs, &file->cache); + } + + // extend file with new blocks + lfs_alloc_ack(lfs); + int err = lfs_ctz_extend(lfs, &file->cache, &lfs->rcache, + file->block, file->pos, + &file->block, &file->off); + if (err) { + file->flags |= LFS_F_ERRED; + return err; + } + } else { + file->block = LFS_BLOCK_INLINE; + file->off = file->pos; + } + + file->flags |= LFS_F_WRITING; + } + + // program as much as we can in current block + lfs_size_t diff = lfs_min(nsize, lfs->cfg->block_size - file->off); + while (true) { + int err = lfs_bd_prog(lfs, &file->cache, &lfs->rcache, true, + file->block, file->off, data, diff); + if (err) { + if (err == LFS_ERR_CORRUPT) { + goto relocate; + } + file->flags |= LFS_F_ERRED; + return err; + } + + break; +relocate: + err = lfs_file_relocate(lfs, file); + if (err) { + file->flags |= LFS_F_ERRED; + return err; + } + } + + file->pos += diff; + file->off += diff; + data += diff; + nsize -= diff; + + lfs_alloc_ack(lfs); + } + + return size; +} + +static lfs_ssize_t 
lfs_file_rawwrite(lfs_t *lfs, lfs_file_t *file, + const void *buffer, lfs_size_t size) { + LFS_ASSERT((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY); + + if (file->flags & LFS_F_READING) { + // drop any reads + int err = lfs_file_flush(lfs, file); + if (err) { + return err; + } + } + + if ((file->flags & LFS_O_APPEND) && file->pos < file->ctz.size) { + file->pos = file->ctz.size; + } + + if (file->pos + size > lfs->file_max) { + // Larger than file limit? + return LFS_ERR_FBIG; + } + + if (!(file->flags & LFS_F_WRITING) && file->pos > file->ctz.size) { + // fill with zeros + lfs_off_t pos = file->pos; + file->pos = file->ctz.size; + + while (file->pos < pos) { + lfs_ssize_t res = lfs_file_flushedwrite(lfs, file, &(uint8_t){0}, 1); + if (res < 0) { + return res; + } + } + } + + lfs_ssize_t nsize = lfs_file_flushedwrite(lfs, file, buffer, size); + if (nsize < 0) { + return nsize; + } + + file->flags &= ~LFS_F_ERRED; + return nsize; +} +#endif + +static lfs_soff_t lfs_file_rawseek(lfs_t *lfs, lfs_file_t *file, + lfs_soff_t off, int whence) { + // find new pos + lfs_off_t npos = file->pos; + if (whence == LFS_SEEK_SET) { + npos = off; + } else if (whence == LFS_SEEK_CUR) { + if ((lfs_soff_t)file->pos + off < 0) { + return LFS_ERR_INVAL; + } else { + npos = file->pos + off; + } + } else if (whence == LFS_SEEK_END) { + lfs_soff_t res = lfs_file_rawsize(lfs, file) + off; + if (res < 0) { + return LFS_ERR_INVAL; + } else { + npos = res; + } + } + + if (npos > lfs->file_max) { + // file position out of range + return LFS_ERR_INVAL; + } + + if (file->pos == npos) { + // noop - position has not changed + return npos; + } + + // if we're only reading and our new offset is still in the file's cache + // we can avoid flushing and needing to reread the data + if ( +#ifndef LFS_READONLY + !(file->flags & LFS_F_WRITING) +#else + true +#endif + ) { + int oindex = lfs_ctz_index(lfs, &(lfs_off_t){file->pos}); + lfs_off_t noff = npos; + int nindex = lfs_ctz_index(lfs, &noff); + if 
(oindex == nindex + && noff >= file->cache.off + && noff < file->cache.off + file->cache.size) { + file->pos = npos; + file->off = noff; + return npos; + } + } + + // write out everything beforehand, may be noop if rdonly + int err = lfs_file_flush(lfs, file); + if (err) { + return err; + } + + // update pos + file->pos = npos; + return npos; +} + +#ifndef LFS_READONLY +static int lfs_file_rawtruncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) { + LFS_ASSERT((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY); + + if (size > LFS_FILE_MAX) { + return LFS_ERR_INVAL; + } + + lfs_off_t pos = file->pos; + lfs_off_t oldsize = lfs_file_rawsize(lfs, file); + if (size < oldsize) { + // need to flush since directly changing metadata + int err = lfs_file_flush(lfs, file); + if (err) { + return err; + } + + // lookup new head in ctz skip list + err = lfs_ctz_find(lfs, NULL, &file->cache, + file->ctz.head, file->ctz.size, + size, &file->block, &file->off); + if (err) { + return err; + } + + // need to set pos/block/off consistently so seeking back to + // the old position does not get confused + file->pos = size; + file->ctz.head = file->block; + file->ctz.size = size; + file->flags |= LFS_F_DIRTY | LFS_F_READING; + } else if (size > oldsize) { + // flush+seek if not already at end + lfs_soff_t res = lfs_file_rawseek(lfs, file, 0, LFS_SEEK_END); + if (res < 0) { + return (int)res; + } + + // fill with zeros + while (file->pos < size) { + res = lfs_file_rawwrite(lfs, file, &(uint8_t){0}, 1); + if (res < 0) { + return (int)res; + } + } + } + + // restore pos + lfs_soff_t res = lfs_file_rawseek(lfs, file, pos, LFS_SEEK_SET); + if (res < 0) { + return (int)res; + } + + return 0; +} +#endif + +static lfs_soff_t lfs_file_rawtell(lfs_t *lfs, lfs_file_t *file) { + (void)lfs; + return file->pos; +} + +static int lfs_file_rawrewind(lfs_t *lfs, lfs_file_t *file) { + lfs_soff_t res = lfs_file_rawseek(lfs, file, 0, LFS_SEEK_SET); + if (res < 0) { + return (int)res; + } + + return 0; +} + 
+static lfs_soff_t lfs_file_rawsize(lfs_t *lfs, lfs_file_t *file) { + (void)lfs; + +#ifndef LFS_READONLY + if (file->flags & LFS_F_WRITING) { + return lfs_max(file->pos, file->ctz.size); + } +#endif + + return file->ctz.size; +} + + +/// General fs operations /// +static int lfs_rawstat(lfs_t *lfs, const char *path, struct lfs_info *info) { + lfs_mdir_t cwd; + lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL); + if (tag < 0) { + return (int)tag; + } + + return lfs_dir_getinfo(lfs, &cwd, lfs_tag_id(tag), info); +} + +#ifndef LFS_READONLY +static int lfs_rawremove(lfs_t *lfs, const char *path) { + // deorphan if we haven't yet, needed at most once after poweron + int err = lfs_fs_forceconsistency(lfs); + if (err) { + return err; + } + + lfs_mdir_t cwd; + lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL); + if (tag < 0 || lfs_tag_id(tag) == 0x3ff) { + return (tag < 0) ? (int)tag : LFS_ERR_INVAL; + } + + struct lfs_mlist dir; + dir.next = lfs->mlist; + if (lfs_tag_type3(tag) == LFS_TYPE_DIR) { + // must be empty before removal + lfs_block_t pair[2]; + lfs_stag_t res = lfs_dir_get(lfs, &cwd, LFS_MKTAG(0x700, 0x3ff, 0), + LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), pair); + if (res < 0) { + return (int)res; + } + lfs_pair_fromle32(pair); + + err = lfs_dir_fetch(lfs, &dir.m, pair); + if (err) { + return err; + } + + if (dir.m.count > 0 || dir.m.split) { + return LFS_ERR_NOTEMPTY; + } + + // mark fs as orphaned + err = lfs_fs_preporphans(lfs, +1); + if (err) { + return err; + } + + // I know it's crazy but yes, dir can be changed by our parent's + // commit (if predecessor is child) + dir.type = 0; + dir.id = 0; + lfs->mlist = &dir; + } + + // delete the entry + err = lfs_dir_commit(lfs, &cwd, LFS_MKATTRS( + {LFS_MKTAG(LFS_TYPE_DELETE, lfs_tag_id(tag), 0), NULL})); + if (err) { + lfs->mlist = dir.next; + return err; + } + + lfs->mlist = dir.next; + if (lfs_tag_type3(tag) == LFS_TYPE_DIR) { + // fix orphan + err = lfs_fs_preporphans(lfs, -1); + if (err) { + 
return err; + } + + err = lfs_fs_pred(lfs, dir.m.pair, &cwd); + if (err) { + return err; + } + + err = lfs_dir_drop(lfs, &cwd, &dir.m); + if (err) { + return err; + } + } + + return 0; +} +#endif + +#ifndef LFS_READONLY +static int lfs_rawrename(lfs_t *lfs, const char *oldpath, const char *newpath) { + // deorphan if we haven't yet, needed at most once after poweron + int err = lfs_fs_forceconsistency(lfs); + if (err) { + return err; + } + + // find old entry + lfs_mdir_t oldcwd; + lfs_stag_t oldtag = lfs_dir_find(lfs, &oldcwd, &oldpath, NULL); + if (oldtag < 0 || lfs_tag_id(oldtag) == 0x3ff) { + return (oldtag < 0) ? (int)oldtag : LFS_ERR_INVAL; + } + + // find new entry + lfs_mdir_t newcwd; + uint16_t newid; + lfs_stag_t prevtag = lfs_dir_find(lfs, &newcwd, &newpath, &newid); + if ((prevtag < 0 || lfs_tag_id(prevtag) == 0x3ff) && + !(prevtag == LFS_ERR_NOENT && newid != 0x3ff)) { + return (prevtag < 0) ? (int)prevtag : LFS_ERR_INVAL; + } + + // if we're in the same pair there's a few special cases... + bool samepair = (lfs_pair_cmp(oldcwd.pair, newcwd.pair) == 0); + uint16_t newoldid = lfs_tag_id(oldtag); + + struct lfs_mlist prevdir; + prevdir.next = lfs->mlist; + if (prevtag == LFS_ERR_NOENT) { + // check that name fits + lfs_size_t nlen = strlen(newpath); + if (nlen > lfs->name_max) { + return LFS_ERR_NAMETOOLONG; + } + + // there is a small chance we are being renamed in the same + // directory/ to an id less than our old id, the global update + // to handle this is a bit messy + if (samepair && newid <= newoldid) { + newoldid += 1; + } + } else if (lfs_tag_type3(prevtag) != lfs_tag_type3(oldtag)) { + return LFS_ERR_ISDIR; + } else if (samepair && newid == newoldid) { + // we're renaming to ourselves?? 
+ return 0; + } else if (lfs_tag_type3(prevtag) == LFS_TYPE_DIR) { + // must be empty before removal + lfs_block_t prevpair[2]; + lfs_stag_t res = lfs_dir_get(lfs, &newcwd, LFS_MKTAG(0x700, 0x3ff, 0), + LFS_MKTAG(LFS_TYPE_STRUCT, newid, 8), prevpair); + if (res < 0) { + return (int)res; + } + lfs_pair_fromle32(prevpair); + + // must be empty before removal + err = lfs_dir_fetch(lfs, &prevdir.m, prevpair); + if (err) { + return err; + } + + if (prevdir.m.count > 0 || prevdir.m.split) { + return LFS_ERR_NOTEMPTY; + } + + // mark fs as orphaned + err = lfs_fs_preporphans(lfs, +1); + if (err) { + return err; + } + + // I know it's crazy but yes, dir can be changed by our parent's + // commit (if predecessor is child) + prevdir.type = 0; + prevdir.id = 0; + lfs->mlist = &prevdir; + } + + if (!samepair) { + lfs_fs_prepmove(lfs, newoldid, oldcwd.pair); + } + + // move over all attributes + err = lfs_dir_commit(lfs, &newcwd, LFS_MKATTRS( + {LFS_MKTAG_IF(prevtag != LFS_ERR_NOENT, + LFS_TYPE_DELETE, newid, 0), NULL}, + {LFS_MKTAG(LFS_TYPE_CREATE, newid, 0), NULL}, + {LFS_MKTAG(lfs_tag_type3(oldtag), newid, strlen(newpath)), newpath}, + {LFS_MKTAG(LFS_FROM_MOVE, newid, lfs_tag_id(oldtag)), &oldcwd}, + {LFS_MKTAG_IF(samepair, + LFS_TYPE_DELETE, newoldid, 0), NULL})); + if (err) { + lfs->mlist = prevdir.next; + return err; + } + + // let commit clean up after move (if we're different! 
otherwise move + // logic already fixed it for us) + if (!samepair && lfs_gstate_hasmove(&lfs->gstate)) { + // prep gstate and delete move id + lfs_fs_prepmove(lfs, 0x3ff, NULL); + err = lfs_dir_commit(lfs, &oldcwd, LFS_MKATTRS( + {LFS_MKTAG(LFS_TYPE_DELETE, lfs_tag_id(oldtag), 0), NULL})); + if (err) { + lfs->mlist = prevdir.next; + return err; + } + } + + lfs->mlist = prevdir.next; + if (prevtag != LFS_ERR_NOENT + && lfs_tag_type3(prevtag) == LFS_TYPE_DIR) { + // fix orphan + err = lfs_fs_preporphans(lfs, -1); + if (err) { + return err; + } + + err = lfs_fs_pred(lfs, prevdir.m.pair, &newcwd); + if (err) { + return err; + } + + err = lfs_dir_drop(lfs, &newcwd, &prevdir.m); + if (err) { + return err; + } + } + + return 0; +} +#endif + +static lfs_ssize_t lfs_rawgetattr(lfs_t *lfs, const char *path, + uint8_t type, void *buffer, lfs_size_t size) { + lfs_mdir_t cwd; + lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL); + if (tag < 0) { + return tag; + } + + uint16_t id = lfs_tag_id(tag); + if (id == 0x3ff) { + // special case for root + id = 0; + int err = lfs_dir_fetch(lfs, &cwd, lfs->root); + if (err) { + return err; + } + } + + tag = lfs_dir_get(lfs, &cwd, LFS_MKTAG(0x7ff, 0x3ff, 0), + LFS_MKTAG(LFS_TYPE_USERATTR + type, + id, lfs_min(size, lfs->attr_max)), + buffer); + if (tag < 0) { + if (tag == LFS_ERR_NOENT) { + return LFS_ERR_NOATTR; + } + + return tag; + } + + return lfs_tag_size(tag); +} + +#ifndef LFS_READONLY +static int lfs_commitattr(lfs_t *lfs, const char *path, + uint8_t type, const void *buffer, lfs_size_t size) { + lfs_mdir_t cwd; + lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL); + if (tag < 0) { + return tag; + } + + uint16_t id = lfs_tag_id(tag); + if (id == 0x3ff) { + // special case for root + id = 0; + int err = lfs_dir_fetch(lfs, &cwd, lfs->root); + if (err) { + return err; + } + } + + return lfs_dir_commit(lfs, &cwd, LFS_MKATTRS( + {LFS_MKTAG(LFS_TYPE_USERATTR + type, id, size), buffer})); +} +#endif + +#ifndef LFS_READONLY +static 
int lfs_rawsetattr(lfs_t *lfs, const char *path, + uint8_t type, const void *buffer, lfs_size_t size) { + if (size > lfs->attr_max) { + return LFS_ERR_NOSPC; + } + + return lfs_commitattr(lfs, path, type, buffer, size); +} +#endif + +#ifndef LFS_READONLY +static int lfs_rawremoveattr(lfs_t *lfs, const char *path, uint8_t type) { + return lfs_commitattr(lfs, path, type, NULL, 0x3ff); +} +#endif + + +/// Filesystem operations /// +static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) { + lfs->cfg = cfg; + int err = 0; + + // validate that the lfs-cfg sizes were initiated properly before + // performing any arithmetic logics with them + LFS_ASSERT(lfs->cfg->read_size != 0); + LFS_ASSERT(lfs->cfg->prog_size != 0); + LFS_ASSERT(lfs->cfg->cache_size != 0); + + // check that block size is a multiple of cache size is a multiple + // of prog and read sizes + LFS_ASSERT(lfs->cfg->cache_size % lfs->cfg->read_size == 0); + LFS_ASSERT(lfs->cfg->cache_size % lfs->cfg->prog_size == 0); + LFS_ASSERT(lfs->cfg->block_size % lfs->cfg->cache_size == 0); + + // check that the block size is large enough to fit ctz pointers + LFS_ASSERT(4*lfs_npw2(0xffffffff / (lfs->cfg->block_size-2*4)) + <= lfs->cfg->block_size); + + // block_cycles = 0 is no longer supported. + // + // block_cycles is the number of erase cycles before littlefs evicts + // metadata logs as a part of wear leveling. Suggested values are in the + // range of 100-1000, or set block_cycles to -1 to disable block-level + // wear-leveling. 
+ LFS_ASSERT(lfs->cfg->block_cycles != 0); + + + // setup read cache + if (lfs->cfg->read_buffer) { + lfs->rcache.buffer = lfs->cfg->read_buffer; + } else { + lfs->rcache.buffer = lfs_malloc(lfs->cfg->cache_size); + if (!lfs->rcache.buffer) { + err = LFS_ERR_NOMEM; + goto cleanup; + } + } + + // setup program cache + if (lfs->cfg->prog_buffer) { + lfs->pcache.buffer = lfs->cfg->prog_buffer; + } else { + lfs->pcache.buffer = lfs_malloc(lfs->cfg->cache_size); + if (!lfs->pcache.buffer) { + err = LFS_ERR_NOMEM; + goto cleanup; + } + } + + // zero to avoid information leaks + lfs_cache_zero(lfs, &lfs->rcache); + lfs_cache_zero(lfs, &lfs->pcache); + + // setup lookahead, must be multiple of 64-bits, 32-bit aligned + LFS_ASSERT(lfs->cfg->lookahead_size > 0); + LFS_ASSERT(lfs->cfg->lookahead_size % 8 == 0 && + (uintptr_t)lfs->cfg->lookahead_buffer % 4 == 0); + if (lfs->cfg->lookahead_buffer) { + lfs->free.buffer = lfs->cfg->lookahead_buffer; + } else { + lfs->free.buffer = lfs_malloc(lfs->cfg->lookahead_size); + if (!lfs->free.buffer) { + err = LFS_ERR_NOMEM; + goto cleanup; + } + } + + // check that the size limits are sane + LFS_ASSERT(lfs->cfg->name_max <= LFS_NAME_MAX); + lfs->name_max = lfs->cfg->name_max; + if (!lfs->name_max) { + lfs->name_max = LFS_NAME_MAX; + } + + LFS_ASSERT(lfs->cfg->file_max <= LFS_FILE_MAX); + lfs->file_max = lfs->cfg->file_max; + if (!lfs->file_max) { + lfs->file_max = LFS_FILE_MAX; + } + + LFS_ASSERT(lfs->cfg->attr_max <= LFS_ATTR_MAX); + lfs->attr_max = lfs->cfg->attr_max; + if (!lfs->attr_max) { + lfs->attr_max = LFS_ATTR_MAX; + } + + LFS_ASSERT(lfs->cfg->metadata_max <= lfs->cfg->block_size); + + // setup default state + lfs->root[0] = LFS_BLOCK_NULL; + lfs->root[1] = LFS_BLOCK_NULL; + lfs->mlist = NULL; + lfs->seed = 0; + lfs->gdisk = (lfs_gstate_t){0}; + lfs->gstate = (lfs_gstate_t){0}; + lfs->gdelta = (lfs_gstate_t){0}; +#ifdef LFS_MIGRATE + lfs->lfs1 = NULL; +#endif + + return 0; + +cleanup: + lfs_deinit(lfs); + return err; +} + 
+static int lfs_deinit(lfs_t *lfs) { + // free allocated memory + if (!lfs->cfg->read_buffer) { + lfs_free(lfs->rcache.buffer); + } + + if (!lfs->cfg->prog_buffer) { + lfs_free(lfs->pcache.buffer); + } + + if (!lfs->cfg->lookahead_buffer) { + lfs_free(lfs->free.buffer); + } + + return 0; +} + +#ifndef LFS_READONLY +static int lfs_rawformat(lfs_t *lfs, const struct lfs_config *cfg) { + int err = 0; + { + err = lfs_init(lfs, cfg); + if (err) { + return err; + } + + // create free lookahead + memset(lfs->free.buffer, 0, lfs->cfg->lookahead_size); + lfs->free.off = 0; + lfs->free.size = lfs_min(8*lfs->cfg->lookahead_size, + lfs->cfg->block_count); + lfs->free.i = 0; + lfs_alloc_ack(lfs); + + // create root dir + lfs_mdir_t root; + err = lfs_dir_alloc(lfs, &root); + if (err) { + goto cleanup; + } + + // write one superblock + lfs_superblock_t superblock = { + .version = LFS_DISK_VERSION, + .block_size = lfs->cfg->block_size, + .block_count = lfs->cfg->block_count, + .name_max = lfs->name_max, + .file_max = lfs->file_max, + .attr_max = lfs->attr_max, + }; + + lfs_superblock_tole32(&superblock); + err = lfs_dir_commit(lfs, &root, LFS_MKATTRS( + {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0), NULL}, + {LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), "littlefs"}, + {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)), + &superblock})); + if (err) { + goto cleanup; + } + + // force compaction to prevent accidentally mounting any + // older version of littlefs that may live on disk + root.erased = false; + err = lfs_dir_commit(lfs, &root, NULL, 0); + if (err) { + goto cleanup; + } + + // sanity check that fetch works + err = lfs_dir_fetch(lfs, &root, (const lfs_block_t[2]){0, 1}); + if (err) { + goto cleanup; + } + } + +cleanup: + lfs_deinit(lfs); + return err; + +} +#endif + +static int lfs_rawmount(lfs_t *lfs, const struct lfs_config *cfg) { + int err = lfs_init(lfs, cfg); + if (err) { + return err; + } + + // scan directory blocks for superblock and any global updates + lfs_mdir_t dir = 
{.tail = {0, 1}}; + lfs_block_t cycle = 0; + while (!lfs_pair_isnull(dir.tail)) { + if (cycle >= lfs->cfg->block_count/2) { + // loop detected + err = LFS_ERR_CORRUPT; + goto cleanup; + } + cycle += 1; + + // fetch next block in tail list + lfs_stag_t tag = lfs_dir_fetchmatch(lfs, &dir, dir.tail, + LFS_MKTAG(0x7ff, 0x3ff, 0), + LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), + NULL, + lfs_dir_find_match, &(struct lfs_dir_find_match){ + lfs, "littlefs", 8}); + if (tag < 0) { + err = tag; + goto cleanup; + } + + // has superblock? + if (tag && !lfs_tag_isdelete(tag)) { + // update root + lfs->root[0] = dir.pair[0]; + lfs->root[1] = dir.pair[1]; + + // grab superblock + lfs_superblock_t superblock; + tag = lfs_dir_get(lfs, &dir, LFS_MKTAG(0x7ff, 0x3ff, 0), + LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)), + &superblock); + if (tag < 0) { + err = tag; + goto cleanup; + } + lfs_superblock_fromle32(&superblock); + + // check version + uint16_t major_version = (0xffff & (superblock.version >> 16)); + uint16_t minor_version = (0xffff & (superblock.version >> 0)); + if ((major_version != LFS_DISK_VERSION_MAJOR || + minor_version > LFS_DISK_VERSION_MINOR)) { + LFS_ERROR("Invalid version v%"PRIu16".%"PRIu16, + major_version, minor_version); + err = LFS_ERR_INVAL; + goto cleanup; + } + + // check superblock configuration + if (superblock.name_max) { + if (superblock.name_max > lfs->name_max) { + LFS_ERROR("Unsupported name_max (%"PRIu32" > %"PRIu32")", + superblock.name_max, lfs->name_max); + err = LFS_ERR_INVAL; + goto cleanup; + } + + lfs->name_max = superblock.name_max; + } + + if (superblock.file_max) { + if (superblock.file_max > lfs->file_max) { + LFS_ERROR("Unsupported file_max (%"PRIu32" > %"PRIu32")", + superblock.file_max, lfs->file_max); + err = LFS_ERR_INVAL; + goto cleanup; + } + + lfs->file_max = superblock.file_max; + } + + if (superblock.attr_max) { + if (superblock.attr_max > lfs->attr_max) { + LFS_ERROR("Unsupported attr_max (%"PRIu32" > %"PRIu32")", + 
superblock.attr_max, lfs->attr_max); + err = LFS_ERR_INVAL; + goto cleanup; + } + + lfs->attr_max = superblock.attr_max; + } + + if (superblock.block_count != lfs->cfg->block_count) { + LFS_ERROR("Invalid block count (%"PRIu32" != %"PRIu32")", + superblock.block_count, lfs->cfg->block_count); + err = LFS_ERR_INVAL; + goto cleanup; + } + + if (superblock.block_size != lfs->cfg->block_size) { + LFS_ERROR("Invalid block size (%"PRIu32" != %"PRIu32")", + superblock.block_size, lfs->cfg->block_size); + err = LFS_ERR_INVAL; + goto cleanup; + } + } + + // has gstate? + err = lfs_dir_getgstate(lfs, &dir, &lfs->gstate); + if (err) { + goto cleanup; + } + } + + // found superblock? + if (lfs_pair_isnull(lfs->root)) { + err = LFS_ERR_INVAL; + goto cleanup; + } + + // update littlefs with gstate + if (!lfs_gstate_iszero(&lfs->gstate)) { + LFS_DEBUG("Found pending gstate 0x%08"PRIx32"%08"PRIx32"%08"PRIx32, + lfs->gstate.tag, + lfs->gstate.pair[0], + lfs->gstate.pair[1]); + } + lfs->gstate.tag += !lfs_tag_isvalid(lfs->gstate.tag); + lfs->gdisk = lfs->gstate; + + // setup free lookahead, to distribute allocations uniformly across + // boots, we start the allocator at a random location + lfs->free.off = lfs->seed % lfs->cfg->block_count; + lfs_alloc_drop(lfs); + + return 0; + +cleanup: + lfs_rawunmount(lfs); + return err; +} + +static int lfs_rawunmount(lfs_t *lfs) { + return lfs_deinit(lfs); +} + + +/// Filesystem filesystem operations /// +int lfs_fs_rawtraverse(lfs_t *lfs, + int (*cb)(void *data, lfs_block_t block), void *data, + bool includeorphans) { + // iterate over metadata pairs + lfs_mdir_t dir = {.tail = {0, 1}}; + +#ifdef LFS_MIGRATE + // also consider v1 blocks during migration + if (lfs->lfs1) { + int err = lfs1_traverse(lfs, cb, data); + if (err) { + return err; + } + + dir.tail[0] = lfs->root[0]; + dir.tail[1] = lfs->root[1]; + } +#endif + + lfs_block_t cycle = 0; + while (!lfs_pair_isnull(dir.tail)) { + if (cycle >= lfs->cfg->block_count/2) { + // loop detected 
+ return LFS_ERR_CORRUPT; + } + cycle += 1; + + for (int i = 0; i < 2; i++) { + int err = cb(data, dir.tail[i]); + if (err) { + return err; + } + } + + // iterate through ids in directory + int err = lfs_dir_fetch(lfs, &dir, dir.tail); + if (err) { + return err; + } + + for (uint16_t id = 0; id < dir.count; id++) { + struct lfs_ctz ctz; + lfs_stag_t tag = lfs_dir_get(lfs, &dir, LFS_MKTAG(0x700, 0x3ff, 0), + LFS_MKTAG(LFS_TYPE_STRUCT, id, sizeof(ctz)), &ctz); + if (tag < 0) { + if (tag == LFS_ERR_NOENT) { + continue; + } + return tag; + } + lfs_ctz_fromle32(&ctz); + + if (lfs_tag_type3(tag) == LFS_TYPE_CTZSTRUCT) { + err = lfs_ctz_traverse(lfs, NULL, &lfs->rcache, + ctz.head, ctz.size, cb, data); + if (err) { + return err; + } + } else if (includeorphans && + lfs_tag_type3(tag) == LFS_TYPE_DIRSTRUCT) { + for (int i = 0; i < 2; i++) { + err = cb(data, (&ctz.head)[i]); + if (err) { + return err; + } + } + } + } + } + +#ifndef LFS_READONLY + // iterate over any open files + for (lfs_file_t *f = (lfs_file_t*)lfs->mlist; f; f = f->next) { + if (f->type != LFS_TYPE_REG) { + continue; + } + + if ((f->flags & LFS_F_DIRTY) && !(f->flags & LFS_F_INLINE)) { + int err = lfs_ctz_traverse(lfs, &f->cache, &lfs->rcache, + f->ctz.head, f->ctz.size, cb, data); + if (err) { + return err; + } + } + + if ((f->flags & LFS_F_WRITING) && !(f->flags & LFS_F_INLINE)) { + int err = lfs_ctz_traverse(lfs, &f->cache, &lfs->rcache, + f->block, f->pos, cb, data); + if (err) { + return err; + } + } + } +#endif + + return 0; +} + +#ifndef LFS_READONLY +static int lfs_fs_pred(lfs_t *lfs, + const lfs_block_t pair[2], lfs_mdir_t *pdir) { + // iterate over all directory directory entries + pdir->tail[0] = 0; + pdir->tail[1] = 1; + lfs_block_t cycle = 0; + while (!lfs_pair_isnull(pdir->tail)) { + if (cycle >= lfs->cfg->block_count/2) { + // loop detected + return LFS_ERR_CORRUPT; + } + cycle += 1; + + if (lfs_pair_cmp(pdir->tail, pair) == 0) { + return 0; + } + + int err = lfs_dir_fetch(lfs, pdir, 
pdir->tail); + if (err) { + return err; + } + } + + return LFS_ERR_NOENT; +} +#endif + +#ifndef LFS_READONLY +struct lfs_fs_parent_match { + lfs_t *lfs; + const lfs_block_t pair[2]; +}; +#endif + +#ifndef LFS_READONLY +static int lfs_fs_parent_match(void *data, + lfs_tag_t tag, const void *buffer) { + struct lfs_fs_parent_match *find = data; + lfs_t *lfs = find->lfs; + const struct lfs_diskoff *disk = buffer; + (void)tag; + + lfs_block_t child[2]; + int err = lfs_bd_read(lfs, + &lfs->pcache, &lfs->rcache, lfs->cfg->block_size, + disk->block, disk->off, &child, sizeof(child)); + if (err) { + return err; + } + + lfs_pair_fromle32(child); + return (lfs_pair_cmp(child, find->pair) == 0) ? LFS_CMP_EQ : LFS_CMP_LT; +} +#endif + +#ifndef LFS_READONLY +static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t pair[2], + lfs_mdir_t *parent) { + // use fetchmatch with callback to find pairs + parent->tail[0] = 0; + parent->tail[1] = 1; + lfs_block_t cycle = 0; + while (!lfs_pair_isnull(parent->tail)) { + if (cycle >= lfs->cfg->block_count/2) { + // loop detected + return LFS_ERR_CORRUPT; + } + cycle += 1; + + lfs_stag_t tag = lfs_dir_fetchmatch(lfs, parent, parent->tail, + LFS_MKTAG(0x7ff, 0, 0x3ff), + LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 0, 8), + NULL, + lfs_fs_parent_match, &(struct lfs_fs_parent_match){ + lfs, {pair[0], pair[1]}}); + if (tag && tag != LFS_ERR_NOENT) { + return tag; + } + } + + return LFS_ERR_NOENT; +} +#endif + +#ifndef LFS_READONLY +static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans) { + LFS_ASSERT(lfs_tag_size(lfs->gstate.tag) > 0 || orphans >= 0); + lfs->gstate.tag += orphans; + lfs->gstate.tag = ((lfs->gstate.tag & ~LFS_MKTAG(0x800, 0, 0)) | + ((uint32_t)lfs_gstate_hasorphans(&lfs->gstate) << 31)); + + return 0; +} +#endif + +#ifndef LFS_READONLY +static void lfs_fs_prepmove(lfs_t *lfs, + uint16_t id, const lfs_block_t pair[2]) { + lfs->gstate.tag = ((lfs->gstate.tag & ~LFS_MKTAG(0x7ff, 0x3ff, 0)) | + ((id != 0x3ff) ? 
LFS_MKTAG(LFS_TYPE_DELETE, id, 0) : 0)); + lfs->gstate.pair[0] = (id != 0x3ff) ? pair[0] : 0; + lfs->gstate.pair[1] = (id != 0x3ff) ? pair[1] : 0; +} +#endif + +#ifndef LFS_READONLY +static int lfs_fs_demove(lfs_t *lfs) { + if (!lfs_gstate_hasmove(&lfs->gdisk)) { + return 0; + } + + // Fix bad moves + LFS_DEBUG("Fixing move {0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16, + lfs->gdisk.pair[0], + lfs->gdisk.pair[1], + lfs_tag_id(lfs->gdisk.tag)); + + // fetch and delete the moved entry + lfs_mdir_t movedir; + int err = lfs_dir_fetch(lfs, &movedir, lfs->gdisk.pair); + if (err) { + return err; + } + + // prep gstate and delete move id + uint16_t moveid = lfs_tag_id(lfs->gdisk.tag); + lfs_fs_prepmove(lfs, 0x3ff, NULL); + err = lfs_dir_commit(lfs, &movedir, LFS_MKATTRS( + {LFS_MKTAG(LFS_TYPE_DELETE, moveid, 0), NULL})); + if (err) { + return err; + } + + return 0; +} +#endif + +#ifndef LFS_READONLY +static int lfs_fs_deorphan(lfs_t *lfs, bool powerloss) { + if (!lfs_gstate_hasorphans(&lfs->gstate)) { + return 0; + } + + int8_t found = 0; +restart: + { + // Fix any orphans + lfs_mdir_t pdir = {.split = true, .tail = {0, 1}}; + lfs_mdir_t dir; + + // iterate over all directory directory entries + while (!lfs_pair_isnull(pdir.tail)) { + int err = lfs_dir_fetch(lfs, &dir, pdir.tail); + if (err) { + return err; + } + + // check head blocks for orphans + if (!pdir.split) { + // check if we have a parent + lfs_mdir_t parent; + lfs_stag_t tag = lfs_fs_parent(lfs, pdir.tail, &parent); + if (tag < 0 && tag != LFS_ERR_NOENT) { + return tag; + } + + // note we only check for full orphans if we may have had a + // power-loss, otherwise orphans are created intentionally + // during operations such as lfs_mkdir + if (tag == LFS_ERR_NOENT && powerloss) { + // we are an orphan + LFS_DEBUG("Fixing orphan {0x%"PRIx32", 0x%"PRIx32"}", + pdir.tail[0], pdir.tail[1]); + + // steal state + err = lfs_dir_getgstate(lfs, &dir, &lfs->gdelta); + if (err) { + return err; + } + + // steal tail + 
lfs_pair_tole32(dir.tail); + int state = lfs_dir_orphaningcommit(lfs, &pdir, LFS_MKATTRS( + {LFS_MKTAG(LFS_TYPE_TAIL + dir.split, 0x3ff, 8), + dir.tail})); + lfs_pair_fromle32(dir.tail); + if (state < 0) { + return state; + } + + found += 1; + + // did our commit create more orphans? + if (state == LFS_OK_ORPHANED) { + goto restart; + } + + // refetch tail + continue; + } + + if (tag != LFS_ERR_NOENT) { + lfs_block_t pair[2]; + lfs_stag_t state = lfs_dir_get(lfs, &parent, + LFS_MKTAG(0x7ff, 0x3ff, 0), tag, pair); + if (state < 0) { + return state; + } + lfs_pair_fromle32(pair); + + if (!lfs_pair_sync(pair, pdir.tail)) { + // we have desynced + LFS_DEBUG("Fixing half-orphan " + "{0x%"PRIx32", 0x%"PRIx32"} " + "-> {0x%"PRIx32", 0x%"PRIx32"}", + pdir.tail[0], pdir.tail[1], pair[0], pair[1]); + + // fix pending move in this pair? this looks like an + // optimization but is in fact _required_ since + // relocating may outdate the move. + uint16_t moveid = 0x3ff; + if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) { + moveid = lfs_tag_id(lfs->gstate.tag); + LFS_DEBUG("Fixing move while fixing orphans " + "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n", + pdir.pair[0], pdir.pair[1], moveid); + lfs_fs_prepmove(lfs, 0x3ff, NULL); + } + + lfs_pair_tole32(pair); + state = lfs_dir_orphaningcommit(lfs, &pdir, LFS_MKATTRS( + {LFS_MKTAG_IF(moveid != 0x3ff, + LFS_TYPE_DELETE, moveid, 0), NULL}, + {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), + pair})); + lfs_pair_fromle32(pair); + if (state < 0) { + return state; + } + + found += 1; + + // did our commit create more orphans? 
+ if (state == LFS_OK_ORPHANED) { + goto restart; + } + + // refetch tail + continue; + } + } + } + + pdir = dir; + } + } + + // mark orphans as fixed + return lfs_fs_preporphans(lfs, -lfs_min( + lfs_gstate_getorphans(&lfs->gstate), + found)); +} +#endif + +#ifndef LFS_READONLY +static int lfs_fs_forceconsistency(lfs_t *lfs) { + int err = lfs_fs_demove(lfs); + if (err) { + return err; + } + + err = lfs_fs_deorphan(lfs, true); + if (err) { + return err; + } + + return 0; +} +#endif + +static int lfs_fs_size_count(void *p, lfs_block_t block) { + (void)block; + lfs_size_t *size = p; + *size += 1; + return 0; +} + +static lfs_ssize_t lfs_fs_rawsize(lfs_t *lfs) { + lfs_size_t size = 0; + int err = lfs_fs_rawtraverse(lfs, lfs_fs_size_count, &size, false); + if (err) { + return err; + } + + return size; +} + +#ifdef LFS_MIGRATE +////// Migration from littelfs v1 below this ////// + +/// Version info /// + +// Software library version +// Major (top-nibble), incremented on backwards incompatible changes +// Minor (bottom-nibble), incremented on feature additions +#define LFS1_VERSION 0x00010007 +#define LFS1_VERSION_MAJOR (0xffff & (LFS1_VERSION >> 16)) +#define LFS1_VERSION_MINOR (0xffff & (LFS1_VERSION >> 0)) + +// Version of On-disk data structures +// Major (top-nibble), incremented on backwards incompatible changes +// Minor (bottom-nibble), incremented on feature additions +#define LFS1_DISK_VERSION 0x00010001 +#define LFS1_DISK_VERSION_MAJOR (0xffff & (LFS1_DISK_VERSION >> 16)) +#define LFS1_DISK_VERSION_MINOR (0xffff & (LFS1_DISK_VERSION >> 0)) + + +/// v1 Definitions /// + +// File types +enum lfs1_type { + LFS1_TYPE_REG = 0x11, + LFS1_TYPE_DIR = 0x22, + LFS1_TYPE_SUPERBLOCK = 0x2e, +}; + +typedef struct lfs1 { + lfs_block_t root[2]; +} lfs1_t; + +typedef struct lfs1_entry { + lfs_off_t off; + + struct lfs1_disk_entry { + uint8_t type; + uint8_t elen; + uint8_t alen; + uint8_t nlen; + union { + struct { + lfs_block_t head; + lfs_size_t size; + } file; + 
lfs_block_t dir[2]; + } u; + } d; +} lfs1_entry_t; + +typedef struct lfs1_dir { + struct lfs1_dir *next; + lfs_block_t pair[2]; + lfs_off_t off; + + lfs_block_t head[2]; + lfs_off_t pos; + + struct lfs1_disk_dir { + uint32_t rev; + lfs_size_t size; + lfs_block_t tail[2]; + } d; +} lfs1_dir_t; + +typedef struct lfs1_superblock { + lfs_off_t off; + + struct lfs1_disk_superblock { + uint8_t type; + uint8_t elen; + uint8_t alen; + uint8_t nlen; + lfs_block_t root[2]; + uint32_t block_size; + uint32_t block_count; + uint32_t version; + char magic[8]; + } d; +} lfs1_superblock_t; + + +/// Low-level wrappers v1->v2 /// +static void lfs1_crc(uint32_t *crc, const void *buffer, size_t size) { + *crc = lfs_crc(*crc, buffer, size); +} + +static int lfs1_bd_read(lfs_t *lfs, lfs_block_t block, + lfs_off_t off, void *buffer, lfs_size_t size) { + // if we ever do more than writes to alternating pairs, + // this may need to consider pcache + return lfs_bd_read(lfs, &lfs->pcache, &lfs->rcache, size, + block, off, buffer, size); +} + +static int lfs1_bd_crc(lfs_t *lfs, lfs_block_t block, + lfs_off_t off, lfs_size_t size, uint32_t *crc) { + for (lfs_off_t i = 0; i < size; i++) { + uint8_t c; + int err = lfs1_bd_read(lfs, block, off+i, &c, 1); + if (err) { + return err; + } + + lfs1_crc(crc, &c, 1); + } + + return 0; +} + + +/// Endian swapping functions /// +static void lfs1_dir_fromle32(struct lfs1_disk_dir *d) { + d->rev = lfs_fromle32(d->rev); + d->size = lfs_fromle32(d->size); + d->tail[0] = lfs_fromle32(d->tail[0]); + d->tail[1] = lfs_fromle32(d->tail[1]); +} + +static void lfs1_dir_tole32(struct lfs1_disk_dir *d) { + d->rev = lfs_tole32(d->rev); + d->size = lfs_tole32(d->size); + d->tail[0] = lfs_tole32(d->tail[0]); + d->tail[1] = lfs_tole32(d->tail[1]); +} + +static void lfs1_entry_fromle32(struct lfs1_disk_entry *d) { + d->u.dir[0] = lfs_fromle32(d->u.dir[0]); + d->u.dir[1] = lfs_fromle32(d->u.dir[1]); +} + +static void lfs1_entry_tole32(struct lfs1_disk_entry *d) { + 
d->u.dir[0] = lfs_tole32(d->u.dir[0]); + d->u.dir[1] = lfs_tole32(d->u.dir[1]); +} + +static void lfs1_superblock_fromle32(struct lfs1_disk_superblock *d) { + d->root[0] = lfs_fromle32(d->root[0]); + d->root[1] = lfs_fromle32(d->root[1]); + d->block_size = lfs_fromle32(d->block_size); + d->block_count = lfs_fromle32(d->block_count); + d->version = lfs_fromle32(d->version); +} + + +///// Metadata pair and directory operations /// +static inline lfs_size_t lfs1_entry_size(const lfs1_entry_t *entry) { + return 4 + entry->d.elen + entry->d.alen + entry->d.nlen; +} + +static int lfs1_dir_fetch(lfs_t *lfs, + lfs1_dir_t *dir, const lfs_block_t pair[2]) { + // copy out pair, otherwise may be aliasing dir + const lfs_block_t tpair[2] = {pair[0], pair[1]}; + bool valid = false; + + // check both blocks for the most recent revision + for (int i = 0; i < 2; i++) { + struct lfs1_disk_dir test; + int err = lfs1_bd_read(lfs, tpair[i], 0, &test, sizeof(test)); + lfs1_dir_fromle32(&test); + if (err) { + if (err == LFS_ERR_CORRUPT) { + continue; + } + return err; + } + + if (valid && lfs_scmp(test.rev, dir->d.rev) < 0) { + continue; + } + + if ((0x7fffffff & test.size) < sizeof(test)+4 || + (0x7fffffff & test.size) > lfs->cfg->block_size) { + continue; + } + + uint32_t crc = 0xffffffff; + lfs1_dir_tole32(&test); + lfs1_crc(&crc, &test, sizeof(test)); + lfs1_dir_fromle32(&test); + err = lfs1_bd_crc(lfs, tpair[i], sizeof(test), + (0x7fffffff & test.size) - sizeof(test), &crc); + if (err) { + if (err == LFS_ERR_CORRUPT) { + continue; + } + return err; + } + + if (crc != 0) { + continue; + } + + valid = true; + + // setup dir in case it's valid + dir->pair[0] = tpair[(i+0) % 2]; + dir->pair[1] = tpair[(i+1) % 2]; + dir->off = sizeof(dir->d); + dir->d = test; + } + + if (!valid) { + LFS_ERROR("Corrupted dir pair at {0x%"PRIx32", 0x%"PRIx32"}", + tpair[0], tpair[1]); + return LFS_ERR_CORRUPT; + } + + return 0; +} + +static int lfs1_dir_next(lfs_t *lfs, lfs1_dir_t *dir, lfs1_entry_t 
*entry) { + while (dir->off + sizeof(entry->d) > (0x7fffffff & dir->d.size)-4) { + if (!(0x80000000 & dir->d.size)) { + entry->off = dir->off; + return LFS_ERR_NOENT; + } + + int err = lfs1_dir_fetch(lfs, dir, dir->d.tail); + if (err) { + return err; + } + + dir->off = sizeof(dir->d); + dir->pos += sizeof(dir->d) + 4; + } + + int err = lfs1_bd_read(lfs, dir->pair[0], dir->off, + &entry->d, sizeof(entry->d)); + lfs1_entry_fromle32(&entry->d); + if (err) { + return err; + } + + entry->off = dir->off; + dir->off += lfs1_entry_size(entry); + dir->pos += lfs1_entry_size(entry); + return 0; +} + +/// littlefs v1 specific operations /// +int lfs1_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data) { + if (lfs_pair_isnull(lfs->lfs1->root)) { + return 0; + } + + // iterate over metadata pairs + lfs1_dir_t dir; + lfs1_entry_t entry; + lfs_block_t cwd[2] = {0, 1}; + + while (true) { + for (int i = 0; i < 2; i++) { + int err = cb(data, cwd[i]); + if (err) { + return err; + } + } + + int err = lfs1_dir_fetch(lfs, &dir, cwd); + if (err) { + return err; + } + + // iterate over contents + while (dir.off + sizeof(entry.d) <= (0x7fffffff & dir.d.size)-4) { + err = lfs1_bd_read(lfs, dir.pair[0], dir.off, + &entry.d, sizeof(entry.d)); + lfs1_entry_fromle32(&entry.d); + if (err) { + return err; + } + + dir.off += lfs1_entry_size(&entry); + if ((0x70 & entry.d.type) == (0x70 & LFS1_TYPE_REG)) { + err = lfs_ctz_traverse(lfs, NULL, &lfs->rcache, + entry.d.u.file.head, entry.d.u.file.size, cb, data); + if (err) { + return err; + } + } + } + + // we also need to check if we contain a threaded v2 directory + lfs_mdir_t dir2 = {.split=true, .tail={cwd[0], cwd[1]}}; + while (dir2.split) { + err = lfs_dir_fetch(lfs, &dir2, dir2.tail); + if (err) { + break; + } + + for (int i = 0; i < 2; i++) { + err = cb(data, dir2.pair[i]); + if (err) { + return err; + } + } + } + + cwd[0] = dir.d.tail[0]; + cwd[1] = dir.d.tail[1]; + + if (lfs_pair_isnull(cwd)) { + break; + } + } + + return 0; +} + 
+static int lfs1_moved(lfs_t *lfs, const void *e) { + if (lfs_pair_isnull(lfs->lfs1->root)) { + return 0; + } + + // skip superblock + lfs1_dir_t cwd; + int err = lfs1_dir_fetch(lfs, &cwd, (const lfs_block_t[2]){0, 1}); + if (err) { + return err; + } + + // iterate over all directory directory entries + lfs1_entry_t entry; + while (!lfs_pair_isnull(cwd.d.tail)) { + err = lfs1_dir_fetch(lfs, &cwd, cwd.d.tail); + if (err) { + return err; + } + + while (true) { + err = lfs1_dir_next(lfs, &cwd, &entry); + if (err && err != LFS_ERR_NOENT) { + return err; + } + + if (err == LFS_ERR_NOENT) { + break; + } + + if (!(0x80 & entry.d.type) && + memcmp(&entry.d.u, e, sizeof(entry.d.u)) == 0) { + return true; + } + } + } + + return false; +} + +/// Filesystem operations /// +static int lfs1_mount(lfs_t *lfs, struct lfs1 *lfs1, + const struct lfs_config *cfg) { + int err = 0; + { + err = lfs_init(lfs, cfg); + if (err) { + return err; + } + + lfs->lfs1 = lfs1; + lfs->lfs1->root[0] = LFS_BLOCK_NULL; + lfs->lfs1->root[1] = LFS_BLOCK_NULL; + + // setup free lookahead + lfs->free.off = 0; + lfs->free.size = 0; + lfs->free.i = 0; + lfs_alloc_ack(lfs); + + // load superblock + lfs1_dir_t dir; + lfs1_superblock_t superblock; + err = lfs1_dir_fetch(lfs, &dir, (const lfs_block_t[2]){0, 1}); + if (err && err != LFS_ERR_CORRUPT) { + goto cleanup; + } + + if (!err) { + err = lfs1_bd_read(lfs, dir.pair[0], sizeof(dir.d), + &superblock.d, sizeof(superblock.d)); + lfs1_superblock_fromle32(&superblock.d); + if (err) { + goto cleanup; + } + + lfs->lfs1->root[0] = superblock.d.root[0]; + lfs->lfs1->root[1] = superblock.d.root[1]; + } + + if (err || memcmp(superblock.d.magic, "littlefs", 8) != 0) { + LFS_ERROR("Invalid superblock at {0x%"PRIx32", 0x%"PRIx32"}", + 0, 1); + err = LFS_ERR_CORRUPT; + goto cleanup; + } + + uint16_t major_version = (0xffff & (superblock.d.version >> 16)); + uint16_t minor_version = (0xffff & (superblock.d.version >> 0)); + if ((major_version != LFS1_DISK_VERSION_MAJOR || 
+ minor_version > LFS1_DISK_VERSION_MINOR)) { + LFS_ERROR("Invalid version v%d.%d", major_version, minor_version); + err = LFS_ERR_INVAL; + goto cleanup; + } + + return 0; + } + +cleanup: + lfs_deinit(lfs); + return err; +} + +static int lfs1_unmount(lfs_t *lfs) { + return lfs_deinit(lfs); +} + +/// v1 migration /// +static int lfs_rawmigrate(lfs_t *lfs, const struct lfs_config *cfg) { + struct lfs1 lfs1; + int err = lfs1_mount(lfs, &lfs1, cfg); + if (err) { + return err; + } + + { + // iterate through each directory, copying over entries + // into new directory + lfs1_dir_t dir1; + lfs_mdir_t dir2; + dir1.d.tail[0] = lfs->lfs1->root[0]; + dir1.d.tail[1] = lfs->lfs1->root[1]; + while (!lfs_pair_isnull(dir1.d.tail)) { + // iterate old dir + err = lfs1_dir_fetch(lfs, &dir1, dir1.d.tail); + if (err) { + goto cleanup; + } + + // create new dir and bind as temporary pretend root + err = lfs_dir_alloc(lfs, &dir2); + if (err) { + goto cleanup; + } + + dir2.rev = dir1.d.rev; + dir1.head[0] = dir1.pair[0]; + dir1.head[1] = dir1.pair[1]; + lfs->root[0] = dir2.pair[0]; + lfs->root[1] = dir2.pair[1]; + + err = lfs_dir_commit(lfs, &dir2, NULL, 0); + if (err) { + goto cleanup; + } + + while (true) { + lfs1_entry_t entry1; + err = lfs1_dir_next(lfs, &dir1, &entry1); + if (err && err != LFS_ERR_NOENT) { + goto cleanup; + } + + if (err == LFS_ERR_NOENT) { + break; + } + + // check that entry has not been moved + if (entry1.d.type & 0x80) { + int moved = lfs1_moved(lfs, &entry1.d.u); + if (moved < 0) { + err = moved; + goto cleanup; + } + + if (moved) { + continue; + } + + entry1.d.type &= ~0x80; + } + + // also fetch name + char name[LFS_NAME_MAX+1]; + memset(name, 0, sizeof(name)); + err = lfs1_bd_read(lfs, dir1.pair[0], + entry1.off + 4+entry1.d.elen+entry1.d.alen, + name, entry1.d.nlen); + if (err) { + goto cleanup; + } + + bool isdir = (entry1.d.type == LFS1_TYPE_DIR); + + // create entry in new dir + err = lfs_dir_fetch(lfs, &dir2, lfs->root); + if (err) { + goto cleanup; + } 
+ + uint16_t id; + err = lfs_dir_find(lfs, &dir2, &(const char*){name}, &id); + if (!(err == LFS_ERR_NOENT && id != 0x3ff)) { + err = (err < 0) ? err : LFS_ERR_EXIST; + goto cleanup; + } + + lfs1_entry_tole32(&entry1.d); + err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS( + {LFS_MKTAG(LFS_TYPE_CREATE, id, 0), NULL}, + {LFS_MKTAG_IF_ELSE(isdir, + LFS_TYPE_DIR, id, entry1.d.nlen, + LFS_TYPE_REG, id, entry1.d.nlen), + name}, + {LFS_MKTAG_IF_ELSE(isdir, + LFS_TYPE_DIRSTRUCT, id, sizeof(entry1.d.u), + LFS_TYPE_CTZSTRUCT, id, sizeof(entry1.d.u)), + &entry1.d.u})); + lfs1_entry_fromle32(&entry1.d); + if (err) { + goto cleanup; + } + } + + if (!lfs_pair_isnull(dir1.d.tail)) { + // find last block and update tail to thread into fs + err = lfs_dir_fetch(lfs, &dir2, lfs->root); + if (err) { + goto cleanup; + } + + while (dir2.split) { + err = lfs_dir_fetch(lfs, &dir2, dir2.tail); + if (err) { + goto cleanup; + } + } + + lfs_pair_tole32(dir2.pair); + err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS( + {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir1.d.tail})); + lfs_pair_fromle32(dir2.pair); + if (err) { + goto cleanup; + } + } + + // Copy over first block to thread into fs. Unfortunately + // if this fails there is not much we can do. + LFS_DEBUG("Migrating {0x%"PRIx32", 0x%"PRIx32"} " + "-> {0x%"PRIx32", 0x%"PRIx32"}", + lfs->root[0], lfs->root[1], dir1.head[0], dir1.head[1]); + + err = lfs_bd_erase(lfs, dir1.head[1]); + if (err) { + goto cleanup; + } + + err = lfs_dir_fetch(lfs, &dir2, lfs->root); + if (err) { + goto cleanup; + } + + for (lfs_off_t i = 0; i < dir2.off; i++) { + uint8_t dat; + err = lfs_bd_read(lfs, + NULL, &lfs->rcache, dir2.off, + dir2.pair[0], i, &dat, 1); + if (err) { + goto cleanup; + } + + err = lfs_bd_prog(lfs, + &lfs->pcache, &lfs->rcache, true, + dir1.head[1], i, &dat, 1); + if (err) { + goto cleanup; + } + } + + err = lfs_bd_flush(lfs, &lfs->pcache, &lfs->rcache, true); + if (err) { + goto cleanup; + } + } + + // Create new superblock. 
This marks a successful migration! + err = lfs1_dir_fetch(lfs, &dir1, (const lfs_block_t[2]){0, 1}); + if (err) { + goto cleanup; + } + + dir2.pair[0] = dir1.pair[0]; + dir2.pair[1] = dir1.pair[1]; + dir2.rev = dir1.d.rev; + dir2.off = sizeof(dir2.rev); + dir2.etag = 0xffffffff; + dir2.count = 0; + dir2.tail[0] = lfs->lfs1->root[0]; + dir2.tail[1] = lfs->lfs1->root[1]; + dir2.erased = false; + dir2.split = true; + + lfs_superblock_t superblock = { + .version = LFS_DISK_VERSION, + .block_size = lfs->cfg->block_size, + .block_count = lfs->cfg->block_count, + .name_max = lfs->name_max, + .file_max = lfs->file_max, + .attr_max = lfs->attr_max, + }; + + lfs_superblock_tole32(&superblock); + err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS( + {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0), NULL}, + {LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), "littlefs"}, + {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)), + &superblock})); + if (err) { + goto cleanup; + } + + // sanity check that fetch works + err = lfs_dir_fetch(lfs, &dir2, (const lfs_block_t[2]){0, 1}); + if (err) { + goto cleanup; + } + + // force compaction to prevent accidentally mounting v1 + dir2.erased = false; + err = lfs_dir_commit(lfs, &dir2, NULL, 0); + if (err) { + goto cleanup; + } + } + +cleanup: + lfs1_unmount(lfs); + return err; +} + +#endif + + +/// Public API wrappers /// + +// Here we can add tracing/thread safety easily + +// Thread-safe wrappers if enabled +#ifdef LFS_THREADSAFE +#define LFS_LOCK(cfg) cfg->lock(cfg) +#define LFS_UNLOCK(cfg) cfg->unlock(cfg) +#else +#define LFS_LOCK(cfg) ((void)cfg, 0) +#define LFS_UNLOCK(cfg) ((void)cfg) +#endif + +// Public API +#ifndef LFS_READONLY +int lfs_format(lfs_t *lfs, const struct lfs_config *cfg) { + int err = LFS_LOCK(cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_format(%p, %p {.context=%p, " + ".read=%p, .prog=%p, .erase=%p, .sync=%p, " + ".read_size=%"PRIu32", .prog_size=%"PRIu32", " + ".block_size=%"PRIu32", .block_count=%"PRIu32", " + 
".block_cycles=%"PRIu32", .cache_size=%"PRIu32", " + ".lookahead_size=%"PRIu32", .read_buffer=%p, " + ".prog_buffer=%p, .lookahead_buffer=%p, " + ".name_max=%"PRIu32", .file_max=%"PRIu32", " + ".attr_max=%"PRIu32"})", + (void*)lfs, (void*)cfg, cfg->context, + (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog, + (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync, + cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count, + cfg->block_cycles, cfg->cache_size, cfg->lookahead_size, + cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer, + cfg->name_max, cfg->file_max, cfg->attr_max); + + err = lfs_rawformat(lfs, cfg); + + LFS_TRACE("lfs_format -> %d", err); + LFS_UNLOCK(cfg); + return err; +} +#endif + +int lfs_mount(lfs_t *lfs, const struct lfs_config *cfg) { + int err = LFS_LOCK(cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_mount(%p, %p {.context=%p, " + ".read=%p, .prog=%p, .erase=%p, .sync=%p, " + ".read_size=%"PRIu32", .prog_size=%"PRIu32", " + ".block_size=%"PRIu32", .block_count=%"PRIu32", " + ".block_cycles=%"PRIu32", .cache_size=%"PRIu32", " + ".lookahead_size=%"PRIu32", .read_buffer=%p, " + ".prog_buffer=%p, .lookahead_buffer=%p, " + ".name_max=%"PRIu32", .file_max=%"PRIu32", " + ".attr_max=%"PRIu32"})", + (void*)lfs, (void*)cfg, cfg->context, + (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog, + (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync, + cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count, + cfg->block_cycles, cfg->cache_size, cfg->lookahead_size, + cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer, + cfg->name_max, cfg->file_max, cfg->attr_max); + + err = lfs_rawmount(lfs, cfg); + + LFS_TRACE("lfs_mount -> %d", err); + LFS_UNLOCK(cfg); + return err; +} + +int lfs_unmount(lfs_t *lfs) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_unmount(%p)", (void*)lfs); + + err = lfs_rawunmount(lfs); + + LFS_TRACE("lfs_unmount -> %d", err); + 
LFS_UNLOCK(lfs->cfg); + return err; +} + +#ifndef LFS_READONLY +int lfs_remove(lfs_t *lfs, const char *path) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_remove(%p, \"%s\")", (void*)lfs, path); + + err = lfs_rawremove(lfs, path); + + LFS_TRACE("lfs_remove -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} +#endif + +#ifndef LFS_READONLY +int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_rename(%p, \"%s\", \"%s\")", (void*)lfs, oldpath, newpath); + + err = lfs_rawrename(lfs, oldpath, newpath); + + LFS_TRACE("lfs_rename -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} +#endif + +int lfs_stat(lfs_t *lfs, const char *path, struct lfs_info *info) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_stat(%p, \"%s\", %p)", (void*)lfs, path, (void*)info); + + err = lfs_rawstat(lfs, path, info); + + LFS_TRACE("lfs_stat -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} + +lfs_ssize_t lfs_getattr(lfs_t *lfs, const char *path, + uint8_t type, void *buffer, lfs_size_t size) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_getattr(%p, \"%s\", %"PRIu8", %p, %"PRIu32")", + (void*)lfs, path, type, buffer, size); + + lfs_ssize_t res = lfs_rawgetattr(lfs, path, type, buffer, size); + + LFS_TRACE("lfs_getattr -> %"PRId32, res); + LFS_UNLOCK(lfs->cfg); + return res; +} + +#ifndef LFS_READONLY +int lfs_setattr(lfs_t *lfs, const char *path, + uint8_t type, const void *buffer, lfs_size_t size) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_setattr(%p, \"%s\", %"PRIu8", %p, %"PRIu32")", + (void*)lfs, path, type, buffer, size); + + err = lfs_rawsetattr(lfs, path, type, buffer, size); + + LFS_TRACE("lfs_setattr -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} +#endif + +#ifndef LFS_READONLY +int lfs_removeattr(lfs_t *lfs, const char *path, 
uint8_t type) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_removeattr(%p, \"%s\", %"PRIu8")", (void*)lfs, path, type); + + err = lfs_rawremoveattr(lfs, path, type); + + LFS_TRACE("lfs_removeattr -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} +#endif + +#ifndef LFS_NO_MALLOC +int lfs_file_open(lfs_t *lfs, lfs_file_t *file, const char *path, int flags) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_file_open(%p, %p, \"%s\", %x)", + (void*)lfs, (void*)file, path, flags); + LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file)); + + err = lfs_file_rawopen(lfs, file, path, flags); + + LFS_TRACE("lfs_file_open -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} +#endif + +int lfs_file_opencfg(lfs_t *lfs, lfs_file_t *file, + const char *path, int flags, + const struct lfs_file_config *cfg) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_file_opencfg(%p, %p, \"%s\", %x, %p {" + ".buffer=%p, .attrs=%p, .attr_count=%"PRIu32"})", + (void*)lfs, (void*)file, path, flags, + (void*)cfg, cfg->buffer, (void*)cfg->attrs, cfg->attr_count); + LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file)); + + err = lfs_file_rawopencfg(lfs, file, path, flags, cfg); + + LFS_TRACE("lfs_file_opencfg -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} + +int lfs_file_close(lfs_t *lfs, lfs_file_t *file) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_file_close(%p, %p)", (void*)lfs, (void*)file); + LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file)); + + err = lfs_file_rawclose(lfs, file); + + LFS_TRACE("lfs_file_close -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} + +#ifndef LFS_READONLY +int lfs_file_sync(lfs_t *lfs, lfs_file_t *file) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_file_sync(%p, %p)", (void*)lfs, (void*)file); + 
LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file)); + + err = lfs_file_rawsync(lfs, file); + + LFS_TRACE("lfs_file_sync -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} +#endif + +lfs_ssize_t lfs_file_read(lfs_t *lfs, lfs_file_t *file, + void *buffer, lfs_size_t size) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_file_read(%p, %p, %p, %"PRIu32")", + (void*)lfs, (void*)file, buffer, size); + LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file)); + + lfs_ssize_t res = lfs_file_rawread(lfs, file, buffer, size); + + LFS_TRACE("lfs_file_read -> %"PRId32, res); + LFS_UNLOCK(lfs->cfg); + return res; +} + +#ifndef LFS_READONLY +lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file, + const void *buffer, lfs_size_t size) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_file_write(%p, %p, %p, %"PRIu32")", + (void*)lfs, (void*)file, buffer, size); + LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file)); + + lfs_ssize_t res = lfs_file_rawwrite(lfs, file, buffer, size); + + LFS_TRACE("lfs_file_write -> %"PRId32, res); + LFS_UNLOCK(lfs->cfg); + return res; +} +#endif + +lfs_soff_t lfs_file_seek(lfs_t *lfs, lfs_file_t *file, + lfs_soff_t off, int whence) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_file_seek(%p, %p, %"PRId32", %d)", + (void*)lfs, (void*)file, off, whence); + LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file)); + + lfs_soff_t res = lfs_file_rawseek(lfs, file, off, whence); + + LFS_TRACE("lfs_file_seek -> %"PRId32, res); + LFS_UNLOCK(lfs->cfg); + return res; +} + +#ifndef LFS_READONLY +int lfs_file_truncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_file_truncate(%p, %p, %"PRIu32")", + (void*)lfs, (void*)file, size); + LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file)); + + err = 
lfs_file_rawtruncate(lfs, file, size); + + LFS_TRACE("lfs_file_truncate -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} +#endif + +lfs_soff_t lfs_file_tell(lfs_t *lfs, lfs_file_t *file) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_file_tell(%p, %p)", (void*)lfs, (void*)file); + LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file)); + + lfs_soff_t res = lfs_file_rawtell(lfs, file); + + LFS_TRACE("lfs_file_tell -> %"PRId32, res); + LFS_UNLOCK(lfs->cfg); + return res; +} + +int lfs_file_rewind(lfs_t *lfs, lfs_file_t *file) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_file_rewind(%p, %p)", (void*)lfs, (void*)file); + + err = lfs_file_rawrewind(lfs, file); + + LFS_TRACE("lfs_file_rewind -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} + +lfs_soff_t lfs_file_size(lfs_t *lfs, lfs_file_t *file) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_file_size(%p, %p)", (void*)lfs, (void*)file); + LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file)); + + lfs_soff_t res = lfs_file_rawsize(lfs, file); + + LFS_TRACE("lfs_file_size -> %"PRId32, res); + LFS_UNLOCK(lfs->cfg); + return res; +} + +#ifndef LFS_READONLY +int lfs_mkdir(lfs_t *lfs, const char *path) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_mkdir(%p, \"%s\")", (void*)lfs, path); + + err = lfs_rawmkdir(lfs, path); + + LFS_TRACE("lfs_mkdir -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} +#endif + +int lfs_dir_open(lfs_t *lfs, lfs_dir_t *dir, const char *path) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_dir_open(%p, %p, \"%s\")", (void*)lfs, (void*)dir, path); + LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)dir)); + + err = lfs_dir_rawopen(lfs, dir, path); + + LFS_TRACE("lfs_dir_open -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} + +int lfs_dir_close(lfs_t *lfs, lfs_dir_t *dir) { + 
int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_dir_close(%p, %p)", (void*)lfs, (void*)dir); + + err = lfs_dir_rawclose(lfs, dir); + + LFS_TRACE("lfs_dir_close -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} + +int lfs_dir_read(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_dir_read(%p, %p, %p)", + (void*)lfs, (void*)dir, (void*)info); + + err = lfs_dir_rawread(lfs, dir, info); + + LFS_TRACE("lfs_dir_read -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} + +int lfs_dir_seek(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_dir_seek(%p, %p, %"PRIu32")", + (void*)lfs, (void*)dir, off); + + err = lfs_dir_rawseek(lfs, dir, off); + + LFS_TRACE("lfs_dir_seek -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} + +lfs_soff_t lfs_dir_tell(lfs_t *lfs, lfs_dir_t *dir) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_dir_tell(%p, %p)", (void*)lfs, (void*)dir); + + lfs_soff_t res = lfs_dir_rawtell(lfs, dir); + + LFS_TRACE("lfs_dir_tell -> %"PRId32, res); + LFS_UNLOCK(lfs->cfg); + return res; +} + +int lfs_dir_rewind(lfs_t *lfs, lfs_dir_t *dir) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_dir_rewind(%p, %p)", (void*)lfs, (void*)dir); + + err = lfs_dir_rawrewind(lfs, dir); + + LFS_TRACE("lfs_dir_rewind -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} + +lfs_ssize_t lfs_fs_size(lfs_t *lfs) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_fs_size(%p)", (void*)lfs); + + lfs_ssize_t res = lfs_fs_rawsize(lfs); + + LFS_TRACE("lfs_fs_size -> %"PRId32, res); + LFS_UNLOCK(lfs->cfg); + return res; +} + +int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void *, lfs_block_t), void *data) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_fs_traverse(%p, %p, 
%p)", + (void*)lfs, (void*)(uintptr_t)cb, data); + + err = lfs_fs_rawtraverse(lfs, cb, data, true); + + LFS_TRACE("lfs_fs_traverse -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} + +#ifdef LFS_MIGRATE +int lfs_migrate(lfs_t *lfs, const struct lfs_config *cfg) { + int err = LFS_LOCK(cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_migrate(%p, %p {.context=%p, " + ".read=%p, .prog=%p, .erase=%p, .sync=%p, " + ".read_size=%"PRIu32", .prog_size=%"PRIu32", " + ".block_size=%"PRIu32", .block_count=%"PRIu32", " + ".block_cycles=%"PRIu32", .cache_size=%"PRIu32", " + ".lookahead_size=%"PRIu32", .read_buffer=%p, " + ".prog_buffer=%p, .lookahead_buffer=%p, " + ".name_max=%"PRIu32", .file_max=%"PRIu32", " + ".attr_max=%"PRIu32"})", + (void*)lfs, (void*)cfg, cfg->context, + (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog, + (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync, + cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count, + cfg->block_cycles, cfg->cache_size, cfg->lookahead_size, + cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer, + cfg->name_max, cfg->file_max, cfg->attr_max); + + err = lfs_rawmigrate(lfs, cfg); + + LFS_TRACE("lfs_migrate -> %d", err); + LFS_UNLOCK(cfg); + return err; +} +#endif + diff --git a/cube/swiss/source/devices/kunaigc/lfs.h b/cube/swiss/source/devices/kunaigc/lfs.h new file mode 100755 index 00000000..9c95a097 --- /dev/null +++ b/cube/swiss/source/devices/kunaigc/lfs.h @@ -0,0 +1,703 @@ +/* + * The little filesystem + * + * Copyright (c) 2022, The littlefs authors. + * Copyright (c) 2017, Arm Limited. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef LFS_H +#define LFS_H + +//#define LFS_YES_TRACE 1 + +#include +#include +#include "lfs_util.h" + +#ifdef __cplusplus +extern "C" +{ +#endif + + +/// Version info /// + +// Software library version +// Major (top-nibble), incremented on backwards incompatible changes +// Minor (bottom-nibble), incremented on feature additions +#define LFS_VERSION 0x00020005 +#define LFS_VERSION_MAJOR (0xffff & (LFS_VERSION >> 16)) +#define LFS_VERSION_MINOR (0xffff & (LFS_VERSION >> 0)) + +// Version of On-disk data structures +// Major (top-nibble), incremented on backwards incompatible changes +// Minor (bottom-nibble), incremented on feature additions +#define LFS_DISK_VERSION 0x00020000 +#define LFS_DISK_VERSION_MAJOR (0xffff & (LFS_DISK_VERSION >> 16)) +#define LFS_DISK_VERSION_MINOR (0xffff & (LFS_DISK_VERSION >> 0)) + + +/// Definitions /// + +// Type definitions +typedef uint32_t lfs_size_t; +typedef uint32_t lfs_off_t; + +typedef int32_t lfs_ssize_t; +typedef int32_t lfs_soff_t; + +typedef uint32_t lfs_block_t; + +// Maximum name size in bytes, may be redefined to reduce the size of the +// info struct. Limited to <= 1022. Stored in superblock and must be +// respected by other littlefs drivers. +#ifndef LFS_NAME_MAX +#define LFS_NAME_MAX 255 +#endif + +// Maximum size of a file in bytes, may be redefined to limit to support other +// drivers. Limited on disk to <= 4294967296. However, above 2147483647 the +// functions lfs_file_seek, lfs_file_size, and lfs_file_tell will return +// incorrect values due to using signed integers. Stored in superblock and +// must be respected by other littlefs drivers. +#ifndef LFS_FILE_MAX +#define LFS_FILE_MAX 2147483647 +#endif + +// Maximum size of custom attributes in bytes, may be redefined, but there is +// no real benefit to using a smaller LFS_ATTR_MAX. Limited to <= 1022. 
+#ifndef LFS_ATTR_MAX +#define LFS_ATTR_MAX 1022 +#endif + +// Possible error codes, these are negative to allow +// valid positive return values +enum lfs_error { + LFS_ERR_OK = 0, // No error + LFS_ERR_IO = -5, // Error during device operation + LFS_ERR_CORRUPT = -84, // Corrupted + LFS_ERR_NOENT = -2, // No directory entry + LFS_ERR_EXIST = -17, // Entry already exists + LFS_ERR_NOTDIR = -20, // Entry is not a dir + LFS_ERR_ISDIR = -21, // Entry is a dir + LFS_ERR_NOTEMPTY = -39, // Dir is not empty + LFS_ERR_BADF = -9, // Bad file number + LFS_ERR_FBIG = -27, // File too large + LFS_ERR_INVAL = -22, // Invalid parameter + LFS_ERR_NOSPC = -28, // No space left on device + LFS_ERR_NOMEM = -12, // No more memory available + LFS_ERR_NOATTR = -61, // No data/attr available + LFS_ERR_NAMETOOLONG = -36, // File name too long +}; + +// File types +enum lfs_type { + // file types + LFS_TYPE_REG = 0x001, + LFS_TYPE_DIR = 0x002, + + // internally used types + LFS_TYPE_SPLICE = 0x400, + LFS_TYPE_NAME = 0x000, + LFS_TYPE_STRUCT = 0x200, + LFS_TYPE_USERATTR = 0x300, + LFS_TYPE_FROM = 0x100, + LFS_TYPE_TAIL = 0x600, + LFS_TYPE_GLOBALS = 0x700, + LFS_TYPE_CRC = 0x500, + + // internally used type specializations + LFS_TYPE_CREATE = 0x401, + LFS_TYPE_DELETE = 0x4ff, + LFS_TYPE_SUPERBLOCK = 0x0ff, + LFS_TYPE_DIRSTRUCT = 0x200, + LFS_TYPE_CTZSTRUCT = 0x202, + LFS_TYPE_INLINESTRUCT = 0x201, + LFS_TYPE_SOFTTAIL = 0x600, + LFS_TYPE_HARDTAIL = 0x601, + LFS_TYPE_MOVESTATE = 0x7ff, + + // internal chip sources + LFS_FROM_NOOP = 0x000, + LFS_FROM_MOVE = 0x101, + LFS_FROM_USERATTRS = 0x102, +}; + +// File open flags +enum lfs_open_flags { + // open flags + LFS_O_RDONLY = 1, // Open a file as read only +#ifndef LFS_READONLY + LFS_O_WRONLY = 2, // Open a file as write only + LFS_O_RDWR = 3, // Open a file as read and write + LFS_O_CREAT = 0x0100, // Create a file if it does not exist + LFS_O_EXCL = 0x0200, // Fail if a file already exists + LFS_O_TRUNC = 0x0400, // Truncate the existing 
file to zero size + LFS_O_APPEND = 0x0800, // Move to end of file on every write +#endif + + // internally used flags +#ifndef LFS_READONLY + LFS_F_DIRTY = 0x010000, // File does not match storage + LFS_F_WRITING = 0x020000, // File has been written since last flush +#endif + LFS_F_READING = 0x040000, // File has been read since last flush +#ifndef LFS_READONLY + LFS_F_ERRED = 0x080000, // An error occurred during write +#endif + LFS_F_INLINE = 0x100000, // Currently inlined in directory entry +}; + +// File seek flags +enum lfs_whence_flags { + LFS_SEEK_SET = 0, // Seek relative to an absolute position + LFS_SEEK_CUR = 1, // Seek relative to the current file position + LFS_SEEK_END = 2, // Seek relative to the end of the file +}; + + +// Configuration provided during initialization of the littlefs +struct lfs_config { + // Opaque user provided context that can be used to pass + // information to the block device operations + void *context; + + // Read a region in a block. Negative error codes are propagated + // to the user. + int (*read)(const struct lfs_config *c, lfs_block_t block, + lfs_off_t off, void *buffer, lfs_size_t size); + + // Program a region in a block. The block must have previously + // been erased. Negative error codes are propagated to the user. + // May return LFS_ERR_CORRUPT if the block should be considered bad. + int (*prog)(const struct lfs_config *c, lfs_block_t block, + lfs_off_t off, const void *buffer, lfs_size_t size); + + // Erase a block. A block must be erased before being programmed. + // The state of an erased block is undefined. Negative error codes + // are propagated to the user. + // May return LFS_ERR_CORRUPT if the block should be considered bad. + int (*erase)(const struct lfs_config *c, lfs_block_t block); + + // Sync the state of the underlying block device. Negative error codes + // are propagated to the user. + int (*sync)(const struct lfs_config *c); + +#ifdef LFS_THREADSAFE + // Lock the underlying block device. 
Negative error codes + // are propagated to the user. + int (*lock)(const struct lfs_config *c); + + // Unlock the underlying block device. Negative error codes + // are propagated to the user. + int (*unlock)(const struct lfs_config *c); +#endif + + // Minimum size of a block read in bytes. All read operations will be a + // multiple of this value. + lfs_size_t read_size; + + // Minimum size of a block program in bytes. All program operations will be + // a multiple of this value. + lfs_size_t prog_size; + + // Size of an erasable block in bytes. This does not impact ram consumption + // and may be larger than the physical erase size. However, non-inlined + // files take up at minimum one block. Must be a multiple of the read and + // program sizes. + lfs_size_t block_size; + + // Number of erasable blocks on the device. + lfs_size_t block_count; + + // Number of erase cycles before littlefs evicts metadata logs and moves + // the metadata to another block. Suggested values are in the + // range 100-1000, with large values having better performance at the cost + // of less consistent wear distribution. + // + // Set to -1 to disable block-level wear-leveling. + int32_t block_cycles; + + // Size of block caches in bytes. Each cache buffers a portion of a block in + // RAM. The littlefs needs a read cache, a program cache, and one additional + // cache per file. Larger caches can improve performance by storing more + // data and reducing the number of disk accesses. Must be a multiple of the + // read and program sizes, and a factor of the block size. + lfs_size_t cache_size; + + // Size of the lookahead buffer in bytes. A larger lookahead buffer + // increases the number of blocks found during an allocation pass. The + // lookahead buffer is stored as a compact bitmap, so each byte of RAM + // can track 8 blocks. Must be a multiple of 8. + lfs_size_t lookahead_size; + + // Optional statically allocated read buffer. Must be cache_size. 
+ // By default lfs_malloc is used to allocate this buffer. + void *read_buffer; + + // Optional statically allocated program buffer. Must be cache_size. + // By default lfs_malloc is used to allocate this buffer. + void *prog_buffer; + + // Optional statically allocated lookahead buffer. Must be lookahead_size + // and aligned to a 32-bit boundary. By default lfs_malloc is used to + // allocate this buffer. + void *lookahead_buffer; + + // Optional upper limit on length of file names in bytes. No downside for + // larger names except the size of the info struct which is controlled by + // the LFS_NAME_MAX define. Defaults to LFS_NAME_MAX when zero. Stored in + // superblock and must be respected by other littlefs drivers. + lfs_size_t name_max; + + // Optional upper limit on files in bytes. No downside for larger files + // but must be <= LFS_FILE_MAX. Defaults to LFS_FILE_MAX when zero. Stored + // in superblock and must be respected by other littlefs drivers. + lfs_size_t file_max; + + // Optional upper limit on custom attributes in bytes. No downside for + // larger attributes size but must be <= LFS_ATTR_MAX. Defaults to + // LFS_ATTR_MAX when zero. + lfs_size_t attr_max; + + // Optional upper limit on total space given to metadata pairs in bytes. On + // devices with large blocks (e.g. 128kB) setting this to a low size (2-8kB) + // can help bound the metadata compaction time. Must be <= block_size. + // Defaults to block_size when zero. + lfs_size_t metadata_max; +}; + +// File info structure +struct lfs_info { + // Type of the file, either LFS_TYPE_REG or LFS_TYPE_DIR + uint8_t type; + + // Size of the file, only valid for REG files. Limited to 32-bits. + lfs_size_t size; + + // Name of the file stored as a null-terminated string. Limited to + // LFS_NAME_MAX+1, which can be changed by redefining LFS_NAME_MAX to + // reduce RAM. LFS_NAME_MAX is stored in superblock and must be + // respected by other littlefs drivers. 
+ char name[LFS_NAME_MAX+1]; +}; + +// Custom attribute structure, used to describe custom attributes +// committed atomically during file writes. +struct lfs_attr { + // 8-bit type of attribute, provided by user and used to + // identify the attribute + uint8_t type; + + // Pointer to buffer containing the attribute + void *buffer; + + // Size of attribute in bytes, limited to LFS_ATTR_MAX + lfs_size_t size; +}; + +// Optional configuration provided during lfs_file_opencfg +struct lfs_file_config { + // Optional statically allocated file buffer. Must be cache_size. + // By default lfs_malloc is used to allocate this buffer. + void *buffer; + + // Optional list of custom attributes related to the file. If the file + // is opened with read access, these attributes will be read from disk + // during the open call. If the file is opened with write access, the + // attributes will be written to disk every file sync or close. This + // write occurs atomically with update to the file's contents. + // + // Custom attributes are uniquely identified by an 8-bit type and limited + // to LFS_ATTR_MAX bytes. When read, if the stored attribute is smaller + // than the buffer, it will be padded with zeros. If the stored attribute + // is larger, then it will be silently truncated. If the attribute is not + // found, it will be created implicitly. 
+ struct lfs_attr *attrs; + + // Number of custom attributes in the list + lfs_size_t attr_count; +}; + + +/// internal littlefs data structures /// +typedef struct lfs_cache { + lfs_block_t block; + lfs_off_t off; + lfs_size_t size; + uint8_t *buffer; +} lfs_cache_t; + +typedef struct lfs_mdir { + lfs_block_t pair[2]; + uint32_t rev; + lfs_off_t off; + uint32_t etag; + uint16_t count; + bool erased; + bool split; + lfs_block_t tail[2]; +} lfs_mdir_t; + +// littlefs directory type +typedef struct lfs_dir { + struct lfs_dir *next; + uint16_t id; + uint8_t type; + lfs_mdir_t m; + + lfs_off_t pos; + lfs_block_t head[2]; +} lfs_dir_t; + +// littlefs file type +typedef struct lfs_file { + struct lfs_file *next; + uint16_t id; + uint8_t type; + lfs_mdir_t m; + + struct lfs_ctz { + lfs_block_t head; + lfs_size_t size; + } ctz; + + uint32_t flags; + lfs_off_t pos; + lfs_block_t block; + lfs_off_t off; + lfs_cache_t cache; + + const struct lfs_file_config *cfg; +} lfs_file_t; + +typedef struct lfs_superblock { + uint32_t version; + lfs_size_t block_size; + lfs_size_t block_count; + lfs_size_t name_max; + lfs_size_t file_max; + lfs_size_t attr_max; +} lfs_superblock_t; + +typedef struct lfs_gstate { + uint32_t tag; + lfs_block_t pair[2]; +} lfs_gstate_t; + +// The littlefs filesystem type +typedef struct lfs { + lfs_cache_t rcache; + lfs_cache_t pcache; + + lfs_block_t root[2]; + struct lfs_mlist { + struct lfs_mlist *next; + uint16_t id; + uint8_t type; + lfs_mdir_t m; + } *mlist; + uint32_t seed; + + lfs_gstate_t gstate; + lfs_gstate_t gdisk; + lfs_gstate_t gdelta; + + struct lfs_free { + lfs_block_t off; + lfs_block_t size; + lfs_block_t i; + lfs_block_t ack; + uint32_t *buffer; + } free; + + const struct lfs_config *cfg; + lfs_size_t name_max; + lfs_size_t file_max; + lfs_size_t attr_max; + +#ifdef LFS_MIGRATE + struct lfs1 *lfs1; +#endif +} lfs_t; + + +/// Filesystem functions /// + +#ifndef LFS_READONLY +// Format a block device with the littlefs +// +// Requires a 
littlefs object and config struct. This clobbers the littlefs +// object, and does not leave the filesystem mounted. The config struct must +// be zeroed for defaults and backwards compatibility. +// +// Returns a negative error code on failure. +int lfs_format(lfs_t *lfs, const struct lfs_config *config); +#endif + +// Mounts a littlefs +// +// Requires a littlefs object and config struct. Multiple filesystems +// may be mounted simultaneously with multiple littlefs objects. Both +// lfs and config must be allocated while mounted. The config struct must +// be zeroed for defaults and backwards compatibility. +// +// Returns a negative error code on failure. +int lfs_mount(lfs_t *lfs, const struct lfs_config *config); + +// Unmounts a littlefs +// +// Does nothing besides releasing any allocated resources. +// Returns a negative error code on failure. +int lfs_unmount(lfs_t *lfs); + +/// General operations /// + +#ifndef LFS_READONLY +// Removes a file or directory +// +// If removing a directory, the directory must be empty. +// Returns a negative error code on failure. +int lfs_remove(lfs_t *lfs, const char *path); +#endif + +#ifndef LFS_READONLY +// Rename or move a file or directory +// +// If the destination exists, it must match the source in type. +// If the destination is a directory, the directory must be empty. +// +// Returns a negative error code on failure. +int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath); +#endif + +// Find info about a file or directory +// +// Fills out the info structure, based on the specified file or directory. +// Returns a negative error code on failure. +int lfs_stat(lfs_t *lfs, const char *path, struct lfs_info *info); + +// Get a custom attribute +// +// Custom attributes are uniquely identified by an 8-bit type and limited +// to LFS_ATTR_MAX bytes. When read, if the stored attribute is smaller than +// the buffer, it will be padded with zeros. 
If the stored attribute is larger, +// then it will be silently truncated. If no attribute is found, the error +// LFS_ERR_NOATTR is returned and the buffer is filled with zeros. +// +// Returns the size of the attribute, or a negative error code on failure. +// Note, the returned size is the size of the attribute on disk, irrespective +// of the size of the buffer. This can be used to dynamically allocate a buffer +// or check for existence. +lfs_ssize_t lfs_getattr(lfs_t *lfs, const char *path, + uint8_t type, void *buffer, lfs_size_t size); + +#ifndef LFS_READONLY +// Set custom attributes +// +// Custom attributes are uniquely identified by an 8-bit type and limited +// to LFS_ATTR_MAX bytes. If an attribute is not found, it will be +// implicitly created. +// +// Returns a negative error code on failure. +int lfs_setattr(lfs_t *lfs, const char *path, + uint8_t type, const void *buffer, lfs_size_t size); +#endif + +#ifndef LFS_READONLY +// Removes a custom attribute +// +// If an attribute is not found, nothing happens. +// +// Returns a negative error code on failure. +int lfs_removeattr(lfs_t *lfs, const char *path, uint8_t type); +#endif + + +/// File operations /// + +#ifndef LFS_NO_MALLOC +// Open a file +// +// The mode that the file is opened in is determined by the flags, which +// are values from the enum lfs_open_flags that are bitwise-ored together. +// +// Returns a negative error code on failure. +int lfs_file_open(lfs_t *lfs, lfs_file_t *file, + const char *path, int flags); + +// if LFS_NO_MALLOC is defined, lfs_file_open() will fail with LFS_ERR_NOMEM +// thus use lfs_file_opencfg() with config.buffer set. +#endif + +// Open a file with extra configuration +// +// The mode that the file is opened in is determined by the flags, which +// are values from the enum lfs_open_flags that are bitwise-ored together. +// +// The config struct provides additional config options per file as described +// above. 
The config struct must be allocated while the file is open, and the +// config struct must be zeroed for defaults and backwards compatibility. +// +// Returns a negative error code on failure. +int lfs_file_opencfg(lfs_t *lfs, lfs_file_t *file, + const char *path, int flags, + const struct lfs_file_config *config); + +// Close a file +// +// Any pending writes are written out to storage as though +// sync had been called and releases any allocated resources. +// +// Returns a negative error code on failure. +int lfs_file_close(lfs_t *lfs, lfs_file_t *file); + +// Synchronize a file on storage +// +// Any pending writes are written out to storage. +// Returns a negative error code on failure. +int lfs_file_sync(lfs_t *lfs, lfs_file_t *file); + +// Read data from file +// +// Takes a buffer and size indicating where to store the read data. +// Returns the number of bytes read, or a negative error code on failure. +lfs_ssize_t lfs_file_read(lfs_t *lfs, lfs_file_t *file, + void *buffer, lfs_size_t size); + +#ifndef LFS_READONLY +// Write data to file +// +// Takes a buffer and size indicating the data to write. The file will not +// actually be updated on the storage until either sync or close is called. +// +// Returns the number of bytes written, or a negative error code on failure. +lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file, + const void *buffer, lfs_size_t size); +#endif + +// Change the position of the file +// +// The change in position is determined by the offset and whence flag. +// Returns the new position of the file, or a negative error code on failure. +lfs_soff_t lfs_file_seek(lfs_t *lfs, lfs_file_t *file, + lfs_soff_t off, int whence); + +#ifndef LFS_READONLY +// Truncates the size of the file to the specified size +// +// Returns a negative error code on failure. 
+int lfs_file_truncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size); +#endif + +// Return the position of the file +// +// Equivalent to lfs_file_seek(lfs, file, 0, LFS_SEEK_CUR) +// Returns the position of the file, or a negative error code on failure. +lfs_soff_t lfs_file_tell(lfs_t *lfs, lfs_file_t *file); + +// Change the position of the file to the beginning of the file +// +// Equivalent to lfs_file_seek(lfs, file, 0, LFS_SEEK_SET) +// Returns a negative error code on failure. +int lfs_file_rewind(lfs_t *lfs, lfs_file_t *file); + +// Return the size of the file +// +// Similar to lfs_file_seek(lfs, file, 0, LFS_SEEK_END) +// Returns the size of the file, or a negative error code on failure. +lfs_soff_t lfs_file_size(lfs_t *lfs, lfs_file_t *file); + + +/// Directory operations /// + +#ifndef LFS_READONLY +// Create a directory +// +// Returns a negative error code on failure. +int lfs_mkdir(lfs_t *lfs, const char *path); +#endif + +// Open a directory +// +// Once open a directory can be used with read to iterate over files. +// Returns a negative error code on failure. +int lfs_dir_open(lfs_t *lfs, lfs_dir_t *dir, const char *path); + +// Close a directory +// +// Releases any allocated resources. +// Returns a negative error code on failure. +int lfs_dir_close(lfs_t *lfs, lfs_dir_t *dir); + +// Read an entry in the directory +// +// Fills out the info structure, based on the specified file or directory. +// Returns a positive value on success, 0 at the end of directory, +// or a negative error code on failure. +int lfs_dir_read(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info); + +// Change the position of the directory +// +// The new off must be a value previous returned from tell and specifies +// an absolute offset in the directory seek. +// +// Returns a negative error code on failure. 
+int lfs_dir_seek(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off); + +// Return the position of the directory +// +// The returned offset is only meant to be consumed by seek and may not make +// sense, but does indicate the current position in the directory iteration. +// +// Returns the position of the directory, or a negative error code on failure. +lfs_soff_t lfs_dir_tell(lfs_t *lfs, lfs_dir_t *dir); + +// Change the position of the directory to the beginning of the directory +// +// Returns a negative error code on failure. +int lfs_dir_rewind(lfs_t *lfs, lfs_dir_t *dir); + + +/// Filesystem-level filesystem operations + +// Finds the current size of the filesystem +// +// Note: Result is best effort. If files share COW structures, the returned +// size may be larger than the filesystem actually is. +// +// Returns the number of allocated blocks, or a negative error code on failure. +lfs_ssize_t lfs_fs_size(lfs_t *lfs); + +// Traverse through all blocks in use by the filesystem +// +// The provided callback will be called with each block address that is +// currently in use by the filesystem. This can be used to determine which +// blocks are in use or how much of the storage is available. +// +// Returns a negative error code on failure. +int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data); + +#ifndef LFS_READONLY +#ifdef LFS_MIGRATE +// Attempts to migrate a previous version of littlefs +// +// Behaves similarly to the lfs_format function. Attempts to mount +// the previous version of littlefs and update the filesystem so it can be +// mounted with the current version of littlefs. +// +// Requires a littlefs object and config struct. This clobbers the littlefs +// object, and does not leave the filesystem mounted. The config struct must +// be zeroed for defaults and backwards compatibility. +// +// Returns a negative error code on failure. 
+int lfs_migrate(lfs_t *lfs, const struct lfs_config *cfg); +#endif +#endif + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif diff --git a/cube/swiss/source/devices/kunaigc/lfs_util.c b/cube/swiss/source/devices/kunaigc/lfs_util.c new file mode 100755 index 00000000..9cdd1c60 --- /dev/null +++ b/cube/swiss/source/devices/kunaigc/lfs_util.c @@ -0,0 +1,34 @@ +/* + * lfs util functions + * + * Copyright (c) 2022, The littlefs authors. + * Copyright (c) 2017, Arm Limited. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + */ +#include "lfs_util.h" + +// Only compile if user does not provide custom config +#ifndef LFS_CONFIG + + +// Software CRC implementation with small lookup table +uint32_t lfs_crc(uint32_t crc, const void *buffer, size_t size) { + static const uint32_t rtable[16] = { + 0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac, + 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c, + 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c, + 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c, + }; + + const uint8_t *data = buffer; + + for (size_t i = 0; i < size; i++) { + crc = (crc >> 4) ^ rtable[(crc ^ (data[i] >> 0)) & 0xf]; + crc = (crc >> 4) ^ rtable[(crc ^ (data[i] >> 4)) & 0xf]; + } + + return crc; +} + + +#endif diff --git a/cube/swiss/source/devices/kunaigc/lfs_util.h b/cube/swiss/source/devices/kunaigc/lfs_util.h new file mode 100755 index 00000000..0cbc2a31 --- /dev/null +++ b/cube/swiss/source/devices/kunaigc/lfs_util.h @@ -0,0 +1,245 @@ +/* + * lfs utility functions + * + * Copyright (c) 2022, The littlefs authors. + * Copyright (c) 2017, Arm Limited. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef LFS_UTIL_H +#define LFS_UTIL_H + +// Users can override lfs_util.h with their own configuration by defining +// LFS_CONFIG as a header file to include (-DLFS_CONFIG=lfs_config.h). +// +// If LFS_CONFIG is used, none of the default utils will be emitted and must be +// provided by the config file. 
To start, I would suggest copying lfs_util.h +// and modifying as needed. +#ifdef LFS_CONFIG +#define LFS_STRINGIZE(x) LFS_STRINGIZE2(x) +#define LFS_STRINGIZE2(x) #x +#include LFS_STRINGIZE(LFS_CONFIG) +#else + +// System includes +#include +#include +#include +#include + +#ifndef LFS_NO_MALLOC +#include +#endif +#ifndef LFS_NO_ASSERT +#include +#endif +#if !defined(LFS_NO_DEBUG) || \ + !defined(LFS_NO_WARN) || \ + !defined(LFS_NO_ERROR) || \ + defined(LFS_YES_TRACE) +#include +#endif + +#ifdef __cplusplus +extern "C" +{ +#endif + + +// Macros, may be replaced by system specific wrappers. Arguments to these +// macros must not have side-effects as the macros can be removed for a smaller +// code footprint + +// Logging functions +#ifndef LFS_TRACE +#ifdef LFS_YES_TRACE +#define LFS_TRACE_(fmt, ...) \ + printf("%s:%d:trace: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__) +#define LFS_TRACE(...) LFS_TRACE_(__VA_ARGS__, "") +#else +#define LFS_TRACE(...) +#endif +#endif + +#ifndef LFS_DEBUG +#ifndef LFS_NO_DEBUG +#define LFS_DEBUG_(fmt, ...) \ + printf("%s:%d:debug: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__) +#define LFS_DEBUG(...) LFS_DEBUG_(__VA_ARGS__, "") +#else +#define LFS_DEBUG(...) +#endif +#endif + +#ifndef LFS_WARN +#ifndef LFS_NO_WARN +#define LFS_WARN_(fmt, ...) \ + printf("%s:%d:warn: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__) +#define LFS_WARN(...) LFS_WARN_(__VA_ARGS__, "") +#else +#define LFS_WARN(...) +#endif +#endif + +#ifndef LFS_ERROR +#ifndef LFS_NO_ERROR +#define LFS_ERROR_(fmt, ...) \ + printf("%s:%d:error: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__) +#define LFS_ERROR(...) LFS_ERROR_(__VA_ARGS__, "") +#else +#define LFS_ERROR(...) +#endif +#endif + +// Runtime assertions +#ifndef LFS_ASSERT +#ifndef LFS_NO_ASSERT +#define LFS_ASSERT(test) assert(test) +#else +#define LFS_ASSERT(test) +#endif +#endif + + +// Builtin functions, these may be replaced by more efficient +// toolchain-specific implementations. 
LFS_NO_INTRINSICS falls back to a more +// expensive basic C implementation for debugging purposes + +// Min/max functions for unsigned 32-bit numbers +static inline uint32_t lfs_max(uint32_t a, uint32_t b) { + return (a > b) ? a : b; +} + +static inline uint32_t lfs_min(uint32_t a, uint32_t b) { + return (a < b) ? a : b; +} + +// Align to nearest multiple of a size +static inline uint32_t lfs_aligndown(uint32_t a, uint32_t alignment) { + return a - (a % alignment); +} + +static inline uint32_t lfs_alignup(uint32_t a, uint32_t alignment) { + return lfs_aligndown(a + alignment-1, alignment); +} + +// Find the smallest power of 2 greater than or equal to a +static inline uint32_t lfs_npw2(uint32_t a) { +#if !defined(LFS_NO_INTRINSICS) && (defined(__GNUC__) || defined(__CC_ARM)) + return 32 - __builtin_clz(a-1); +#else + uint32_t r = 0; + uint32_t s; + a -= 1; + s = (a > 0xffff) << 4; a >>= s; r |= s; + s = (a > 0xff ) << 3; a >>= s; r |= s; + s = (a > 0xf ) << 2; a >>= s; r |= s; + s = (a > 0x3 ) << 1; a >>= s; r |= s; + return (r | (a >> 1)) + 1; +#endif +} + +// Count the number of trailing binary zeros in a +// lfs_ctz(0) may be undefined +static inline uint32_t lfs_ctz(uint32_t a) { +#if !defined(LFS_NO_INTRINSICS) && defined(__GNUC__) + return __builtin_ctz(a); +#else + return lfs_npw2((a & -a) + 1) - 1; +#endif +} + +// Count the number of binary ones in a +static inline uint32_t lfs_popc(uint32_t a) { +#if !defined(LFS_NO_INTRINSICS) && (defined(__GNUC__) || defined(__CC_ARM)) + return __builtin_popcount(a); +#else + a = a - ((a >> 1) & 0x55555555); + a = (a & 0x33333333) + ((a >> 2) & 0x33333333); + return (((a + (a >> 4)) & 0xf0f0f0f) * 0x1010101) >> 24; +#endif +} + +// Find the sequence comparison of a and b, this is the distance +// between a and b ignoring overflow +static inline int lfs_scmp(uint32_t a, uint32_t b) { + return (int)(unsigned)(a - b); +} + +// Convert between 32-bit little-endian and native order +static inline uint32_t 
lfs_fromle32(uint32_t a) { +#if !defined(LFS_NO_INTRINSICS) && ( \ + (defined( BYTE_ORDER ) && defined( ORDER_LITTLE_ENDIAN ) && BYTE_ORDER == ORDER_LITTLE_ENDIAN ) || \ + (defined(__BYTE_ORDER ) && defined(__ORDER_LITTLE_ENDIAN ) && __BYTE_ORDER == __ORDER_LITTLE_ENDIAN ) || \ + (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)) + return a; +#elif !defined(LFS_NO_INTRINSICS) && ( \ + (defined( BYTE_ORDER ) && defined( ORDER_BIG_ENDIAN ) && BYTE_ORDER == ORDER_BIG_ENDIAN ) || \ + (defined(__BYTE_ORDER ) && defined(__ORDER_BIG_ENDIAN ) && __BYTE_ORDER == __ORDER_BIG_ENDIAN ) || \ + (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)) + return __builtin_bswap32(a); +#else + return (((uint8_t*)&a)[0] << 0) | + (((uint8_t*)&a)[1] << 8) | + (((uint8_t*)&a)[2] << 16) | + (((uint8_t*)&a)[3] << 24); +#endif +} + +static inline uint32_t lfs_tole32(uint32_t a) { + return lfs_fromle32(a); +} + +// Convert between 32-bit big-endian and native order +static inline uint32_t lfs_frombe32(uint32_t a) { +#if !defined(LFS_NO_INTRINSICS) && ( \ + (defined( BYTE_ORDER ) && defined( ORDER_LITTLE_ENDIAN ) && BYTE_ORDER == ORDER_LITTLE_ENDIAN ) || \ + (defined(__BYTE_ORDER ) && defined(__ORDER_LITTLE_ENDIAN ) && __BYTE_ORDER == __ORDER_LITTLE_ENDIAN ) || \ + (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)) + return __builtin_bswap32(a); +#elif !defined(LFS_NO_INTRINSICS) && ( \ + (defined( BYTE_ORDER ) && defined( ORDER_BIG_ENDIAN ) && BYTE_ORDER == ORDER_BIG_ENDIAN ) || \ + (defined(__BYTE_ORDER ) && defined(__ORDER_BIG_ENDIAN ) && __BYTE_ORDER == __ORDER_BIG_ENDIAN ) || \ + (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)) + return a; +#else + return (((uint8_t*)&a)[0] << 24) | + (((uint8_t*)&a)[1] << 16) | + (((uint8_t*)&a)[2] << 8) | + (((uint8_t*)&a)[3] << 0); +#endif 
+} + +static inline uint32_t lfs_tobe32(uint32_t a) { + return lfs_frombe32(a); +} + +// Calculate CRC-32 with polynomial = 0x04c11db7 +uint32_t lfs_crc(uint32_t crc, const void *buffer, size_t size); + +// Allocate memory, only used if buffers are not provided to littlefs +// Note, memory must be 64-bit aligned +static inline void *lfs_malloc(size_t size) { +#ifndef LFS_NO_MALLOC + return malloc(size); +#else + (void)size; + return NULL; +#endif +} + +// Deallocate memory, only used if buffers are not provided to littlefs +static inline void lfs_free(void *p) { +#ifndef LFS_NO_MALLOC + free(p); +#else + (void)p; +#endif +} + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif +#endif diff --git a/cube/swiss/source/devices/kunaigc/spiflash.c b/cube/swiss/source/devices/kunaigc/spiflash.c new file mode 100755 index 00000000..d438d527 --- /dev/null +++ b/cube/swiss/source/devices/kunaigc/spiflash.c @@ -0,0 +1,218 @@ +/* (c) 2016-07-01 Jens Hauke */ +#include "spiflash.h" + +//#define SPI_DBG + +uint8_t spiflash_is_busy(void) { + uint8_t res; + spiflash_write(W25Q80BV_CMD_READ_STAT1); + res = spiflash_read_uint8(); + if (res != 0) +#ifdef SPI_DBG + kprintf("SPI status Register is %d\n", res); +#endif + res &= W25Q80BV_MASK_STAT_BUSY; + return res; +} + + +// Wait until not busy +void spiflash_wait(void) { + while (spiflash_is_busy()); +} + +void spiflash_cmd_addr_start(uint8_t cmd, uint32_t addr) { + uint32_t buff = ((uint32_t) cmd << 24) | addr; +#ifdef SPI_DBG + kprintf("\tCommand is %x, Adress: %04x\n", cmd, buff); +#endif + EXI_Imm(EXI_CHANNEL_0, &buff, 4, EXI_WRITE, NULL); + EXI_Sync(EXI_CHANNEL_0); +} + + +void spiflash_write_enable(void) { + spiflash_write(W25Q80BV_CMD_WRITE_ENABLE); +} + +void spiflash_write_disable(void) { + spiflash_write(W25Q80BV_CMD_WRITE_DISABLE); +} + + +void spiflash_write_start(uint32_t addr) { + spiflash_write_enable(); +#ifdef SPI_DBG + kprintf("Write start %04x\n", addr); +#endif + 
spiflash_cmd_addr_start(W25Q80BV_CMD_PAGE_PROG, addr); +} + + +void spiflash_read_start(uint32_t addr) { +#ifdef SPI_DBG + kprintf("Read start %04x\n", addr); +#endif + spiflash_cmd_addr_start(W25Q80BV_CMD_READ_DATA, addr); +} + +void spiflash_read_start_fast(uint32_t addr) { +#ifdef SPI_DBG + kprintf("Read start fast %04x\n", addr); +#endif + spiflash_cmd_addr_start(W25Q80BV_CMD_READ_FAST, addr); + spiflash_read_uint8(); +} + +uint8_t spiflash_read_uint8(void) { + uint8_t val = 0; + EXI_Imm(EXI_CHANNEL_0, &val, 1, EXI_READ, NULL); + EXI_Sync(EXI_CHANNEL_0); +#ifdef SPI_DBG + kprintf("Read u8 %01x\n", val); +#endif + return val; +} + + +uint16_t spiflash_read_uint16(void) { + uint16_t val = 0; + EXI_Imm(EXI_CHANNEL_0, &val, 2, EXI_READ, NULL); + EXI_Sync(EXI_CHANNEL_0); +#ifdef SPI_DBG + kprintf("Read u16 %02x\n", val); +#endif + return val; +} + + +uint32_t spiflash_read_uint32(void) { + uint32_t val = 0; + EXI_Imm(EXI_CHANNEL_0, &val, 4, EXI_READ, NULL); + EXI_Sync(EXI_CHANNEL_0); +#ifdef SPI_DBG + kprintf("Read u32 %04x\n", val); +#endif + return val; +} + + +uint16_t spiflash_read_uint16_le(void) { + uint16_t val = 0; + val = spiflash_read_uint8() | (uint16_t)spiflash_read_uint8() << 8; +#ifdef SPI_DBG + kprintf("Read u16_le %02x\n", val); +#endif + return val; +} + + +uint32_t spiflash_read_uint32_le(void) { + uint32_t val = 0; + val = spiflash_read_uint16_le() | (uint32_t)spiflash_read_uint16_le() << 16; +#ifdef SPI_DBG + kprintf("Read u32_le %04x\n", val); +#endif + return val; +} + + +void spiflash_write_uint16(uint16_t val) { +#ifdef SPI_DBG + kprintf("Write u16 : %02xl\n", val); +#endif + EXI_Imm(EXI_CHANNEL_0, &val, 2, EXI_WRITE, NULL); + EXI_Sync(EXI_CHANNEL_0); +} + + +void spiflash_write_uint32(uint32_t val) { +#ifdef SPI_DBG + kprintf("Write u32 : %04xl\n", val); +#endif + EXI_Imm(EXI_CHANNEL_0, &val, 4, EXI_WRITE, NULL); + EXI_Sync(EXI_CHANNEL_0); +} + + +void spiflash_write_uint16_le(uint16_t val) { +#ifdef SPI_DBG + kprintf("Write u16_le : 
%02xl\n", val); +#endif + spiflash_write_uint8(val); + spiflash_write_uint8(val >> 8); +} + + +void spiflash_write_uint32_le(uint32_t val) { +#ifdef SPI_DBG + kprintf("Write u32_le : %02xl\n", val); +#endif + spiflash_write_uint16_le(val); + spiflash_write_uint16_le(val >> 16); +} + + +void spiflash_erase4k(uint32_t addr) { + spiflash_write_enable(); +#ifdef SPI_DBG +kprintf("Erase 4k\n"); +#endif + spiflash_cmd_addr_start(W25Q80BV_CMD_ERASE_4K, addr); +} + + +void spiflash_erase32k(uint32_t addr) { + #ifdef SPI_DBG +kprintf("Erase 32k\n"); +#endif + spiflash_write_enable(); + spiflash_cmd_addr_start(W25Q80BV_CMD_ERASE_32K, addr); +} + + +void spiflash_erase64k(uint32_t addr) { + spiflash_write_enable(); +#ifdef SPI_DBG + kprintf("Erase 64k\n"); +#endif + spiflash_cmd_addr_start(W25Q80BV_CMD_ERASE_64K, addr); +} + + +void spiflash_chip_erase(void) { + spiflash_write_enable(); + spiflash_write(W25Q80BV_CMD_CHIP_ERASE); +} + + +uint16_t spiflash_device_id(void) { + uint16_t id; +#ifdef SPI_DBG + kprintf("Dev ID\n"); +#endif + spiflash_cmd_addr_start(W25Q80BV_CMD_READ_MAN_DEV_ID, 0x0000); + id = spiflash_read_uint16(); + return id; +} + +uint32_t spiflash_jedec_id(void) { + uint32_t id; + uint8_t cmd = W25Q80BV_CMD_READ_JEDEC_ID; + EXI_Imm(EXI_CHANNEL_0, &cmd, 1, EXI_WRITE, NULL); + EXI_Sync(EXI_CHANNEL_0); + id = spiflash_read_uint32() >> 8; + return id; +} + +uint64_t spiflash_unique_id(void) { + uint64_t id; +#ifdef SPI_DBG + kprintf("Unique ID\n"); +#endif + spiflash_cmd_addr_start(W25Q80BV_CMD_READ_UNIQUE_ID, 0x0000); + spiflash_read(); + id = (uint64_t)spiflash_read_uint32() << 32; + id |= spiflash_read_uint32(); + return id; +} diff --git a/cube/swiss/source/devices/kunaigc/spiflash.h b/cube/swiss/source/devices/kunaigc/spiflash.h new file mode 100755 index 00000000..9f875471 --- /dev/null +++ b/cube/swiss/source/devices/kunaigc/spiflash.h @@ -0,0 +1,139 @@ +/* (c) 2016-07-01 Jens Hauke -*- linux-c -*- */ +#ifndef _SPIFLASH_H_ +#define _SPIFLASH_H_ + +#include 
+#include + +#ifdef __cplusplus +#define _spiflash_h_ { +extern "C" _spiflash_h_ +#endif + + +#define W25Q80BV_CMD_WRITE_ENABLE 0x06 +#define W25Q80BV_CMD_WRITE_DISABLE 0x04 +#define W25Q80BV_CMD_PAGE_PROG 0x02 +#define W25Q80BV_CMD_READ_DATA 0x03 +#define W25Q80BV_CMD_READ_FAST 0x0B +#define W25Q80BV_CMD_READ_STAT1 0x05 +#define W25Q80BV_MASK_STAT_BUSY (1<<0) +#define W25Q80BV_CMD_ERASE_4K 0x20 +#define W25Q80BV_CMD_ERASE_32K 0x52 +#define W25Q80BV_CMD_ERASE_64K 0xD8 +#define W25Q80BV_CMD_CHIP_ERASE 0xC7 /* alternative 0x60 */ +#define W25Q80BV_CMD_READ_MAN_DEV_ID 0x90 +#define W25Q80BV_CMD_READ_JEDEC_ID 0x9F +#define W25Q80BV_CMD_READ_UNIQUE_ID 0x4B +#define W25Q80BV_PAGE_SIZE 256 +#define W25Q80BV_CAPACITY (1L * 1024L * 1024L) + +#define SPIFLASH_PAGE_SIZE W25Q80BV_PAGE_SIZE + +/* + * Generic commands + */ +void spiflash_cmd_start(uint8_t cmd); +void spiflash_cmd_addr_start(uint8_t cmd, uint32_t addr); + +void spiflash_end(void); +void spiflash_end_wait(void); + + +/* + * Writing + */ +void spiflash_write_enable(void); +void spiflash_write_disable(void); +void spiflash_write_start(uint32_t addr); + +static inline +void spiflash_write_uint8(uint8_t val) { + EXI_Imm(EXI_CHANNEL_0, &val, 1, EXI_WRITE, NULL); + EXI_Sync(EXI_CHANNEL_0); +} + + +static inline +void spiflash_write(uint8_t val) { + spiflash_write_uint8(val); +} + + +void spiflash_write_uint16(uint16_t val); +void spiflash_write_uint32(uint32_t val); + +// little-endian write +void spiflash_write_uint16_le(uint16_t val); +void spiflash_write_uint32_le(uint32_t val); + + +static inline +void spiflash_write_end(void) { + spiflash_end_wait(); +} + + +/* + * Reading + */ +void spiflash_read_start(uint32_t addr); +void spiflash_read_start_fast(uint32_t addr); + +uint8_t spiflash_read_uint8(void); +uint16_t spiflash_read_uint16(void); +uint32_t spiflash_read_uint32(void); + +// little-endian read +uint16_t spiflash_read_uint16_le(void); +uint32_t spiflash_read_uint32_le(void); + +static inline +uint8_t 
spiflash_read(void) { + return spiflash_read_uint8(); +} + +static inline +void spiflash_read_end(void) { + spiflash_end(); +} + + +/* + * Erase + */ +void spiflash_erase4k(uint32_t addr); +void spiflash_erase32k(uint32_t addr); +void spiflash_erase64k(uint32_t addr); +void spiflash_chip_erase(void); + + +/* + * Misc + */ + +// spiflash_device_id: return manufacturer/device id +// W25Q80BV: 0xEF13 +uint16_t spiflash_device_id(void); +uint32_t spiflash_jedec_id(void); +uint64_t spiflash_unique_id(void); + +// Status-1 BUSY-bit set? +uint8_t spiflash_is_busy(void); + +// Wait until not busy +void spiflash_wait(void); + +static inline +uint32_t spiflash_capacity(void) { + if (spiflash_device_id() == 0xEF13) { + return W25Q80BV_CAPACITY; + } else { + return ~0L; + } +} + +#ifdef __cplusplus +} +#endif +#endif /* _SPIFLASH_H_ */ diff --git a/cube/swiss/source/gui/FrameBufferMagic.c b/cube/swiss/source/gui/FrameBufferMagic.c index 632f3d0a..78df0532 100644 --- a/cube/swiss/source/gui/FrameBufferMagic.c +++ b/cube/swiss/source/gui/FrameBufferMagic.c @@ -42,6 +42,7 @@ GXTexObj sdsmallTexObj; GXTlutObj sdsmallTlutObj; GXTexObj hddTexObj; GXTlutObj hddTlutObj; +GXTexObj kunaigcTexObj; GXTexObj qoobTexObj; GXTlutObj qoobTlutObj; GXTexObj qoobIndTexObj; @@ -333,6 +334,7 @@ static void init_textures() GX_InitTexObjUserData(&sdsmallTexObj, &sdsmallTlutObj); TPL_GetTextureCI(&imagesTPL, hddimg, &hddTexObj, &hddTlutObj, GX_TLUT0); GX_InitTexObjUserData(&hddTexObj, &hddTlutObj); + TPL_GetTexture(&imagesTPL, kunaigcimg, &kunaigcTexObj); TPL_GetTextureCI(&imagesTPL, qoobimg, &qoobTexObj, &qoobTlutObj, GX_TLUT0); GX_InitTexObjFilterMode(&qoobTexObj, GX_LINEAR, GX_NEAR); GX_InitTexObjUserData(&qoobTexObj, &qoobTlutObj); @@ -588,6 +590,9 @@ static void _DrawImageNow(int textureId, int x, int y, int width, int height, in case TEX_ETH2GC: texObj = ð2gcTexObj; color = (GXColor) {216,216,216,255}; break; + case TEX_KUNAIGC: + texObj = &kunaigcTexObj; color = (GXColor) {216,216,216,255}; + 
break; } if(!ss) ss = GX_GetTexObjWidth(texObj); diff --git a/cube/swiss/source/gui/FrameBufferMagic.h b/cube/swiss/source/gui/FrameBufferMagic.h index 4f5e4a1d..d299b446 100644 --- a/cube/swiss/source/gui/FrameBufferMagic.h +++ b/cube/swiss/source/gui/FrameBufferMagic.h @@ -72,7 +72,8 @@ enum TextureId TEX_STAR, TEX_GCLOADER, TEX_M2LOADER, - TEX_ETH2GC + TEX_ETH2GC, + TEX_KUNAIGC }; extern GXTexObj ntscjTexObj; diff --git a/cube/swiss/source/images/images.scf b/cube/swiss/source/images/images.scf index 7af4bf17..5d459684 100644 --- a/cube/swiss/source/images/images.scf +++ b/cube/swiss/source/images/images.scf @@ -10,6 +10,7 @@ + diff --git a/cube/swiss/source/images/kunaigc.tif b/cube/swiss/source/images/kunaigc.tif new file mode 100644 index 0000000000000000000000000000000000000000..0e0e7105f7e51bb8bcc9e4973f34182a5869d58d GIT binary patch literal 7628 zcmeHLc{r5o`+qHMO0pNq7^%>h#aL!yN|cnbC$wmm7ZWq1nXwh7a#UI@AxV}jX%m&4 zV~Gf9ag-$4J3^9OOeucP45!n%e&<}@>-?_Y_rLSb_0Icw@8@%W?)$l)<(g-#taPA- z5CkbfkeCD{29Phf#pW;pU~z!O!E+(_NX*%V9`on0&_`BGQsiqV3qf)}*cIon&|hf| z9|2f-j(>Ow1gVJ46Zt9uA=Np&3ShM#bXJOqLvp}x4Y049vrB+?EU-(1r!lbO09F8a zE5Hsv_&NeCj4uX0CE{XGC2(*99L9;+oJk;O?ui8u0+3e803x`w_Z$S@lHZ>@{8$u* zyJG-@%XMey`LaAAODnP+0;h+?=;<4R*ccq1h`|%FI0S}3)Ym8KV<3}_fxZ-)H_Ss& zVNV9j6ggB}hD0#vrpR3eWDMEY9QI;tJH&w<57{`;4tdiEbmT^olt~~lkm<{Wc@#t- z(}%?+2AU!%bT$rbsUy&zFcs2LuG@1?cOsIi6@Nfj~fGaA+J31sEu95Q|3%M6tM&NjC zap*KO>;p4_C>P|1{S%+Z@c5%xf67hRBI?eMiGXn5_5Z~Fz3(D2V3Em03pUM97}Uzb z6e-M?NN3X+bfO4SJv=<{I3p^G3RCeYyuOD4%9w5pqX-@zFdhecP;hw64^mbvE|0>Z z!9pn@uEzj66bywxz)@%@LmJi?g{KlcP!t0r8j6m^7{XM28f<{|_(8&!!vM3A;`3ux zLMb|sGBhAiV7dW}GN9r?HZ-~cieQYzqhO4_KEc=!0~-=%vk{I1(ag@u6p7RO!D8n_ z;d!t*Oj9J8LPOX(io%MxP7EgO$fF3WhBdi=9nLM$Mvfv}2%tse0E$I4=fD&mo8!c0`#%|GdzO+6I-}C5hj2S=F~O@7sL+|nP!))BYbf7ZT8iNAzG^lglN?f zDYV%xa4G&UU6d!#nr)(aQCOZZ*q-Lr!|!&+U(7Wcm2PN+BN(BKF<2Z5Prw_XC zMKFLVG!K1!BO}`E*nC&#vORbK6b@|W3F-)H1?InK`$Vjpog&(QPA0$$7LGp{N)!f< z!s6zJ5>J>LO7vWh(ZXHn_b!{D|5s5=M1&umOC%ACdo^UH@<8lKSJo2D8A=pa5_j1E*hv 
z4~@x{+yp^P7LQ};VCL%L>V{bG9vlvrKntLi&;|;P>uY9bXDvK8{`ED{C_3OmFHu6R zzvld{bv_-O!9eRXaI93D8yngDDV!1EPia6<~-P7dY(p9B~|XR_!3Uj=wIokX$81hV;?cq*cJb|4BBXfS}5FajH50!?8& zNLXLM0oRzlC{r<|XTB5;MP!ly-{Kw~5Hy+%LDH@eB>xsTty?*2H zm9_ehS0@IsyFbsM?e^|{wbT8Zl#rJif$e zntp%y;zE8q?!#7eam_?g*5_fap;n}AdMrcD`TDOhiG?ocm`E$%Rc7kLpQ)W)se>$1 z_N%cVpNAWE7!iz=Bei#jM+J*Z1P~|yYa`zR5GlBai8ARd|=W`Rk$%k>kq9+y12W_@CtG#EpvRbET>7oeL=fS zpIW9$<)Rav4-ecf{nP;YuP#`4w;+1$IK21vhLHIyDvHCC9d5od0_Gsu3)F+qh%W{hCzGSA%(CwG3D{G!xnv?Dxct4A!Fx%jt4$zjU36X#xC z*6=*T-;W(l`gN*Bx=t*hP055)GE_L=wEbl7cRd)B<*cIEVkInkUZzO(r`+eO+l zYsJ+wNW=U4)uf*nOzqowVzutzdFSY?Uln9jd^WC%J*4j%F0DdMP1ot%{eY;Di|l{z zchkvOh8RFk~2YUMpC{c@$ULx zw7)&wU!#sQ-@0w_H4UxRVOp4QuhUD^n{J!+s~B%=k#cc0s#rAf{>YnNQ}H)FN&{1p8%t&6JAp zj=t~OgI*jr&jT5&bI%}*5n-}@)!W0ecdqLn>ZSUPy3-EXzpCwkd0xEy25(Pz`xafB-P9KC_9=e4#67AqR}UnJ$nm#mrw6Q{g2zDj}Yg#lK!>rKbxKv#WnK6c42=?-^OJjlOR0j$JQzHkB`^uNp>7 z&8rTnw|E>Sv%Jzj1v@zHyS$)Hft1wz*1OIwzHPya-c{c^PSUBKLFDN9!HGM%sNSA7 zF9-G>+R#>=QyTOESu5WQou8POr+cAqtEI{Z@0w!yI`w0`RphrLDFZVyNxGj7a_TB= zBY2%pUF62%uSHF?xH*tx^HNZV`(+ciZq&iQe$4cHqGc1N)Xi5xUOL#c_fjnO>D7j{ zk3-~L+46#VYUj)27gVd3%(P!$zc1$ai(Mr%x?ekzqxnrYe%9j;NfigihUVGpJYSlz zXT|gJzI++MF?+!~{jX2Al;)lB6wAp{tGR!?{XtP(+C+*GlfdtTlKG1!e)Iaelvets zIB*3U5`2{vWa{BtxGI@ErX;(?{$>Tnsq*N>(868L_CulrbI+o*9Q5Y90Q}`Ya zxxQFlF5jndD0(Iy5o=fZt%FEhwBeIXeskMdo6E0dFArH-56j;6Vu@{Ekp8t|efuOf zZk3k*=DfxBr3q`sl%gzNO~@xsEUi0htAt}aPkA}@GJU0lN~yZ65m+m9cfx!>^{@ZDdnMis@x8#=}RPu69Oq#_;IGf=!Qf-tjnYPBG1)` z9qNsAkd^3A+h;T{>x8B=Qxji*dg!2S?#8hqqQ=9cvYb011DWfB9}Vw5o+H?(s+)S& zpVSmlyFOH~cBpDJ18TC!IkxcgvPZ4^-kJ38H~px4*q*bRpU|s^B9%T~cUJj}N2-e2 zR`(>Iw!~F0daYl0ltfP}->O|8uu9!xTUn_rcRtoEfm14;IJmzFL&=8k>rIY7P7fbn zNujCmTIr1*X(0FdeQcN@X=IDNYwavq+Fvk$lS{Sn4y=9D6g67KR`sxW zo1|pG(Wq;<;i4c(Y|(!@AczbxH#|v=>G_g?A+0S-ea~Q|o;b{x$DDm#>py}e56n;U zttP1#ysI)YE16Q{7g?Dkzg}G$Ov*@ZwY@v_C_ev~Qde%|j-8i#19SULL+am$-ZY%L zSGz{0l&G?JWvXs<2$A~w=aIcC67Gqo**duf8umNhGvXJxtB*WAmEDFesY=x?9Gr(~ z5eWPd4kt6MQ~06MXY)paGSV}akzKs(m%)7Z)2#t_F1%G8{PwW_*7+Oz!+JmJ?+NZg 
z4r7Z_CboFRgbbzCZ=cRzV0a-a+3an7-o4|Wty7Kp$!7a>I*BtYeFm1$waYTO8bjP= zQv+SZ-&!rKuPlB~ZK^}KbRz^c62WA(VD<9FccllRM0ukR=HQ|_id zMEfYM_^MWNUsFnbYQRkJnpsdXC~JFf`L+3)-DAxoo9pk}O;^5}-1g-3FF}z#$u9Ri zyGo^}>bC3j6x}uYZGLdxF9FT-k|qYBeR@?bSXmCIW++8qV)<(X=YmYc{kBbwH7Olc zr3}x<-3sI}6Vv{^qzAO4I}jaiH=}D`=?(pqbFHGxCtEN2;pySpn4ddB`m`(8bw5yg z;A2^yOi91` zjVabM>W)X74*Kk?8~9jrIg~ndx6mx`X+kI4T@W!gwrPa2hcD2dsTA-TAC|R4{{y$K Bfgk_? literal 0 HcmV?d00001 diff --git a/cube/swiss/source/main.c b/cube/swiss/source/main.c old mode 100644 new mode 100755 index d24544b9..31ddbd86 --- a/cube/swiss/source/main.c +++ b/cube/swiss/source/main.c @@ -133,6 +133,7 @@ int main(int argc, char *argv[]) allDevices[i++] = &__device_card_a; allDevices[i++] = &__device_card_b; allDevices[i++] = &__device_qoob; + allDevices[i++] = &__device_kunaigc; allDevices[i++] = &__device_aram; allDevices[i++] = &__device_sys; allDevices[i++] = &__device_ata_a; From 1a457f11a34c02db23b4996152c6c4aa886b6bba Mon Sep 17 00:00:00 2001 From: BBsan2k Date: Sat, 13 Apr 2024 11:34:03 +0200 Subject: [PATCH 2/7] Try to Fix Kunai Image --- cube/swiss/source/gui/FrameBufferMagic.c | 2 +- cube/swiss/source/images/kunaigc.tif | Bin 7628 -> 33652 bytes 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/cube/swiss/source/gui/FrameBufferMagic.c b/cube/swiss/source/gui/FrameBufferMagic.c index 78df0532..a13247c8 100644 --- a/cube/swiss/source/gui/FrameBufferMagic.c +++ b/cube/swiss/source/gui/FrameBufferMagic.c @@ -591,7 +591,7 @@ static void _DrawImageNow(int textureId, int x, int y, int width, int height, in texObj = ð2gcTexObj; color = (GXColor) {216,216,216,255}; break; case TEX_KUNAIGC: - texObj = &kunaigcTexObj; color = (GXColor) {216,216,216,255}; + texObj = &kunaigcTexObj; break; } diff --git a/cube/swiss/source/images/kunaigc.tif b/cube/swiss/source/images/kunaigc.tif index 0e0e7105f7e51bb8bcc9e4973f34182a5869d58d..cde3b9fcc8f82de5f8815550acd141972a98b597 100644 GIT binary patch literal 33652 
zcmeHwcU%-rv+e+rvnWUoVkBoJOAwKqK{Csd*@fLDOEjQ@qKE_$0TUugK$0W_l7oOs zmLNg0NRkD1@62N4eLv58&Ueqf-yb%=ndz$Ps;+*fyJmKJdeGE73{U_7up7VuI{+du zM@U2@guFjU5Pi+-gL&ewc}Rogr##u$JfzP^v}H%z2mp5e#NYi>p80DYvdQwbyaOcw zuo00G>ZJk=v473)1oH>KmL~?wixCk6yx@@yUFMF6f9K+)PcXmZ6@XeQ3 z^YOSkD)5;}>4@rhslr`cPXwaiMuEDTZE5?Pz2+yz_;a?xsE=MDiQ_fIVvnB1QQh%gYl9T5%Kf$6ZVr3 zMxvZW#Eu?4Dk3T_A}%fjmJmV*ATaj+LJ0KUEhFFTsKL=Nl&crU6^Y=1?AkjZeJ~1q zd{9N-)7Q3ndVQ}5ffgpTQy7Ny6!EwB5)l&?75O_QN7#2AFCUZ#!8J#i2;2ki2}fYi zpq|*@>Un)Tf8W)Asq(w{UsepB7ag6ySN`U&r{~{RjmD_^f)IR#^p|SCX`+n-yx<~+ za5T~f1%s>mf=$``cdf^`I{i2~zgU7K|I&B3zw5tHgCtvOgs!l4hUCGv%BrH^_826} z7>V>y{I7<9yvTNqAE*0=F}RvN2CfJV79mkdAu$PKaIlC;$x4a~h>FXKivDC|tK>Et zI!H%Xr+~k)K^Sm9+xV&Eb{pUjaqF-2)6A1r2en#@Nh635E3JvG?-waD~}JXH~=(;rR7B z^nEb!V30gN5A?0}{$%9;VPnzuzW-jA`)3j4-|KR}gya8jUGB$#g}K-xoZ*g&BHza9 zH^x6Zaa(=&*C6sO_Wd>J$U|2HSukLMgNM+ipm_fm-jABM%9}%h$HRvZSY*US!6YIE z?Z2x2EZ(ka@q5*u#oJYXXqq5gF^b|}Yum>DNejAJ*t(J#f%mp>MQJH12{1@+XK&Zg zclC#Rn5(&hmmD+{I>p4nh7pvuOa4~V;)mvuUle~5d{x{!S3li?5(3T^7rem*@2^F^ z-(deG7JZM{|2H!~TJ(R@>NZ<{D%l3e@5w)K{T`of(m!x*1LXJQAGm&x&o=2FxV8cE zd-4xlzsF~r^bcIy0Qo)n2d>}avrYO3u5Ez)p8NyX@A26t{R7uFKz>jDf$R79Y?J2kkl&Mk;QBp2+oXTs+6Kt)$v<%Y9-nQ}KX7dW-YF;XPfj7T-yNoJ^5R>sD3P2g(JXqs(#>t)FUdIIy?3e zmc=p<_CM_iGH`7zxH1@ARSQ6JaFs2XsJ&3&(pWtojF%4vBuOEjJ{n`>04^mYl+Z+A z;BbVGCnSLKgrlc34|t>}=o-2DLnTyPF^~=<-m(ct>4HmljMdGJLDnun1wa8v00X!H za4^RMr~#^gJ|F~~1SA15KpYSSIcAXdSF`#kB+|(Uj`8u*b8v%!?44k7$OIB_0@c(3 zR{#N^L4$C>1N`#=DQAK@mDhJokS4T2#RG%*+6Ed2A6E~ID}rDilv5H)tLj*20R)WX zAREL&;O+-TJAcARkFZ)64ay-L0aY&y;#;!;RSyT$cf!yat@E8wMIlsw5D3f9p4b`fg$kq{XLWozZa+Sg~PxFe&E7Q za7FRgex&cLkN(z=pgdFX8fFCyi6TuD}lq1mucUx_ONU#He8}zvYD!`o(0qnu3U`vPzP-OTPM>Fbao za3EdC#xLpT0^bA>;9vO&9}g(XLBNoK>*W!S=xt*Wh7kj01lOSs{Joce%x%34Q2Egt z32qqv=xmT{WN2`)BOGn&p$!ceqF?GECCEVbPH-b95SnT~)kW@%Li%|9!XiTwmiT^) zgz83+^{w@~pbc7k9}H3hj)0?}wXz@sfY#Z5Eljml7~((@&8-W-|91OHz>fxey@31# z{;wPzwDY%%%vKpPdk>7Uz4I@n=wNUU4`aALMiYI)SV#L?P@?$8{-rRb3lbH0%){0B z+ckrJtMMnku|WXH9pO&)K7=4i2`y>;o8qS5*uNI1c5o)FFaHT##x46Q8s7yF#=1xZ 
zgf%4w=>=Y%(eR(65S0fQuKvQMaUg`ZzwoKS)9$jpIAK&Bgen4Pz>Qex6CuAf+87CW zqOEANV;6yfIy41A3n~xL*@HU>@vmcu$OQnr!a%+{um>Q*cjIRP8p5XtY*lpt68s!M zN1y;lunZ{g1I_|86ciMc6f~5SG>p_#)Qr37X=vzo?_pxv&BU~ak%o}I9=3?>dm>tD zYFav420A*1U37GGyPzH2t}PYDe_{at3}B=H1AspX5g$OzNJPR&gdYG$E(N}ZND1_k z^lNMbJq8o_brs?rq-5k2lvLD2+l64nC;2910El)Fkr3}7CM74MASI!Z0ELVsJ9u}B zksdR!XX5h?7bn|wEj?dlFaK`C=MEB*sEBq|@_n&JpN1D;Xes791!wlFT{m`Ae{mNh z&C+2q0{>ii_GPEfhsEOu;_jV$a--(W=+gb7S8vCbO`Uwt$3G~peK)=$qhaRk7nzVz zQr9)H$_Nk>gWm2S_(V=dDn)RCS8V4F&;xHiCQ|Y6Yr7y1o*TA*lHhk3Mn%M`8tsOm zB^UOQK@O1bmwM3wI&qi9QQbru10VVB!oT+5zz-Mjy#Ngfp-x6X30S-m)h?T0)#j`> zS;U?fl2Lf7eJQ&L4dhN?V6bQ^^VWFL{Z`%_wpa|npV5d17NYP#%ODQS z&G*`Vd4&hY=kWkMbamFJ(ssjfQ-9Jjaicu}9s1mX-W(6~UEZ|q+sKM9QNzA!f4ZX7 zD{E>b&Zd}57B3#!(Xm-9woftO7mqrAamM|vp{CxApFMg~{j&qh#<50Yzj$%HZGqL! zUTQgQ0_ScWeTs6?t08n#Wf>D|v}pTI@6o2^)H|1&DU}5M)8}DUUys;?saUc zHM+Mpq2hee9jg&LKrXiIc0Nu#%x@*XJl^#+ZE5} zBq^XDN~TP(aK(#XogYP1cjAHK1s7{24Q$^s%bjDB)uxM+kG;L?V6%9jWUjto5bJuV z9S@|0+G0P&WDPK2Hk?07pqn`}KK5C1Ps(gGE;5yDSo+xH4kO>qm52D2S2bJ-mOX~k z9WKQ6y-Ei=0B3Hj|D=KuuG<-xV1_;L<;AmU1>M^zdb(4)6ldE#mx_u?+^wA_x%zBN zK88%4M1_l0J(8V@}A5W8oUt*3OrNGtT##wv)g^>V!5mlxRe&P7=bV{Apg{L=!z;ylyF-rn&n z_s8Xanboj9+mD+U2KH`vV6qjjpSyrdxwZP?!&{@>u79FkY9sD(hq^w(30*`2<+ zC#E=k<_dW)2l*DVUOvjqcGIG)B#BmT}|9-irI#%(aPP6%g)5_OW=kxQSNk0y|tT4dN8har4LHU z;OMdpHC3#@=w+Yjs13M#G{W;md7oWOM&TmDJ@gtLuzzi=W^mSy^$T;K8+-A<=94;WtmdIn zS+g&l*!yod4nJL8njWVO@a!g<$2tYZmEg+Y?pbvWb&Z)Nuv1M_e(eP%e;3oP2JQlEg%{)#<(2YHSCSdW$&51sXDEKIx?}uBXj6mcEDlBvQ~j`Y+X1`vQ4dV zlJ!RWyT|kH#~RXzf>dpqSa-)1mf-=_L2TvV!i52WsmOPO*@@%uKAs zV?5)f9~4E0=ymjzJI!|n#^3>s)V+v%E#Wxs~F< zFLjC)pm(=k5IO$XO>e$?Rcki%^9<@~>}_g1z*?OpPTwbcHXOhfeN8-wjw8Cl|wH5v_8WyxCVOO9HuzI&xe$D}eb*lf?u8UNi%%kJ}; z<%3gQSfPZ%cVIK7vMQCP^xko2XE47n?ZqBAUK6rmb3OW5SNMACW)tVy%HR};Kgp@3 z(aA$&O0@|%UfI-5D1wT0Xl=NLjxc57fp4O)`=E6*~c3h4!yZCu)w zMxGE2=GEaEg=6H>eQX!dZ(ODaSVEjN%d+#t6WoTy%8F;X)#p}?{KhS?BGdO3z;4%) z`g&@TwxRY>^OcOaW_MMtp~lq^Gs_+FlSk@wKVC&RV>*=wtnqt(^3M$^?FG^XkdJ_f6jGtG*W3ouU! 
zY%-Dr->7=J$~jmu8kd?K8eMTac^Q3oIXt4tV=8^@E%vNbvYbr!?D~D=l1Q$CMD9{h z;1$MAVFiTGmt4`6qd9{`gY|ynP*AW@Jg)R6@6oOSqhz$TFRgwxMvE{;l|Uhbpe;Y=fNnFxFP5dRS~gc);un_ovWLu6p*%EYsG5rvj~eR$(Q3 z-g-Y2yCTfMRyWr_G&pF%ZCzap3(577#`MdZj$74|kFgwiBH{Nsua~|WS?P!eqC#@a zl(A87u(w?VbCdh0dWYHjEi46cQWxUanMK~nJyo)qN-@lR>QVj3=NUHfRD(O)d`BNL z6Gz#CwwMeVuZdeH*PSvS(0b7R#@mWpSKXoZ*aEjQ9yoMm<)~foaqI#fkW0QfpC24G zb3C6bFRT2`veMAQo|1NRrID91a`%?G(wF5j#h!P^_$0RSJZGAx#i?iaTrVcxb)T?#to*49=E+yFU*FD#Z2CFj5r|G}cB0I1pF%~#e zPbI8@W9XIIQv0Q-;_KFaJ(bT~D0j!l%zB`nW5>D_9|qf3;W92Kydgv0VO6Kimd=?a zcfwiEZ;ZyRY`Rb9SxoLp;YyaZ6q;6WsC(0I`hHL+*S*J*btMMPF|80-E$6^&`z~f7 zGV-FYlg8VpZg<&AEYVl25$8qO1$}$pTg~RUeJHdI)%uvKkBv0QeP?p+`Za`BlqTA8 z$V2|koxp?5x z=_}c8OLRuA`!f@T8Xsi@J=v_eT9pr9<``U1^))kkXprRY=5ynGpdEd&>ETW8@wtpn zl;$CPXE~0)@S&f2+vM=euB-!HLh}WU)0v<_CC;V}mgZoVoMqw2+pX!Ho|5ld=zYsF zCe7TH6iY-@>hJ)U1#RZt19uCWLXR$wvh{}Su8Qg(T=(9m_$2mo-bS#}q9z_Fv|az0 zfKyCeyN3sQ@-~+=^&Z`;0Plji^)@;}XS#6MeRv=y1Lvx=;*JM`cN$G)ih%dPb>K~v zWa_#U9;mt)`e70e+|=8=3El&%#bF<5HC8Vyf%m-!a4n(pcWl34L&vY<0jKKK_I-+v z{u>n^?7_*;2=Y~3Vgrq#3$(%%J@+)@fh*S;`;`rz(G8(% z+3EW}3|c5Y@j+&n-4)3+r+a3pL&Iq8DPLS1RFyDMiCc;_!uM?b7Ud=U_<|k{gTF_? zr^CQ8z!7{1>;OLaXz|Q-%m8YP?w)t>>iygUZP>RpMgG~jWv_B;Olmr$%siB>7D{ma4LWTj zsyG(Wl@%+4cemO3Fx(aU^qsA&P>RD2$>I~YMjkQcN}`=FCi)M3NeOjXFeFN}yxAf* zK7vH~S;g(t^Emcp&yZv(7i|TZG82DdwQ4S(M82Aeyq9z9Do;@Vv+8i$Q-LgDtnu~! zGW}f>z7yB=S7s=v#hms5w+?fKRt1n4@v{13cy^67(MAqr7n^7LE=nvDE(Kcz! 
z@vGGBhxfrOf##iO0(Tme@#)pC;NL%K>wj=&J?dZzR>N0%U%_qi9uyrI_b|BTjkc z!UyIa@KS5Ia#Pz3bUQ4c&Ky$QDc!$EgVM_+R;rkI4;OY17j44)MdsYgk*GRE>4<_- ze5`_0N>$BWOQZHtzE0dsS-davl{S@x$7pF5ZOdVs0rAh6h^3p%!oo+3mJeqdt@M8~ zP)ItM*IH(*I`MfRRc5k%LE0+yyjFO4`#uy&_Kl?J_Z5;PCK3sCm+o9SRBJo?;>L{5 zp?x`B9<{`6HscC;t+?9{avC;t=W$QkPommZ(kmN25EXIdu$)vGZoc=a{OlpoftJtL zto?=+6`#ag1@#a`rDRH->{X=TET#NQEq3Sla!edm`?>WO1v@@ae{@Yp z(>*)N9fE6TII>98<%_`y2$XCbI7_30V5%;lZ%STZ>P)slQmCpArgzHlISyZ{HkKv>;Nyx@4$f0SR{uPAFm)8fXx zh~)v&@LTU?6|)0JNp#9(nb(?6;-VFH-L7V~d=XU2EPQQh(J;5` zdO&j65SP*R%8l{b%CZG&VknaeRdYEwB10_b z164xCVPOAh1x>A{f)rOF<>XKB)0873HnIg>!TNDeW*INOOXWCny^n-NX;=FN)5Uu| z0}t=1g^iw_kC*t8=^NY|IwqCSk^AJTuk`+d&z`a)On9a!CH6NKSX#O?SlO0g5;}V` zNw9MhPsWgf9=y|>8>Dy3QR?m`^+b#Ec0FkGf{wHK=NEi&yKzt52K9;@nHH7E56)(% z1f5o7b4ae7t9+F&p3gg*@FoAo#RG2jqR$CrQ<)?YKU^*#@p@IE90++{;fjiA& z?>2QJy&ll}74;3{f!YCl!qV+5qqza9vINGP`~Btw)Ub-T ziu+CV+^Az&;>)zpzPtYH)4kKF<61G%8$Gk^8aU2oNqyU>?niz1n%-Pm@oqjs^QnW% zf%xD}N995<88h#!I?0>NJwsCERSB=Z7!+PE7LjQ1%{U!q|76UPKg2n_h`;ud%Z4fq zO@mL`J#MPqBojy8emX~Lr~2rI7~12q%Gz$1y@z+X$Fj%4F0qRAW{)Vc5#73z&hl=z zX0#&q#bvTnrHR+{Hn<<__Q!ks5q;DuZkH_U&})rVlelwy@Pl0;>)2j)Hj;{aM+XhP zUy(g4yT#H;MP?8x!!wxRJACJXk+hu8NN&{LljPQ3Z$?i(z|$1(cAF1RwChd#9FW89 z`;mp@u6KZPVbpDy%C4D9qRqF}UcEWvLVdk=qd+X4;^XZs1y;`fNShmp@atC<7d76{ zx(ore0dK3OTaIX<-21Ltl4Dm+R9teWxl8q?Omf$tsKdxX`bp)hxTJs%QZcXdJY+j|w)Y9Fzp%K{HG*!i+W#X26G zsTrt<-4%A+_HthU7q3|t;!6N)3w&BlTh34ASnFpLN8#rqK!u0dy~270yV663vTNd> zIx3$IvxQp`@6UT%cGS=O+LTo-W5#2xw29jv&$@naxQkGE9zh=FZEMi)UbQTWy+GTK zeSC4H-M(0Q?u@uZgI;{a*=EC#B z5E1ORcLe4y)SrJL9+o7p$IjAw&#TAwUhVFtYx18BQm9SPQDJlIXN2PSrLEJdj;WB& zgugc4$I`eL?R_bzZphk&g(P`xuJR%+fTo3$WzL?#~cS*$0m+i_0r*0T%YJNzv8#ykX@&Xh5}6rV5< zNe`|@hd)?|eV#simcK-0%yQpg#%TU7)Y`}Q*L1!1mtLdFwXhTSV_eg$$8q= zx5@E%aH=6~>wDhN!~x0IJYn_=qyb?2=Zn96j+O_SP3BSYK*HPs9Zi)Zr~rHIp}pWt z3_%OpK{g(UwIX5DmEi(f&>dSBQoZ zL?hi1?myE`UTA_oG&Ky#Q64alt!Y$(3W4Y7>4H81lHY7%&^|Co3f7E!Fr*)cBrmz z=#qRb|35_+R43T`UqrX#pP&n!We8mna6^I{gl-DhdqKRxCjQrq}5KFkBnNJF-M 
zM%NIq|M~(Ags#{x=tBJwD*qph?({!J7wW$dx;Eejp=&^RrvZAw;NM2~Yu}alYU^io zf9<;xzn}}9-v4lPH~tB_P`jaVs0?o2;0B@l0Ac{pYb^gZx?lV5-$jOB`|hui0UAyJ z;po!V{e!s64YmX7s)FDKz5fBByY*Vhujo?!&*JV^bpNjJ{)+CeeHR){|KaE={S$P- zX8e*MbP0ihfcd}OcfUr4zl*!SMuuPGE_BuV4@cMPpP)+uoVYlX5TtKx!ciC6lv`oF zgYkmKb3v5_2L2AilZpAS_{HG`9@#_%VQDk zzx!&w7A@S`<=or%{`vXo_a2<|jTXIPv?*@?uyogOWgOhcR9TSBM!nM8`_5JKFjY-8 z-f0FG4_BQ4rt1PSDY=Oc6?_)jlJEB4dCP{bC|;{5K=v!c`XWi*d5Yhytf1NG-KkcV zI?pkLtv}V@F~e>}9DkZa&qC<=7Zdg1l6kJtW9p@oL8a&K%^heGqvk)RdPGF^>p!k?Uc zLqZO5j4d^7fYk$`LWwqVtJ`-jR!|&WS_DJNOZ5gbb{ zrn_Hh$zzGO!Q(2&<<|SB@9e^P2p<_7dPR%BC|;3^qDOt)J?Ry|MUl1l;9W1d)ilf$ zLnI1M`EfWLZo^{yeAeoWV-B@X+3O-4m2>t+Vq8AnCU1y&bp0;*$Ke;F!v)O33lGx0 zll4O%jYOfRCfPb;cT`rjMEgD|OT-EvjI8x>(C_J0nsR?&B-ee1!&nEO_AY9GB>O5A zXRh+i398mgA98--Q!)qEC-E0@#_xt-3wi97mNHD#drD<93VZw-%>#PZwf(t#*1{{- zF=l*XGIv{DWcePxNRT$ekv*V7HXgnDLUJMS&|5kIX)Xk+dntDjP+&13p} zx<7EZ3Z}dq3+^w8yKbW=P&g*}j8^XXv&ZJW{blUT^}=Vm?vv7#tTXnrMP&i?I0|Ie z4YGy#-sq`8Th<}reF<~7>x{25uhA%nutiZH?joXad)_C@1ap|E9gn~y$|O83TH8}< zRs3F`v?Gg7P*}|*qU@x^?OjBZeq;0->q!+p;=6`EHhJ~ng=^yS%PeDtGMBszkUl!(iPDDC|>En|UL=?&GY z$)hv^p^UEHeVcC{7aI&#pghm_s!1I&m6J<)CRnO^yM)I_$>4f2r9RhXuLF7&1DeJ< z5~EXgX?B)(T$O43!i`QxGY`}65V|Xhdc60{hm{fS-0SGXC~f_u+PixKp*v!uB`e? z>e&lck`Dv$^egt$j+NX7gDM`C=+0$to)qbr!P$cTcdJ;RktZ!y|c2oD&fB_b)UKkNT$x^}%#b^gRfW(P} zorJ76j9No@Lw$rwZ(%t7nhwt=xK^v>p1$Dl9kiar;VhJL&n3}%M3=!l<-WAA=G!Xd zS$p=N9*B#cuw_h(vgXFz7SE4d);uDjO_XbC8kioIgo5%<|e8?0^!S9 znx`yN&K2D&<<|Ak`mXQg|03g6H2s8fU`AF#DIDP|bZ{;`*?M1V&_|~P>X#wpvKBti zt{vMXCb!PU+Iw9OPp=9W_LY((rd-)ggn8_yTB9eLGWb#jmW+$jJ}Y*)>)`7*kH~qQ z89AIdnMgRh)$pYq_NCW< zTX;8iI;XHhqsizDV~0zcx`7PYDGqg;hxh*`EwfP9uoa2U+|iF}59EtkZj@Xx%m6EC z`lQ+fRq`aBTAxW{PhE9+lN^JA@`qidm&`Nk*)^U$3yTQ5`|i$l1Mi1nuib>sI56fj zs|8%3=O8x_c*kOT{{}IY$t8-z4C#k2kX(4HKW1=@NQ^ROG?#^#S@>c!-G)SAP|xN( z&bNA?Pa%x{T3Pjs@ickS7mEDZivH{Xwdoss-+mxJmT0u8;taIEdYs)_2uyE$;BFl^n6=kEDt$z1Ix{kM_irrNAy8^E@}_ z6i6Xoyp36F1M;*>w9z5vW7j$Uy+IWMZ@;%>cq5Bq0B-xI? 
z6cxXJo$|7@_hqgy44fXjg<(SLa(@@=+gBnYpSr5$!+(Yx?s*x8nCt zuVgx3@eB0yZBX*BJKOpA$SpDbE23*J^o@A1a@0n1Zs#5u%w6ixRX}2`URJO&|60O620wKuJ|Y=-9xZZp$k3E z1}AMz`AZKMdKL)sGnda1r;fmw=}x{*=^{ocsM%GBKie7RA|mT;CS@F#^S-)*mI21L z=GC4n%A9Vz^N?@*LQru@qE?mOVV`hM%H@b5|4)ZJ@$2EwKB*_!J?-GXsqY|VhX}}2 zHkoyKI>poT!l_GJy!k90r&&-ZXND-K9=KQp_-n+N}YBt z=E6oRy&%&uIwM_L#Y35ovXSp4<|Qy4weTg)`>M`XcuNz6xOdQs(D(+qRerd}wU*%i z#p7{QJG?v;sB)5{SMKtyw3MAhcAnEvGoqI|7*>*5E5btJrfsAC*e{^fH0ktX)AqZU zmJNpAjTh1nJT-+s2|j%BL~c>6#{LLB3pv*5yVGK&v%67Y{WKdQ3fY$@dCF6bPIWy{ z+;^!Jn}sA3R84&4_XtZ#k16#dwwTcOCSJB7tLh?o>2#gv&f$g64@gMLuhbS&SOTY#jQ6*8h0K?_R67A*^!wft2>u2|9O)eO_l6 zlMXe0z^5IbNGYCYrM!6?M*HQ4Pu?Nv@nX(VV$I_FhwL;Y%VvlA-dtObUzER2P9@95Ix6qYYOJyR*zZ5nyyS>^Q}&tSJ> z9Aqk(7k4cDVKwl}M&w?Ko+@Wx&&>xn!vb6An(G?})+Iix1R7tnJkC0U=NU+2p!Tm4 zDO9H>SJv1Ak7qu`xDku***8o*9Vto zS!bEZ+9kXf&`x1b@~2j<4v{*t&o@r4`yg9fxVa?bji;;FcningORQmBni}&$-Yf0k zYnDE%8VVnI+b+t@A78b6cuQl!>pwu1u_#a`d(Bt zj>%1lqFtI#ux>uV9!#Hc&3bo}SXlMQDCIg@d-02%@knBc zZyh|79dLtR0k?HM+|v_wZCFVmd)Rpbhdnve#&Yx2i{^umc*~-XO(n&>T`;S5b)wCC zru6o)Adf0GvnHxOS>$&6i=9r)!a~tC6HU1U*ioSa#;RdY41((t&4I(ev9ee;mNCBpiMxDj@Oc$Wl7x;rA>-q%h0S zs^v3=T6+TUhcmP5_|h?YOHvUNq2r4ORF3BpUq86_gHkq1dkQ7z?P63Vu)(aZyeiI{ z>_zlEbSJ9ZyYW#!hulwvGGwUtho(j2wFP-%iO$)5vVoy!YjB&9Nwg=9@#qgJ zQQ)0LMe2i@Dt9G{*Vqg+cOyI6F&C8U(}3fmJ9x>Q@|J7Ol$({5qr%7}BJSf=nPyW% z<7Kl#*K?1I<7GYHeLitMv!^x4o9%GJ^Uq^4Z_F4jDHD;rLp_jKKuHCHWaf?7;=D5uvv;N6=_0iM|9n29`#!u^# zF4~;hfn3Y13prIkRafYKUR#O#w88p|nh=&$UhU{dsB#K({`Eu!_Le)1FEc{LH}P%t z8+eh|dH9QhIi!06OlH`;nzSnHnr?;(G6YPn$&EzU?ucc3C*&{KsMh+Jaou)A`hw;- zBT?27a|<7z3_G71v}06%TlM-Go!#|2>Fz~b98&c7Lg3NI32+_TW&8Oi_ zYz+Dp^l;Harh_yV<~?utrD5iAimc6-6qe&|w($Bkh;*DhvHSWPiDF6pMC@S5^7%{e z!hpTgNTIj1Cz$W+!tFlEubnHcrOZ@bO1Lo2{80VSmFwJ!_bv+AJ=?IgaTj=KUB~7> zOyqg`3{ZjJfvd7(@>e(C_bN>H@SQz3_)N~LZ6R{Y2QSuE{ zUAlyQ8_BGPhE$le&jlXHPW5P9XTBu z>MAMszWZJLN4IPB4Xy`z*G-ovx~0xkmuR_foK)6sy+4^5qdQPgMd<*8zi8-egPpB+ zeCNtx@hX#Pl?sJzdpFJ24>h3pvGp#G1b85Z9 z5>CM`_Ayu5ba3rh(#}UCp2Ix8nH-Uxai_235m_6&p&5Lk7;j+s$HD7y2HiphMid*;6o;`0Y5E9$ 
z92lF;d8sZD?#`c!LGw#5(w6l+X}Hq>AS{Mb6rjEWfd51I#6`@TU~ML64)Ra)%e`Dr zDzcB%7Bswg{M1ExiMyCWLX^fKBM4S-5seW0$lY1`(WYRZaDHY}tWO^|pXvvjFbauh z%Eh%TJi0b7_$72}c1T|0IYrL#=7HHst@$2w+wN25e)#^)km`w$)S%lzbB{ZkOHPLE z|8P{tW9ZgH0a1*$^3g9oS?jX)-Nd1h^*br2JMZmhc{38l>i9@849_*0B-Yqqx8p(v zl~sIFM*4mO>NR`6DU(j)6;8zD?U)^FjrnAgXHX^glLz8u`M5XrU4$d%&j!c6t~n+* z$3pW+p~SRK`LqdV;%A1@5I2fGd&@^jXU2F7$4^-7WTCn^;n>SiGZPdLSwK2DVx}N| z$3;!n?+ckl+NZ80a-xQ?qjOp`oqU(jSHD8hkF~MRw zod%LvV^4A{i%VqoNnb&vUH<-!Dx#8>VV}2cc86UN2**7;43UbXmV>wvQ^TOcmGN5H ziSqmLk1e}Ac&p5J$W_gJiY#)rOki}rBi8*{IM|W@B9B8sufe({W9RJ-)=6SAM90eF zP4X_6gJ_No-)R~x&n#LGH)pe3(MIz_U0n^zhE$_j#f3q${`$|H7L!b-3&(fwM7l;5 zkVJ&-VsQ%?->u&FIn%T*{D_Q(8*@L2f%DNN8pv8HovY|ex{z}+iDfe~Bgv05eX}rr$f1a2+_amoHch6*`YQ8M)jI2oit5+A zN$b@L9x2S5M^s16+qH0C{QOGBy8X%StjhW)Qwj^$dJ~843~Rr$vzlP5ubihpD@CF) zc(_ya7Fps`(?c?%vtJO+y>y+N1}2x13U0cvVFE{`UmbT5lb@Bj?{*@u>G*5!Ptu2u za(Zy3Th=^$E^Iu1tMhTioiL-K<~jf7Vw&|wWHPtj#uj&6e06jB0Cpe6iJph(B$t-e z%7QT(&5cdTGHfayN0hU zwtkBLRAM=FTAZWYa`JU@fNo&Yd%G8Dm9qPt}V zCp+mFsUbKlQPBu?Gw@Z12#*Eb`feJ<43krwAvQ@R93H1IWiyrIw01cj;X*3f32yY^ z-SW2!_k9$%iE!*j4{prD!s3+EKK_Y93x?ve)6A4V7C4)(V zuNzbGto%)r6-m#BO9S(!tGWHmw28}j^KEc}EwYbA_)U}Y#jBEK)|s!qoGyIR}jx}`* zDspZx(Koiw|1=*OuOzaPGjR6t)lRoZvM;F-xya=GsWnd+>Zui0qpaiTcU=%c5({Z7 zE|Ku>8|ot~{#0k{8?UF~j3APuiaN(VSa4AyShm{D!iBR&<@o*c3|C^9Jao$D^(;OS zC7)Bijd?;^AL)F6Scs;ZviNEzKh9F&waWRyYID4mMdgYj9kuO@|4>H5!t`^FIb+wy z=NuTN?$E3kE=Mhg&L=-%k37dF^gQiI|IWzAS54jzPq3w(8*HmEG=i7ryA9~fFe}d6 zj(>1&YWg(uARVYzkk>z98wAhZndVA^9Y5ZdE6Dbey8L{0;s3J>E-pkL6SPh;?3vJpHHld4p>e0Io zZ_d@)k_p4pxnoj~?bu;lR;BpmkO0-&kU0sG$H2wLTXWLTn=H`n!dCL|@Ih?N?g4;&lmY*7GPK|w{8y*;9vVKb zj>hKd#y`})YD2a)5pWF|kp5v8dJhOxH-^JpG!afnNDFe(6>WkBzdZr2q=zKVh^=p; zd~3&X!s2OYCYO-5X0E{(f?x@t{tw%b5b_zC4}=l|jWUI~Iq3ax4U$4r?a;dfp7w4? 
z6jT!=CNNwPTMVcKv^lx@fAa)-3CIf#u1<&kzw1A+2~9TtViTmd+k~KSgri|7SHkB} zet-zg#e@HR8;wN#;6eHbXN0<^4`G2jArG|^hV=AOMWW!);&V8{-oXQ|`jw-hZvqvD zjBN>^NoXhVe|r0%;Jd#wzm+z20c&(b|4WUXpvJelK+8rxUS3EP=3DVCZy}J-TY{}` HAprjeaPcR} literal 7628 zcmeHLc{r5o`+qHMO0pNq7^%>h#aL!yN|cnbC$wmm7ZWq1nXwh7a#UI@AxV}jX%m&4 zV~Gf9ag-$4J3^9OOeucP45!n%e&<}@>-?_Y_rLSb_0Icw@8@%W?)$l)<(g-#taPA- z5CkbfkeCD{29Phf#pW;pU~z!O!E+(_NX*%V9`on0&_`BGQsiqV3qf)}*cIon&|hf| z9|2f-j(>Ow1gVJ46Zt9uA=Np&3ShM#bXJOqLvp}x4Y049vrB+?EU-(1r!lbO09F8a zE5Hsv_&NeCj4uX0CE{XGC2(*99L9;+oJk;O?ui8u0+3e803x`w_Z$S@lHZ>@{8$u* zyJG-@%XMey`LaAAODnP+0;h+?=;<4R*ccq1h`|%FI0S}3)Ym8KV<3}_fxZ-)H_Ss& zVNV9j6ggB}hD0#vrpR3eWDMEY9QI;tJH&w<57{`;4tdiEbmT^olt~~lkm<{Wc@#t- z(}%?+2AU!%bT$rbsUy&zFcs2LuG@1?cOsIi6@Nfj~fGaA+J31sEu95Q|3%M6tM&NjC zap*KO>;p4_C>P|1{S%+Z@c5%xf67hRBI?eMiGXn5_5Z~Fz3(D2V3Em03pUM97}Uzb z6e-M?NN3X+bfO4SJv=<{I3p^G3RCeYyuOD4%9w5pqX-@zFdhecP;hw64^mbvE|0>Z z!9pn@uEzj66bywxz)@%@LmJi?g{KlcP!t0r8j6m^7{XM28f<{|_(8&!!vM3A;`3ux zLMb|sGBhAiV7dW}GN9r?HZ-~cieQYzqhO4_KEc=!0~-=%vk{I1(ag@u6p7RO!D8n_ z;d!t*Oj9J8LPOX(io%MxP7EgO$fF3WhBdi=9nLM$Mvfv}2%tse0E$I4=fD&mo8!c0`#%|GdzO+6I-}C5hj2S=F~O@7sL+|nP!))BYbf7ZT8iNAzG^lglN?f zDYV%xa4G&UU6d!#nr)(aQCOZZ*q-Lr!|!&+U(7Wcm2PN+BN(BKF<2Z5Prw_XC zMKFLVG!K1!BO}`E*nC&#vORbK6b@|W3F-)H1?InK`$Vjpog&(QPA0$$7LGp{N)!f< z!s6zJ5>J>LO7vWh(ZXHn_b!{D|5s5=M1&umOC%ACdo^UH@<8lKSJo2D8A=pa5_j1E*hv z4~@x{+yp^P7LQ};VCL%L>V{bG9vlvrKntLi&;|;P>uY9bXDvK8{`ED{C_3OmFHu6R zzvld{bv_-O!9eRXaI93D8yngDDV!1EPia6<~-P7dY(p9B~|XR_!3Uj=wIokX$81hV;?cq*cJb|4BBXfS}5FajH50!?8& zNLXLM0oRzlC{r<|XTB5;MP!ly-{Kw~5Hy+%LDH@eB>xsTty?*2H zm9_ehS0@IsyFbsM?e^|{wbT8Zl#rJif$e zntp%y;zE8q?!#7eam_?g*5_fap;n}AdMrcD`TDOhiG?ocm`E$%Rc7kLpQ)W)se>$1 z_N%cVpNAWE7!iz=Bei#jM+J*Z1P~|yYa`zR5GlBai8ARd|=W`Rk$%k>kq9+y12W_@CtG#EpvRbET>7oeL=fS zpIW9$<)Rav4-ecf{nP;YuP#`4w;+1$IK21vhLHIyDvHCC9d5od0_Gsu3)F+qh%W{hCzGSA%(CwG3D{G!xnv?Dxct4A!Fx%jt4$zjU36X#xC z*6=*T-;W(l`gN*Bx=t*hP055)GE_L=wEbl7cRd)B<*cIEVkInkUZzO(r`+eO+l zYsJ+wNW=U4)uf*nOzqowVzutzdFSY?Uln9jd^WC%J*4j%F0DdMP1ot%{eY;Di|l{z 
zchkvOh8RFk~2YUMpC{c@$ULx zw7)&wU!#sQ-@0w_H4UxRVOp4QuhUD^n{J!+s~B%=k#cc0s#rAf{>YnNQ}H)FN&{1p8%t&6JAp zj=t~OgI*jr&jT5&bI%}*5n-}@)!W0ecdqLn>ZSUPy3-EXzpCwkd0xEy25(Pz`xafB-P9KC_9=e4#67AqR}UnJ$nm#mrw6Q{g2zDj}Yg#lK!>rKbxKv#WnK6c42=?-^OJjlOR0j$JQzHkB`^uNp>7 z&8rTnw|E>Sv%Jzj1v@zHyS$)Hft1wz*1OIwzHPya-c{c^PSUBKLFDN9!HGM%sNSA7 zF9-G>+R#>=QyTOESu5WQou8POr+cAqtEI{Z@0w!yI`w0`RphrLDFZVyNxGj7a_TB= zBY2%pUF62%uSHF?xH*tx^HNZV`(+ciZq&iQe$4cHqGc1N)Xi5xUOL#c_fjnO>D7j{ zk3-~L+46#VYUj)27gVd3%(P!$zc1$ai(Mr%x?ekzqxnrYe%9j;NfigihUVGpJYSlz zXT|gJzI++MF?+!~{jX2Al;)lB6wAp{tGR!?{XtP(+C+*GlfdtTlKG1!e)Iaelvets zIB*3U5`2{vWa{BtxGI@ErX;(?{$>Tnsq*N>(868L_CulrbI+o*9Q5Y90Q}`Ya zxxQFlF5jndD0(Iy5o=fZt%FEhwBeIXeskMdo6E0dFArH-56j;6Vu@{Ekp8t|efuOf zZk3k*=DfxBr3q`sl%gzNO~@xsEUi0htAt}aPkA}@GJU0lN~yZ65m+m9cfx!>^{@ZDdnMis@x8#=}RPu69Oq#_;IGf=!Qf-tjnYPBG1)` z9qNsAkd^3A+h;T{>x8B=Qxji*dg!2S?#8hqqQ=9cvYb011DWfB9}Vw5o+H?(s+)S& zpVSmlyFOH~cBpDJ18TC!IkxcgvPZ4^-kJ38H~px4*q*bRpU|s^B9%T~cUJj}N2-e2 zR`(>Iw!~F0daYl0ltfP}->O|8uu9!xTUn_rcRtoEfm14;IJmzFL&=8k>rIY7P7fbn zNujCmTIr1*X(0FdeQcN@X=IDNYwavq+Fvk$lS{Sn4y=9D6g67KR`sxW zo1|pG(Wq;<;i4c(Y|(!@AczbxH#|v=>G_g?A+0S-ea~Q|o;b{x$DDm#>py}e56n;U zttP1#ysI)YE16Q{7g?Dkzg}G$Ov*@ZwY@v_C_ev~Qde%|j-8i#19SULL+am$-ZY%L zSGz{0l&G?JWvXs<2$A~w=aIcC67Gqo**duf8umNhGvXJxtB*WAmEDFesY=x?9Gr(~ z5eWPd4kt6MQ~06MXY)paGSV}akzKs(m%)7Z)2#t_F1%G8{PwW_*7+Oz!+JmJ?+NZg z4r7Z_CboFRgbbzCZ=cRzV0a-a+3an7-o4|Wty7Kp$!7a>I*BtYeFm1$waYTO8bjP= zQv+SZ-&!rKuPlB~ZK^}KbRz^c62WA(VD<9FccllRM0ukR=HQ|_id zMEfYM_^MWNUsFnbYQRkJnpsdXC~JFf`L+3)-DAxoo9pk}O;^5}-1g-3FF}z#$u9Ri zyGo^}>bC3j6x}uYZGLdxF9FT-k|qYBeR@?bSXmCIW++8qV)<(X=YmYc{kBbwH7Olc zr3}x<-3sI}6Vv{^qzAO4I}jaiH=}D`=?(pqbFHGxCtEN2;pySpn4ddB`m`(8bw5yg z;A2^yOi91` zjVabM>W)X74*Kk?8~9jrIg~ndx6mx`X+kI4T@W!gwrPa2hcD2dsTA-TAC|R4{{y$K Bfgk_? 
From 22aa7f10165c74ce9dccfa1141cfbefaf1c274d0 Mon Sep 17 00:00:00 2001 From: BBsan2k Date: Sat, 13 Apr 2024 12:09:31 +0200 Subject: [PATCH 3/7] Image - another try --- .../devices/kunaigc/deviceHandler-KunaiGC.c | 2 +- cube/swiss/source/images/kunaigc.tif | Bin 33652 -> 7176 bytes 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/cube/swiss/source/devices/kunaigc/deviceHandler-KunaiGC.c b/cube/swiss/source/devices/kunaigc/deviceHandler-KunaiGC.c index 1c3e1a7b..8eb06681 100755 --- a/cube/swiss/source/devices/kunaigc/deviceHandler-KunaiGC.c +++ b/cube/swiss/source/devices/kunaigc/deviceHandler-KunaiGC.c @@ -230,7 +230,7 @@ DEVICEHANDLER_INTERFACE __device_kunaigc = { .hwName = "KunaiGC IPL", .deviceName = "KunaiGC", .deviceDescription = "KunaiGC File System", - .deviceTexture = {TEX_KUNAIGC, 112, 57, 112, 57}, + .deviceTexture = {TEX_KUNAIGC, 120, 80, 120, 80}, .features = FEAT_READ|FEAT_WRITE|FEAT_CONFIG_DEVICE|FEAT_BOOT_DEVICE|FEAT_AUTOLOAD_DOL, .location = LOC_SYSTEM, .initial = &initial_KunaiGC, diff --git a/cube/swiss/source/images/kunaigc.tif b/cube/swiss/source/images/kunaigc.tif index cde3b9fcc8f82de5f8815550acd141972a98b597..e00cbc3fc367f5d009a522483a897ea7501b5c0d 100644 GIT binary patch literal 7176 zcmeHLcT`i^*1zD4D2N3GL|}-qf~F^g!~{p_ND~mnf|EuFq>%yvL{OiIB48N-rHoR> zf)G#;5o|~qq**`(1}wBe9!-%VgS?Z-D1P5_y*1zZ-hbY#oBPW?zx~_$>{C|GwX-vU zWQ&{1z zGljze)}7*CQiq@=D&H%8;Xp`#3Y!3IFipo$MHQL@{II~jdYUE%*foLO9N4h{F9O&B zVApBBZU8IdtAJ0jstQyJ9DV|vq66P4?aSvu5a-ax{2f64;zMc>3^=R>cEx22Zb1M1 znG5lB(7@fuD z@Tu_rqT6s7he?HRB{-rTg%&Iyj%~1ru~8eJfjP~mXU4*M%Vp3u?plRRIHRA!502zV$NO^CuE(I_4dHQhrju?hepQwjZB z53zfYkcDz(i3L&-gJl)K;!BoImrzvylY*j4Yz|MT>=Y>87r~f}PduSi#8q;b3>1sY z;sH@H$Pe=cU&8VJT&yp0Q?w|%Gd&Ry?vwr(?7#Z1BmbNxTIj9u@9L zW5ArgwG@omPia{oI+H&zLYIY2H6@&(!k8& z@MvrnN--YF76l+%(j+W}IB@#37Ty0^i)+wWGWuhFO0u0oz=6eQOMIwsv;Wz!mBUon zG1zGhpju$$Xo`gZT9jcyvB(x87EK}$xeEkbD*V%y@Tu7mtXC#YLQ^F7pAu8ZISQB@ z@1Xz0R;*4K1@OU~vgL?D{2-<2U3qcag`V8f#E1QH8F|59Bn 
z@RkJ9M63;LP)ATJF#na?Cv5e{DPsI3JlYhGtXp#vlwWs z1~;D}6iL9@lmf76phT#^;D#3Yxw~2b90@Rt$pgn!NaX^+%b7f;0xNu7d0eSNM{y|* zw{uwhEpFiH3T^|)2C{=3Ar}Y+(IF|sfw+(aLVzb9SS%q^hyb28kO-ndL68_?fF~DX z0b3y8NFX18#6Zag+6a(4Kp>m1#9N|_=K`Y8fd&U?DIy3U9?(?8gB0}z9B{qetjttJ zr&>r8(Uhhc;9J$(8-m`PhagQ)2wM06g4B0>C6?l)#izb~g#%9Ufb)OsBY4Ab_TxJg z6=izhT}o2#P7R@ZgI;Z?p_ZNMY{7hscA7@M%a1|zaF>j2=g)>-$oM(++_s*yd|Z>0 zp`S+nyRxGj?&R91VO31rP1KuothftQ?l!%7u&DYcX(YUV{+rSF$&)D|Bfqy46$PHG z@wR<4d9tx6q?Xz>SvyuYY1Y-64$C;XuJB~@_w!0riw|y38h77hpb@zW`()?OY3H)a z9MFXMZrQ7vyq>i@DaqIBU3qPwkSF)i?wy<1_U4+-#Zwn4rS|y+Qhj?(WL17nR+*hD z0zc5_<3~I3W3y&)@7?F+soQ4=us((>Qx`d}%0LP1=e9oH=9Ery+8vs6Y0G$&LsnG5 z;U#%LxE*`Bpm`4aa{L}&YO(af9W$mrRZ_8YCv5Z1OMB+!6q~%9us&bf#w)>GzU$6h z!qfffhX8vHaqqa6`JHXMP8{hO@Gp5E<9N8NIa2%jB2Vi~t5Fp%qVMYBw6oS7Tjo5L zUU9MJ+0I>|-p{_!POcHvbVm;bo~z&0mhZEo;@+j?Lu#Cc?bhr;;_nBtHn){pzR0?> z0@;g59lmu}Zz#1dwQj?u3OGH#uP1`N16DRZO5UM<_Sh2Xa(Y(W3+j=Kg!xyTx=M3y z^o`|TP+2e9UN~PracSk!-(yoE3|4H@QW1*>v&((bi#mrYWVn)~Gw$vCwmzioi5D|M zE`|ErKC<+j`A5iObVS%|lVtyzm9b5Ouc(L1hTxs#n)B(+dp7CLmZy&R^l%H$Gj7K| zseRR7PuHNC%1yA%9KA@j{^0D#__mmhwcSXgk<;k|F-Ki*X;?^lRNlXS zIZ81<6y;NSh_-suOtd+kcv-EX4tv46Z{qtoFrg*DdIWe*K$UikH($*VaY zkxbM>ab0S>#;cnKPszh%)7AsBJK7_ixW0*1PI`M(f4MQba9;M(u$8@pmloE?bk>C7 z!*e^F{yD!D`DfgKX^jBu{mecyq2#hXr{IKV*?jeu!1OXr`a7pKz37p}U16)M-ispa zg)>5C*&Au}KfEyhc+fvZb~iaG%Aw}kgjDRi`21m{d6{Q&b)EfjuDSe&V}pq`nIU6i zc?Y&~0_Ix3nA=1Q$qwEm$Q}4l$vUv3-1CG+@dm`vk=k3Sv%^X!eGZ4Xw7GM$*N%93 z8ScA}n>*-xP(%JIh&=Mzs_Jc|{9VWyQ{z#;@W7ZZ--LTUX5{CV$a`7WYhKvonXh+n z_RhUBtMN~R3Xc|w%0f*q>nw+JxA>_wXO=gXdfk6yG4%fXIEVPSHH6l*7{`foDOs~i zbALO3^Zl&b*u0JYGqXNuWmm?zg}G4dNu8Hwwxf|q1$A!$>5tc|gp>33b`w*!ncWfB4>w6pDdOevGQq0zRILNOk zi)l%)UauQf`@p_=iR_1)@hfWN(fNymt=o0w4etZ2kruniDK5!~-V2XIn1aac2ukmOS4qTOC1tTe730+D>QhhS+8U^4(`{ zWp;0k%Vxzq8=Up-Ow`kr`E1Ge(uzE`g-{m}K|J`18vToJtlk|~O{uRNg6~~7V%ak8 zXY6I=cvN<+O{d&)08*TYQ^U?Uxi%4PeN=X?p zw>*csp?JKo{4%+5D5;Vh;+93bw!8Mc+){5Vzp?OnfTw>VZm`Is8%NGZdL=r&lzMOL z(m&nVVMisXyNU*y-0%GsLg}rKNFVC7sw`_Bu0LE5>Q+%cZgiOu5a=3Dz&_o6(4KOT 
zz1+hrDt=(6txkP-O*MvC_0IT778BpmIzlQp=-o`YIO1KijyO0XqErP;xN0wZV1MYB z_K8?~{i+Q2jfhXO= zyrWisoGU~vTasbZ7ut@z;odN@e^u-D=)ieDJnrI?u0NLDzhmUJsp+Dnfo6ZGbxTS% zuGm&q_tTM|=j3KP<8gtOxO>-XomTla3+>USuk)6pe!-0x99 zOiI3;%nG|1RXlm>cS^YVl~r*OdP#yr^9EVe#`f%|ZJf4=`D%lL;x)x}$IT4{$e^8z ziZ2FhGGBEq4YBnluAdPmop`cbDmd1o2k%0Mx@H8dtFMhaSP<(sSJ!^{=bAeCeQKes z)!488O?!mh-u!)mhHZJdiw~nt*t~8D6cqPV_FLZ7ufM9*v`gH@%24y_N$KhE{q?MY zv%YUvFqN|^Y}wPB(dMyLWA@>h_Q?g?Z|?G=$bIFvePl)=yC#%+tB}(oo3Xfhv$Jj^ zs@!iTJxq40kIU3o;bgAQKDnfLp)HCRu%=zJ(U*_CREV*2ly6pOIKGetphuHR; z??Y~ypRP*{dl$R4(WYU1W^Qb&%+GLa|847%O!{d|&f4AvnRBNsV^2{;>yE+Ry!+yO z{@%J7@}_fVUX6z7_pclJpwoJq+s5zi(e|X?Kh!XKXUEy`+n&L*!g+4o<=#xH~7!V3V32! zwyybVV7}^kyP)**J8xW>_)Vs!Uvy_n-tdgdt-`tP*Y;*)p4wNEc0D3yiA>;MDN`4x zKWQn=uJvni-st>PKrzga8`*9us6W>I2whzCN7;dq&etE*Fy>e3iMq)xyQ)M(C*CX= z&06rbXz*It>(b_1#*qbSH)9um;0BB@dmFmuZ76wi-P|sn)BTU7c_S;p80DYvdQwbyaOcw zuo00G>ZJk=v473)1oH>KmL~?wixCk6yx@@yUFMF6f9K+)PcXmZ6@XeQ3 z^YOSkD)5;}>4@rhslr`cPXwaiMuEDTZE5?Pz2+yz_;a?xsE=MDiQ_fIVvnB1QQh%gYl9T5%Kf$6ZVr3 zMxvZW#Eu?4Dk3T_A}%fjmJmV*ATaj+LJ0KUEhFFTsKL=Nl&crU6^Y=1?AkjZeJ~1q zd{9N-)7Q3ndVQ}5ffgpTQy7Ny6!EwB5)l&?75O_QN7#2AFCUZ#!8J#i2;2ki2}fYi zpq|*@>Un)Tf8W)Asq(w{UsepB7ag6ySN`U&r{~{RjmD_^f)IR#^p|SCX`+n-yx<~+ za5T~f1%s>mf=$``cdf^`I{i2~zgU7K|I&B3zw5tHgCtvOgs!l4hUCGv%BrH^_826} z7>V>y{I7<9yvTNqAE*0=F}RvN2CfJV79mkdAu$PKaIlC;$x4a~h>FXKivDC|tK>Et zI!H%Xr+~k)K^Sm9+xV&Eb{pUjaqF-2)6A1r2en#@Nh635E3JvG?-waD~}JXH~=(;rR7B z^nEb!V30gN5A?0}{$%9;VPnzuzW-jA`)3j4-|KR}gya8jUGB$#g}K-xoZ*g&BHza9 zH^x6Zaa(=&*C6sO_Wd>J$U|2HSukLMgNM+ipm_fm-jABM%9}%h$HRvZSY*US!6YIE z?Z2x2EZ(ka@q5*u#oJYXXqq5gF^b|}Yum>DNejAJ*t(J#f%mp>MQJH12{1@+XK&Zg zclC#Rn5(&hmmD+{I>p4nh7pvuOa4~V;)mvuUle~5d{x{!S3li?5(3T^7rem*@2^F^ z-(deG7JZM{|2H!~TJ(R@>NZ<{D%l3e@5w)K{T`of(m!x*1LXJQAGm&x&o=2FxV8cE zd-4xlzsF~r^bcIy0Qo)n2d>}avrYO3u5Ez)p8NyX@A26t{R7uFKz>jDf$R79Y?J2kkl&Mk;QBp2+oXTs+6Kt)$v<%Y9-nQ}KX7dW-YF;XPfj7T-yNoJ^5R>sD3P2g(JXqs(#>t)FUdIIy?3e zmc=p<_CM_iGH`7zxH1@ARSQ6JaFs2XsJ&3&(pWtojF%4vBuOEjJ{n`>04^mYl+Z+A 
z;BbVGCnSLKgrlc34|t>}=o-2DLnTyPF^~=<-m(ct>4HmljMdGJLDnun1wa8v00X!H za4^RMr~#^gJ|F~~1SA15KpYSSIcAXdSF`#kB+|(Uj`8u*b8v%!?44k7$OIB_0@c(3 zR{#N^L4$C>1N`#=DQAK@mDhJokS4T2#RG%*+6Ed2A6E~ID}rDilv5H)tLj*20R)WX zAREL&;O+-TJAcARkFZ)64ay-L0aY&y;#;!;RSyT$cf!yat@E8wMIlsw5D3f9p4b`fg$kq{XLWozZa+Sg~PxFe&E7Q za7FRgex&cLkN(z=pgdFX8fFCyi6TuD}lq1mucUx_ONU#He8}zvYD!`o(0qnu3U`vPzP-OTPM>Fbao za3EdC#xLpT0^bA>;9vO&9}g(XLBNoK>*W!S=xt*Wh7kj01lOSs{Joce%x%34Q2Egt z32qqv=xmT{WN2`)BOGn&p$!ceqF?GECCEVbPH-b95SnT~)kW@%Li%|9!XiTwmiT^) zgz83+^{w@~pbc7k9}H3hj)0?}wXz@sfY#Z5Eljml7~((@&8-W-|91OHz>fxey@31# z{;wPzwDY%%%vKpPdk>7Uz4I@n=wNUU4`aALMiYI)SV#L?P@?$8{-rRb3lbH0%){0B z+ckrJtMMnku|WXH9pO&)K7=4i2`y>;o8qS5*uNI1c5o)FFaHT##x46Q8s7yF#=1xZ zgf%4w=>=Y%(eR(65S0fQuKvQMaUg`ZzwoKS)9$jpIAK&Bgen4Pz>Qex6CuAf+87CW zqOEANV;6yfIy41A3n~xL*@HU>@vmcu$OQnr!a%+{um>Q*cjIRP8p5XtY*lpt68s!M zN1y;lunZ{g1I_|86ciMc6f~5SG>p_#)Qr37X=vzo?_pxv&BU~ak%o}I9=3?>dm>tD zYFav420A*1U37GGyPzH2t}PYDe_{at3}B=H1AspX5g$OzNJPR&gdYG$E(N}ZND1_k z^lNMbJq8o_brs?rq-5k2lvLD2+l64nC;2910El)Fkr3}7CM74MASI!Z0ELVsJ9u}B zksdR!XX5h?7bn|wEj?dlFaK`C=MEB*sEBq|@_n&JpN1D;Xes791!wlFT{m`Ae{mNh z&C+2q0{>ii_GPEfhsEOu;_jV$a--(W=+gb7S8vCbO`Uwt$3G~peK)=$qhaRk7nzVz zQr9)H$_Nk>gWm2S_(V=dDn)RCS8V4F&;xHiCQ|Y6Yr7y1o*TA*lHhk3Mn%M`8tsOm zB^UOQK@O1bmwM3wI&qi9QQbru10VVB!oT+5zz-Mjy#Ngfp-x6X30S-m)h?T0)#j`> zS;U?fl2Lf7eJQ&L4dhN?V6bQ^^VWFL{Z`%_wpa|npV5d17NYP#%ODQS z&G*`Vd4&hY=kWkMbamFJ(ssjfQ-9Jjaicu}9s1mX-W(6~UEZ|q+sKM9QNzA!f4ZX7 zD{E>b&Zd}57B3#!(Xm-9woftO7mqrAamM|vp{CxApFMg~{j&qh#<50Yzj$%HZGqL! 
zUTQgQ0_ScWeTs6?t08n#Wf>D|v}pTI@6o2^)H|1&DU}5M)8}DUUys;?saUc zHM+Mpq2hee9jg&LKrXiIc0Nu#%x@*XJl^#+ZE5} zBq^XDN~TP(aK(#XogYP1cjAHK1s7{24Q$^s%bjDB)uxM+kG;L?V6%9jWUjto5bJuV z9S@|0+G0P&WDPK2Hk?07pqn`}KK5C1Ps(gGE;5yDSo+xH4kO>qm52D2S2bJ-mOX~k z9WKQ6y-Ei=0B3Hj|D=KuuG<-xV1_;L<;AmU1>M^zdb(4)6ldE#mx_u?+^wA_x%zBN zK88%4M1_l0J(8V@}A5W8oUt*3OrNGtT##wv)g^>V!5mlxRe&P7=bV{Apg{L=!z;ylyF-rn&n z_s8Xanboj9+mD+U2KH`vV6qjjpSyrdxwZP?!&{@>u79FkY9sD(hq^w(30*`2<+ zC#E=k<_dW)2l*DVUOvjqcGIG)B#BmT}|9-irI#%(aPP6%g)5_OW=kxQSNk0y|tT4dN8har4LHU z;OMdpHC3#@=w+Yjs13M#G{W;md7oWOM&TmDJ@gtLuzzi=W^mSy^$T;K8+-A<=94;WtmdIn zS+g&l*!yod4nJL8njWVO@a!g<$2tYZmEg+Y?pbvWb&Z)Nuv1M_e(eP%e;3oP2JQlEg%{)#<(2YHSCSdW$&51sXDEKIx?}uBXj6mcEDlBvQ~j`Y+X1`vQ4dV zlJ!RWyT|kH#~RXzf>dpqSa-)1mf-=_L2TvV!i52WsmOPO*@@%uKAs zV?5)f9~4E0=ymjzJI!|n#^3>s)V+v%E#Wxs~F< zFLjC)pm(=k5IO$XO>e$?Rcki%^9<@~>}_g1z*?OpPTwbcHXOhfeN8-wjw8Cl|wH5v_8WyxCVOO9HuzI&xe$D}eb*lf?u8UNi%%kJ}; z<%3gQSfPZ%cVIK7vMQCP^xko2XE47n?ZqBAUK6rmb3OW5SNMACW)tVy%HR};Kgp@3 z(aA$&O0@|%UfI-5D1wT0Xl=NLjxc57fp4O)`=E6*~c3h4!yZCu)w zMxGE2=GEaEg=6H>eQX!dZ(ODaSVEjN%d+#t6WoTy%8F;X)#p}?{KhS?BGdO3z;4%) z`g&@TwxRY>^OcOaW_MMtp~lq^Gs_+FlSk@wKVC&RV>*=wtnqt(^3M$^?FG^XkdJ_f6jGtG*W3ouU! 
zY%-Dr->7=J$~jmu8kd?K8eMTac^Q3oIXt4tV=8^@E%vNbvYbr!?D~D=l1Q$CMD9{h z;1$MAVFiTGmt4`6qd9{`gY|ynP*AW@Jg)R6@6oOSqhz$TFRgwxMvE{;l|Uhbpe;Y=fNnFxFP5dRS~gc);un_ovWLu6p*%EYsG5rvj~eR$(Q3 z-g-Y2yCTfMRyWr_G&pF%ZCzap3(577#`MdZj$74|kFgwiBH{Nsua~|WS?P!eqC#@a zl(A87u(w?VbCdh0dWYHjEi46cQWxUanMK~nJyo)qN-@lR>QVj3=NUHfRD(O)d`BNL z6Gz#CwwMeVuZdeH*PSvS(0b7R#@mWpSKXoZ*aEjQ9yoMm<)~foaqI#fkW0QfpC24G zb3C6bFRT2`veMAQo|1NRrID91a`%?G(wF5j#h!P^_$0RSJZGAx#i?iaTrVcxb)T?#to*49=E+yFU*FD#Z2CFj5r|G}cB0I1pF%~#e zPbI8@W9XIIQv0Q-;_KFaJ(bT~D0j!l%zB`nW5>D_9|qf3;W92Kydgv0VO6Kimd=?a zcfwiEZ;ZyRY`Rb9SxoLp;YyaZ6q;6WsC(0I`hHL+*S*J*btMMPF|80-E$6^&`z~f7 zGV-FYlg8VpZg<&AEYVl25$8qO1$}$pTg~RUeJHdI)%uvKkBv0QeP?p+`Za`BlqTA8 z$V2|koxp?5x z=_}c8OLRuA`!f@T8Xsi@J=v_eT9pr9<``U1^))kkXprRY=5ynGpdEd&>ETW8@wtpn zl;$CPXE~0)@S&f2+vM=euB-!HLh}WU)0v<_CC;V}mgZoVoMqw2+pX!Ho|5ld=zYsF zCe7TH6iY-@>hJ)U1#RZt19uCWLXR$wvh{}Su8Qg(T=(9m_$2mo-bS#}q9z_Fv|az0 zfKyCeyN3sQ@-~+=^&Z`;0Plji^)@;}XS#6MeRv=y1Lvx=;*JM`cN$G)ih%dPb>K~v zWa_#U9;mt)`e70e+|=8=3El&%#bF<5HC8Vyf%m-!a4n(pcWl34L&vY<0jKKK_I-+v z{u>n^?7_*;2=Y~3Vgrq#3$(%%J@+)@fh*S;`;`rz(G8(% z+3EW}3|c5Y@j+&n-4)3+r+a3pL&Iq8DPLS1RFyDMiCc;_!uM?b7Ud=U_<|k{gTF_? zr^CQ8z!7{1>;OLaXz|Q-%m8YP?w)t>>iygUZP>RpMgG~jWv_B;Olmr$%siB>7D{ma4LWTj zsyG(Wl@%+4cemO3Fx(aU^qsA&P>RD2$>I~YMjkQcN}`=FCi)M3NeOjXFeFN}yxAf* zK7vH~S;g(t^Emcp&yZv(7i|TZG82DdwQ4S(M82Aeyq9z9Do;@Vv+8i$Q-LgDtnu~! zGW}f>z7yB=S7s=v#hms5w+?fKRt1n4@v{13cy^67(MAqr7n^7LE=nvDE(Kcz! 
z@vGGBhxfrOf##iO0(Tme@#)pC;NL%K>wj=&J?dZzR>N0%U%_qi9uyrI_b|BTjkc z!UyIa@KS5Ia#Pz3bUQ4c&Ky$QDc!$EgVM_+R;rkI4;OY17j44)MdsYgk*GRE>4<_- ze5`_0N>$BWOQZHtzE0dsS-davl{S@x$7pF5ZOdVs0rAh6h^3p%!oo+3mJeqdt@M8~ zP)ItM*IH(*I`MfRRc5k%LE0+yyjFO4`#uy&_Kl?J_Z5;PCK3sCm+o9SRBJo?;>L{5 zp?x`B9<{`6HscC;t+?9{avC;t=W$QkPommZ(kmN25EXIdu$)vGZoc=a{OlpoftJtL zto?=+6`#ag1@#a`rDRH->{X=TET#NQEq3Sla!edm`?>WO1v@@ae{@Yp z(>*)N9fE6TII>98<%_`y2$XCbI7_30V5%;lZ%STZ>P)slQmCpArgzHlISyZ{HkKv>;Nyx@4$f0SR{uPAFm)8fXx zh~)v&@LTU?6|)0JNp#9(nb(?6;-VFH-L7V~d=XU2EPQQh(J;5` zdO&j65SP*R%8l{b%CZG&VknaeRdYEwB10_b z164xCVPOAh1x>A{f)rOF<>XKB)0873HnIg>!TNDeW*INOOXWCny^n-NX;=FN)5Uu| z0}t=1g^iw_kC*t8=^NY|IwqCSk^AJTuk`+d&z`a)On9a!CH6NKSX#O?SlO0g5;}V` zNw9MhPsWgf9=y|>8>Dy3QR?m`^+b#Ec0FkGf{wHK=NEi&yKzt52K9;@nHH7E56)(% z1f5o7b4ae7t9+F&p3gg*@FoAo#RG2jqR$CrQ<)?YKU^*#@p@IE90++{;fjiA& z?>2QJy&ll}74;3{f!YCl!qV+5qqza9vINGP`~Btw)Ub-T ziu+CV+^Az&;>)zpzPtYH)4kKF<61G%8$Gk^8aU2oNqyU>?niz1n%-Pm@oqjs^QnW% zf%xD}N995<88h#!I?0>NJwsCERSB=Z7!+PE7LjQ1%{U!q|76UPKg2n_h`;ud%Z4fq zO@mL`J#MPqBojy8emX~Lr~2rI7~12q%Gz$1y@z+X$Fj%4F0qRAW{)Vc5#73z&hl=z zX0#&q#bvTnrHR+{Hn<<__Q!ks5q;DuZkH_U&})rVlelwy@Pl0;>)2j)Hj;{aM+XhP zUy(g4yT#H;MP?8x!!wxRJACJXk+hu8NN&{LljPQ3Z$?i(z|$1(cAF1RwChd#9FW89 z`;mp@u6KZPVbpDy%C4D9qRqF}UcEWvLVdk=qd+X4;^XZs1y;`fNShmp@atC<7d76{ zx(ore0dK3OTaIX<-21Ltl4Dm+R9teWxl8q?Omf$tsKdxX`bp)hxTJs%QZcXdJY+j|w)Y9Fzp%K{HG*!i+W#X26G zsTrt<-4%A+_HthU7q3|t;!6N)3w&BlTh34ASnFpLN8#rqK!u0dy~270yV663vTNd> zIx3$IvxQp`@6UT%cGS=O+LTo-W5#2xw29jv&$@naxQkGE9zh=FZEMi)UbQTWy+GTK zeSC4H-M(0Q?u@uZgI;{a*=EC#B z5E1ORcLe4y)SrJL9+o7p$IjAw&#TAwUhVFtYx18BQm9SPQDJlIXN2PSrLEJdj;WB& zgugc4$I`eL?R_bzZphk&g(P`xuJR%+fTo3$WzL?#~cS*$0m+i_0r*0T%YJNzv8#ykX@&Xh5}6rV5< zNe`|@hd)?|eV#simcK-0%yQpg#%TU7)Y`}Q*L1!1mtLdFwXhTSV_eg$$8q= zx5@E%aH=6~>wDhN!~x0IJYn_=qyb?2=Zn96j+O_SP3BSYK*HPs9Zi)Zr~rHIp}pWt z3_%OpK{g(UwIX5DmEi(f&>dSBQoZ zL?hi1?myE`UTA_oG&Ky#Q64alt!Y$(3W4Y7>4H81lHY7%&^|Co3f7E!Fr*)cBrmz z=#qRb|35_+R43T`UqrX#pP&n!We8mna6^I{gl-DhdqKRxCjQrq}5KFkBnNJF-M 
zM%NIq|M~(Ags#{x=tBJwD*qph?({!J7wW$dx;Eejp=&^RrvZAw;NM2~Yu}alYU^io zf9<;xzn}}9-v4lPH~tB_P`jaVs0?o2;0B@l0Ac{pYb^gZx?lV5-$jOB`|hui0UAyJ z;po!V{e!s64YmX7s)FDKz5fBByY*Vhujo?!&*JV^bpNjJ{)+CeeHR){|KaE={S$P- zX8e*MbP0ihfcd}OcfUr4zl*!SMuuPGE_BuV4@cMPpP)+uoVYlX5TtKx!ciC6lv`oF zgYkmKb3v5_2L2AilZpAS_{HG`9@#_%VQDk zzx!&w7A@S`<=or%{`vXo_a2<|jTXIPv?*@?uyogOWgOhcR9TSBM!nM8`_5JKFjY-8 z-f0FG4_BQ4rt1PSDY=Oc6?_)jlJEB4dCP{bC|;{5K=v!c`XWi*d5Yhytf1NG-KkcV zI?pkLtv}V@F~e>}9DkZa&qC<=7Zdg1l6kJtW9p@oL8a&K%^heGqvk)RdPGF^>p!k?Uc zLqZO5j4d^7fYk$`LWwqVtJ`-jR!|&WS_DJNOZ5gbb{ zrn_Hh$zzGO!Q(2&<<|SB@9e^P2p<_7dPR%BC|;3^qDOt)J?Ry|MUl1l;9W1d)ilf$ zLnI1M`EfWLZo^{yeAeoWV-B@X+3O-4m2>t+Vq8AnCU1y&bp0;*$Ke;F!v)O33lGx0 zll4O%jYOfRCfPb;cT`rjMEgD|OT-EvjI8x>(C_J0nsR?&B-ee1!&nEO_AY9GB>O5A zXRh+i398mgA98--Q!)qEC-E0@#_xt-3wi97mNHD#drD<93VZw-%>#PZwf(t#*1{{- zF=l*XGIv{DWcePxNRT$ekv*V7HXgnDLUJMS&|5kIX)Xk+dntDjP+&13p} zx<7EZ3Z}dq3+^w8yKbW=P&g*}j8^XXv&ZJW{blUT^}=Vm?vv7#tTXnrMP&i?I0|Ie z4YGy#-sq`8Th<}reF<~7>x{25uhA%nutiZH?joXad)_C@1ap|E9gn~y$|O83TH8}< zRs3F`v?Gg7P*}|*qU@x^?OjBZeq;0->q!+p;=6`EHhJ~ng=^yS%PeDtGMBszkUl!(iPDDC|>En|UL=?&GY z$)hv^p^UEHeVcC{7aI&#pghm_s!1I&m6J<)CRnO^yM)I_$>4f2r9RhXuLF7&1DeJ< z5~EXgX?B)(T$O43!i`QxGY`}65V|Xhdc60{hm{fS-0SGXC~f_u+PixKp*v!uB`e? z>e&lck`Dv$^egt$j+NX7gDM`C=+0$to)qbr!P$cTcdJ;RktZ!y|c2oD&fB_b)UKkNT$x^}%#b^gRfW(P} zorJ76j9No@Lw$rwZ(%t7nhwt=xK^v>p1$Dl9kiar;VhJL&n3}%M3=!l<-WAA=G!Xd zS$p=N9*B#cuw_h(vgXFz7SE4d);uDjO_XbC8kioIgo5%<|e8?0^!S9 znx`yN&K2D&<<|Ak`mXQg|03g6H2s8fU`AF#DIDP|bZ{;`*?M1V&_|~P>X#wpvKBti zt{vMXCb!PU+Iw9OPp=9W_LY((rd-)ggn8_yTB9eLGWb#jmW+$jJ}Y*)>)`7*kH~qQ z89AIdnMgRh)$pYq_NCW< zTX;8iI;XHhqsizDV~0zcx`7PYDGqg;hxh*`EwfP9uoa2U+|iF}59EtkZj@Xx%m6EC z`lQ+fRq`aBTAxW{PhE9+lN^JA@`qidm&`Nk*)^U$3yTQ5`|i$l1Mi1nuib>sI56fj zs|8%3=O8x_c*kOT{{}IY$t8-z4C#k2kX(4HKW1=@NQ^ROG?#^#S@>c!-G)SAP|xN( z&bNA?Pa%x{T3Pjs@ickS7mEDZivH{Xwdoss-+mxJmT0u8;taIEdYs)_2uyE$;BFl^n6=kEDt$z1Ix{kM_irrNAy8^E@}_ z6i6Xoyp36F1M;*>w9z5vW7j$Uy+IWMZ@;%>cq5Bq0B-xI? 
z6cxXJo$|7@_hqgy44fXjg<(SLa(@@=+gBnYpSr5$!+(Yx?s*x8nCt zuVgx3@eB0yZBX*BJKOpA$SpDbE23*J^o@A1a@0n1Zs#5u%w6ixRX}2`URJO&|60O620wKuJ|Y=-9xZZp$k3E z1}AMz`AZKMdKL)sGnda1r;fmw=}x{*=^{ocsM%GBKie7RA|mT;CS@F#^S-)*mI21L z=GC4n%A9Vz^N?@*LQru@qE?mOVV`hM%H@b5|4)ZJ@$2EwKB*_!J?-GXsqY|VhX}}2 zHkoyKI>poT!l_GJy!k90r&&-ZXND-K9=KQp_-n+N}YBt z=E6oRy&%&uIwM_L#Y35ovXSp4<|Qy4weTg)`>M`XcuNz6xOdQs(D(+qRerd}wU*%i z#p7{QJG?v;sB)5{SMKtyw3MAhcAnEvGoqI|7*>*5E5btJrfsAC*e{^fH0ktX)AqZU zmJNpAjTh1nJT-+s2|j%BL~c>6#{LLB3pv*5yVGK&v%67Y{WKdQ3fY$@dCF6bPIWy{ z+;^!Jn}sA3R84&4_XtZ#k16#dwwTcOCSJB7tLh?o>2#gv&f$g64@gMLuhbS&SOTY#jQ6*8h0K?_R67A*^!wft2>u2|9O)eO_l6 zlMXe0z^5IbNGYCYrM!6?M*HQ4Pu?Nv@nX(VV$I_FhwL;Y%VvlA-dtObUzER2P9@95Ix6qYYOJyR*zZ5nyyS>^Q}&tSJ> z9Aqk(7k4cDVKwl}M&w?Ko+@Wx&&>xn!vb6An(G?})+Iix1R7tnJkC0U=NU+2p!Tm4 zDO9H>SJv1Ak7qu`xDku***8o*9Vto zS!bEZ+9kXf&`x1b@~2j<4v{*t&o@r4`yg9fxVa?bji;;FcningORQmBni}&$-Yf0k zYnDE%8VVnI+b+t@A78b6cuQl!>pwu1u_#a`d(Bt zj>%1lqFtI#ux>uV9!#Hc&3bo}SXlMQDCIg@d-02%@knBc zZyh|79dLtR0k?HM+|v_wZCFVmd)Rpbhdnve#&Yx2i{^umc*~-XO(n&>T`;S5b)wCC zru6o)Adf0GvnHxOS>$&6i=9r)!a~tC6HU1U*ioSa#;RdY41((t&4I(ev9ee;mNCBpiMxDj@Oc$Wl7x;rA>-q%h0S zs^v3=T6+TUhcmP5_|h?YOHvUNq2r4ORF3BpUq86_gHkq1dkQ7z?P63Vu)(aZyeiI{ z>_zlEbSJ9ZyYW#!hulwvGGwUtho(j2wFP-%iO$)5vVoy!YjB&9Nwg=9@#qgJ zQQ)0LMe2i@Dt9G{*Vqg+cOyI6F&C8U(}3fmJ9x>Q@|J7Ol$({5qr%7}BJSf=nPyW% z<7Kl#*K?1I<7GYHeLitMv!^x4o9%GJ^Uq^4Z_F4jDHD;rLp_jKKuHCHWaf?7;=D5uvv;N6=_0iM|9n29`#!u^# zF4~;hfn3Y13prIkRafYKUR#O#w88p|nh=&$UhU{dsB#K({`Eu!_Le)1FEc{LH}P%t z8+eh|dH9QhIi!06OlH`;nzSnHnr?;(G6YPn$&EzU?ucc3C*&{KsMh+Jaou)A`hw;- zBT?27a|<7z3_G71v}06%TlM-Go!#|2>Fz~b98&c7Lg3NI32+_TW&8Oi_ zYz+Dp^l;Harh_yV<~?utrD5iAimc6-6qe&|w($Bkh;*DhvHSWPiDF6pMC@S5^7%{e z!hpTgNTIj1Cz$W+!tFlEubnHcrOZ@bO1Lo2{80VSmFwJ!_bv+AJ=?IgaTj=KUB~7> zOyqg`3{ZjJfvd7(@>e(C_bN>H@SQz3_)N~LZ6R{Y2QSuE{ zUAlyQ8_BGPhE$le&jlXHPW5P9XTBu z>MAMszWZJLN4IPB4Xy`z*G-ovx~0xkmuR_foK)6sy+4^5qdQPgMd<*8zi8-egPpB+ zeCNtx@hX#Pl?sJzdpFJ24>h3pvGp#G1b85Z9 z5>CM`_Ayu5ba3rh(#}UCp2Ix8nH-Uxai_235m_6&p&5Lk7;j+s$HD7y2HiphMid*;6o;`0Y5E9$ 
z92lF;d8sZD?#`c!LGw#5(w6l+X}Hq>AS{Mb6rjEWfd51I#6`@TU~ML64)Ra)%e`Dr zDzcB%7Bswg{M1ExiMyCWLX^fKBM4S-5seW0$lY1`(WYRZaDHY}tWO^|pXvvjFbauh z%Eh%TJi0b7_$72}c1T|0IYrL#=7HHst@$2w+wN25e)#^)km`w$)S%lzbB{ZkOHPLE z|8P{tW9ZgH0a1*$^3g9oS?jX)-Nd1h^*br2JMZmhc{38l>i9@849_*0B-Yqqx8p(v zl~sIFM*4mO>NR`6DU(j)6;8zD?U)^FjrnAgXHX^glLz8u`M5XrU4$d%&j!c6t~n+* z$3pW+p~SRK`LqdV;%A1@5I2fGd&@^jXU2F7$4^-7WTCn^;n>SiGZPdLSwK2DVx}N| z$3;!n?+ckl+NZ80a-xQ?qjOp`oqU(jSHD8hkF~MRw zod%LvV^4A{i%VqoNnb&vUH<-!Dx#8>VV}2cc86UN2**7;43UbXmV>wvQ^TOcmGN5H ziSqmLk1e}Ac&p5J$W_gJiY#)rOki}rBi8*{IM|W@B9B8sufe({W9RJ-)=6SAM90eF zP4X_6gJ_No-)R~x&n#LGH)pe3(MIz_U0n^zhE$_j#f3q${`$|H7L!b-3&(fwM7l;5 zkVJ&-VsQ%?->u&FIn%T*{D_Q(8*@L2f%DNN8pv8HovY|ex{z}+iDfe~Bgv05eX}rr$f1a2+_amoHch6*`YQ8M)jI2oit5+A zN$b@L9x2S5M^s16+qH0C{QOGBy8X%StjhW)Qwj^$dJ~843~Rr$vzlP5ubihpD@CF) zc(_ya7Fps`(?c?%vtJO+y>y+N1}2x13U0cvVFE{`UmbT5lb@Bj?{*@u>G*5!Ptu2u za(Zy3Th=^$E^Iu1tMhTioiL-K<~jf7Vw&|wWHPtj#uj&6e06jB0Cpe6iJph(B$t-e z%7QT(&5cdTGHfayN0hU zwtkBLRAM=FTAZWYa`JU@fNo&Yd%G8Dm9qPt}V zCp+mFsUbKlQPBu?Gw@Z12#*Eb`feJ<43krwAvQ@R93H1IWiyrIw01cj;X*3f32yY^ z-SW2!_k9$%iE!*j4{prD!s3+EKK_Y93x?ve)6A4V7C4)(V zuNzbGto%)r6-m#BO9S(!tGWHmw28}j^KEc}EwYbA_)U}Y#jBEK)|s!qoGyIR}jx}`* zDspZx(Koiw|1=*OuOzaPGjR6t)lRoZvM;F-xya=GsWnd+>Zui0qpaiTcU=%c5({Z7 zE|Ku>8|ot~{#0k{8?UF~j3APuiaN(VSa4AyShm{D!iBR&<@o*c3|C^9Jao$D^(;OS zC7)Bijd?;^AL)F6Scs;ZviNEzKh9F&waWRyYID4mMdgYj9kuO@|4>H5!t`^FIb+wy z=NuTN?$E3kE=Mhg&L=-%k37dF^gQiI|IWzAS54jzPq3w(8*HmEG=i7ryA9~fFe}d6 zj(>1&YWg(uARVYzkk>z98wAhZndVA^9Y5ZdE6Dbey8L{0;s3J>E-pkL6SPh;?3vJpHHld4p>e0Io zZ_d@)k_p4pxnoj~?bu;lR;BpmkO0-&kU0sG$H2wLTXWLTn=H`n!dCL|@Ih?N?g4;&lmY*7GPK|w{8y*;9vVKb zj>hKd#y`})YD2a)5pWF|kp5v8dJhOxH-^JpG!afnNDFe(6>WkBzdZr2q=zKVh^=p; zd~3&X!s2OYCYO-5X0E{(f?x@t{tw%b5b_zC4}=l|jWUI~Iq3ax4U$4r?a;dfp7w4? 
z6jT!=CNNwPTMVcKv^lx@fAa)-3CIf#u1<&kzw1A+2~9TtViTmd+k~KSgri|7SHkB} zet-zg#e@HR8;wN#;6eHbXN0<^4`G2jArG|^hV=AOMWW!);&V8{-oXQ|`jw-hZvqvD zjBN>^NoXhVe|r0%;Jd#wzm+z20c&(b|4WUXpvJelK+8rxUS3EP=3DVCZy}J-TY{}` HAprjeaPcR} From 8bd9c14dc175ec4372cd8594a63e76be99fd1029 Mon Sep 17 00:00:00 2001 From: BBsan2k Date: Tue, 16 Apr 2024 07:47:52 +0200 Subject: [PATCH 4/7] Chunk for performance improvement --- .../source/devices/kunaigc/deviceHandler-KunaiGC.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/cube/swiss/source/devices/kunaigc/deviceHandler-KunaiGC.c b/cube/swiss/source/devices/kunaigc/deviceHandler-KunaiGC.c index 8eb06681..506253c0 100755 --- a/cube/swiss/source/devices/kunaigc/deviceHandler-KunaiGC.c +++ b/cube/swiss/source/devices/kunaigc/deviceHandler-KunaiGC.c @@ -150,16 +150,24 @@ s32 deviceHandler_KunaiGC_readFile(file_handle* file, void* buffer, u32 length) // Assumes a single call to write a file. s32 deviceHandler_KunaiGC_writeFile(file_handle* file, const void* buffer, u32 length) { + s32 total_bytes_written = 0; if (!file->fp || !(((lfs_file_t*)file->fp)->flags & LFS_O_WRONLY)) { if (lfs_file_open(&lfs, &lfs_file, file->name, LFS_O_RDWR | LFS_O_CREAT) < 0) return -1; file->fp = (void*)&lfs_file; } lfs_file_seek(&lfs, (lfs_file_t *) file->fp, file->offset, LFS_SEEK_SET); - lfs_ssize_t bytes_written = lfs_file_write(&lfs, (lfs_file_t *) file->fp, buffer, length); + do { + s32 bytes_written = 0; + size_t chunkSize = length > cfg.prog_size ? 
cfg.prog_size : length; + bytes_written += lfs_file_write(&lfs, (lfs_file_t *) file->fp, (buffer + total_bytes_written), chunkSize); + length -= bytes_written; + total_bytes_written += bytes_written; + } while (length > 0); + file->offset = lfs_file_tell(&lfs, (lfs_file_t *) file->fp); readLFSInfo(); - return bytes_written; + return total_bytes_written; } s32 deviceHandler_KunaiGC_deleteFile(file_handle* file) { From 498ec9a03dfba452a619f20616a3e31ca71b919a Mon Sep 17 00:00:00 2001 From: BBsan2k Date: Tue, 16 Apr 2024 07:48:24 +0200 Subject: [PATCH 5/7] Fix EXI Locking --- cube/swiss/source/devices/kunaigc/kunaigc.c | 57 +++++++++++++++------ 1 file changed, 40 insertions(+), 17 deletions(-) diff --git a/cube/swiss/source/devices/kunaigc/kunaigc.c b/cube/swiss/source/devices/kunaigc/kunaigc.c index 02f5762a..3380cf52 100755 --- a/cube/swiss/source/devices/kunaigc/kunaigc.c +++ b/cube/swiss/source/devices/kunaigc/kunaigc.c @@ -7,30 +7,53 @@ #include "kunaigc.h" +#include "ogc/exi.h" #include +#include + +static bool exi_locked = false; + +static s32 exi_unlocked(s32 chn,s32 dev) +{ + if (EXI_Lock(EXI_CHANNEL_0, EXI_DEVICE_1, &exi_unlocked)) + { + exi_locked = true; + } +} + +static void lock_exi() +{ + exi_locked = EXI_Lock(EXI_CHANNEL_0, EXI_DEVICE_1, &exi_unlocked) > 0 ? 
true : false; + while (!exi_locked) + { + usleep(100); + } +} + +static void unlock_exi() +{ + EXI_Unlock(EXI_CHANNEL_0); + exi_locked = false; +} //wait for "WIP" flag being unset void kunai_wait() { - kunai_enable_passthrough(); - spiflash_wait(); - kunai_disable_passthrough(); + kunai_enable_passthrough(); + spiflash_wait(); + kunai_disable_passthrough(); } void kunai_disable_passthrough(void) { EXI_Deselect(EXI_CHANNEL_0); - EXI_Unlock(EXI_CHANNEL_0); + unlock_exi(); } void kunai_enable_passthrough(void) { - s32 retVal = 0; - uint8_t repetitions = 3; - do { - u32 addr = 0x80000000; //for passthrough we need to send one '1' and 31 '0' and afterwards whatever we want - EXI_Lock(EXI_CHANNEL_0, EXI_DEVICE_1, NULL); - EXI_Select(EXI_CHANNEL_0, EXI_DEVICE_1, EXI_SPEED32MHZ); - EXI_Imm(EXI_CHANNEL_0, &addr, 4, EXI_WRITE, NULL); - retVal = EXI_Sync(EXI_CHANNEL_0); - } while(retVal <= 0 && --repetitions); + u32 addr = 0x80000000; //for passthrough we need to send one '1' and 31 '0' and afterwards whatever we want + lock_exi(); + EXI_Select(EXI_CHANNEL_0, EXI_DEVICE_1, EXI_SPEED32MHZ); + EXI_Imm(EXI_CHANNEL_0, &addr, 4, EXI_WRITE, NULL); + EXI_Sync(EXI_CHANNEL_0); } @@ -107,27 +130,27 @@ int kunai_sync(const struct lfs_config *c) { return 0;} void kunai_disable(void) { u32 addr = 0xc0000000; u32 data = 6 << 24; - EXI_Lock(EXI_CHANNEL_0, EXI_DEVICE_1, NULL); + lock_exi(); EXI_Select(EXI_CHANNEL_0, EXI_DEVICE_1, EXI_SPEED8MHZ); EXI_Imm(EXI_CHANNEL_0, &addr, 4, EXI_WRITE, NULL); EXI_Sync(EXI_CHANNEL_0); EXI_Imm(EXI_CHANNEL_0, &data, 4, EXI_WRITE, NULL); EXI_Sync(EXI_CHANNEL_0); EXI_Deselect(EXI_CHANNEL_0); - EXI_Unlock(EXI_CHANNEL_0); + unlock_exi(); } void kunai_reenable(void) { u32 addr = 0xc0000000; u32 data = 1 << 24; - EXI_Lock(EXI_CHANNEL_0, EXI_DEVICE_1, NULL); + lock_exi(); EXI_Select(EXI_CHANNEL_0, EXI_DEVICE_1, EXI_SPEED8MHZ); EXI_Imm(EXI_CHANNEL_0, &addr, 4, EXI_WRITE, NULL); EXI_Sync(EXI_CHANNEL_0); EXI_Imm(EXI_CHANNEL_0, &data, 4, EXI_WRITE, NULL); 
EXI_Sync(EXI_CHANNEL_0); EXI_Deselect(EXI_CHANNEL_0); - EXI_Unlock(EXI_CHANNEL_0); + unlock_exi(); } void kunai_sector_erase(uint32_t addr) { From 4c65c4f700d5ba15442fb4f40d97e38152349c06 Mon Sep 17 00:00:00 2001 From: bbsan2k Date: Tue, 16 Apr 2024 14:29:15 +0200 Subject: [PATCH 6/7] Return 0 in exi_unlocked function --- cube/swiss/source/devices/kunaigc/kunaigc.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cube/swiss/source/devices/kunaigc/kunaigc.c b/cube/swiss/source/devices/kunaigc/kunaigc.c index 3380cf52..2eca3721 100755 --- a/cube/swiss/source/devices/kunaigc/kunaigc.c +++ b/cube/swiss/source/devices/kunaigc/kunaigc.c @@ -19,6 +19,8 @@ static s32 exi_unlocked(s32 chn,s32 dev) { exi_locked = true; } + + return 0; } static void lock_exi() From a25fe96c4abbacb67a29887f30d9573c234467d4 Mon Sep 17 00:00:00 2001 From: bbsan2k Date: Tue, 16 Apr 2024 14:30:12 +0200 Subject: [PATCH 7/7] Simplify --- cube/swiss/source/devices/kunaigc/kunaigc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cube/swiss/source/devices/kunaigc/kunaigc.c b/cube/swiss/source/devices/kunaigc/kunaigc.c index 2eca3721..062a7dd1 100755 --- a/cube/swiss/source/devices/kunaigc/kunaigc.c +++ b/cube/swiss/source/devices/kunaigc/kunaigc.c @@ -25,7 +25,7 @@ static s32 exi_unlocked(s32 chn,s32 dev) static void lock_exi() { - exi_locked = EXI_Lock(EXI_CHANNEL_0, EXI_DEVICE_1, &exi_unlocked) > 0 ? true : false; + exi_locked = EXI_Lock(EXI_CHANNEL_0, EXI_DEVICE_1, &exi_unlocked) > 0; while (!exi_locked) { usleep(100);