Added API boilerplate for lfs_fs_findfreeblocks and consistent style

This adds the tracing and optional locking boilerplate expected of the
public littlefs API.

It also updates the code to match littlefs's code style and adds
LFS_READONLY guards where necessary.
Christopher Haster 2023-09-11 23:42:37 -05:00
parent d85a0fe2e2
commit dbe4598c12
2 changed files with 43 additions and 21 deletions
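
The optional locking added here is only active when littlefs is built with LFS_THREADSAFE; in that configuration LFS_LOCK/LFS_UNLOCK dispatch to the lock/unlock hooks in the lfs_config. A minimal sketch of wiring a pthread mutex into those hooks, so the new lfs_fs_findfreeblocks wrapper is actually serialized against other API calls (the my_lfs_* names are illustrative, not part of littlefs):

#include <pthread.h>
#include "lfs.h"

static pthread_mutex_t lfs_mutex = PTHREAD_MUTEX_INITIALIZER;

// lock/unlock hooks return 0 on success or a negative littlefs error code
static int my_lfs_lock(const struct lfs_config *c) {
    (void)c;
    return (pthread_mutex_lock(&lfs_mutex) == 0) ? 0 : LFS_ERR_IO;
}

static int my_lfs_unlock(const struct lfs_config *c) {
    (void)c;
    return (pthread_mutex_unlock(&lfs_mutex) == 0) ? 0 : LFS_ERR_IO;
}

// in the lfs_config passed to lfs_mount (these fields only exist when
// LFS_THREADSAFE is defined):
//     .lock   = my_lfs_lock,
//     .unlock = my_lfs_unlock,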

lfs.c (55 lines changed)

@@ -622,6 +622,26 @@ static void lfs_alloc_drop(lfs_t *lfs) {
     lfs_alloc_ack(lfs);
 }
 
+#ifndef LFS_READONLY
+static int lfs_fs_rawfindfreeblocks(lfs_t *lfs) {
+    // Move the free offset to the first unused block (lfs->free.i)
+    // lfs->free.i equals lfs->free.size when all blocks have been used
+    lfs->free.off = (lfs->free.off + lfs->free.i) % lfs->block_count;
+    lfs->free.size = lfs_min(8*lfs->cfg->lookahead_size, lfs->free.ack);
+    lfs->free.i = 0;
+
+    // find mask of free blocks from tree
+    memset(lfs->free.buffer, 0, lfs->cfg->lookahead_size);
+    int err = lfs_fs_rawtraverse(lfs, lfs_alloc_lookahead, lfs, true);
+    if (err) {
+        lfs_alloc_drop(lfs);
+        return err;
+    }
+
+    return 0;
+}
+#endif
+
 #ifndef LFS_READONLY
 static int lfs_alloc(lfs_t *lfs, lfs_block_t *block) {
     while (true) {
@@ -654,29 +674,12 @@ static int lfs_alloc(lfs_t *lfs, lfs_block_t *block) {
             return LFS_ERR_NOSPC;
         }
 
-        int err = lfs_find_free_blocks(lfs);
+        int err = lfs_fs_rawfindfreeblocks(lfs);
         if(err) {
             return err;
         }
     }
 }
 
-int lfs_find_free_blocks(lfs_t *lfs){
-    // Move free offset at the first unused block (lfs->free.i)
-    // lfs->free.i is equal lfs->free.size when all blocks are used
-    lfs->free.off = (lfs->free.off + lfs->free.i)
-            % lfs->block_count;
-    lfs->free.size = lfs_min(8*lfs->cfg->lookahead_size, lfs->free.ack);
-    lfs->free.i = 0;
-    // find mask of free blocks from tree
-    memset(lfs->free.buffer, 0, lfs->cfg->lookahead_size);
-    int const err = lfs_fs_rawtraverse(lfs, lfs_alloc_lookahead, lfs, true);
-    if (err) {
-        lfs_alloc_drop(lfs);
-    }
-    return err;
-}
 #endif
 
 /// Metadata pair and directory operations ///
@@ -6247,6 +6250,22 @@ int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void *, lfs_block_t), void *data) {
     return err;
 }
 
+#ifndef LFS_READONLY
+int lfs_fs_findfreeblocks(lfs_t *lfs) {
+    int err = LFS_LOCK(lfs->cfg);
+    if (err) {
+        return err;
+    }
+    LFS_TRACE("lfs_fs_findfreeblocks(%p)", (void*)lfs);
+
+    err = lfs_fs_rawfindfreeblocks(lfs);
+
+    LFS_TRACE("lfs_fs_findfreeblocks -> %d", err);
+    LFS_UNLOCK(lfs->cfg);
+    return err;
+}
+#endif
+
 #ifndef LFS_READONLY
 int lfs_fs_mkconsistent(lfs_t *lfs) {
     int err = LFS_LOCK(lfs->cfg);
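
For a sense of the window that lfs_fs_rawfindfreeblocks sets up above: each byte of the lookahead buffer tracks 8 blocks, so a single traversal covers at most 8*lookahead_size blocks (further capped by free.ack). A rough worked example with illustrative numbers, not taken from the commit:

#include <stdint.h>
#include <stdio.h>

int main(void) {
    // illustrative configuration values
    uint32_t lookahead_size = 16;   // bytes; littlefs requires a multiple of 8
    uint32_t block_count    = 1024; // total blocks on the device

    // blocks scanned per lookahead window, mirroring
    // lfs_min(8*lookahead_size, free.ack) in the diff
    uint32_t window = 8 * lookahead_size;

    // rough worst case number of full traversals before the allocator
    // could conclude the filesystem is out of space
    uint32_t scans = (block_count + window - 1) / window;

    printf("window = %u blocks, worst case %u traversals\n",
            (unsigned)window, (unsigned)scans);
    return 0;
}

With 16 bytes of lookahead and 1024 blocks this works out to 128 blocks per scan and at most about 8 traversals, which is why the lfs.h comment below suggests paying that cost before a time-critical write.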

lfs.h (9 lines changed)

@@ -712,9 +712,12 @@ lfs_ssize_t lfs_fs_size(lfs_t *lfs);
 // Returns a negative error code on failure.
 int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data);
 
-// Use Traverse function and try to find free blocks. LittleFS free blocks search is unpredictable.
-// Search is costly operation which may delay write. In realtime write scenarios can be better to find them before a write.
-int lfs_find_free_blocks(lfs_t *lfs);
+// Use the traverse function to find free blocks. littlefs's free-block
+// search is unpredictable.
+//
+// The search is a costly operation that may delay a write. In real-time
+// write scenarios it can be better to find free blocks before the write.
+int lfs_fs_findfreeblocks(lfs_t *lfs);
 
 #ifndef LFS_READONLY
 // Attempt to make the filesystem consistent and ready for writing
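
Given the comment's advice to find free blocks ahead of a write, a minimal usage sketch of the new API, assuming an already-mounted filesystem (the function name, file name, and parameters here are illustrative):

#include "lfs.h"

// assumes lfs_mount(lfs, cfg) has already succeeded
static int log_sample(lfs_t *lfs, const void *sample, lfs_size_t len) {
    // pay the free-block search cost up front, outside the timing-critical
    // path, so the write below is less likely to trigger a full traversal
    int err = lfs_fs_findfreeblocks(lfs);
    if (err) {
        return err;
    }

    lfs_file_t file;
    err = lfs_file_open(lfs, &file, "samples.log",
            LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND);
    if (err) {
        return err;
    }

    lfs_ssize_t res = lfs_file_write(lfs, &file, sample, len);
    if (res < 0) {
        lfs_file_close(lfs, &file);
        return (int)res;
    }

    return lfs_file_close(lfs, &file);
}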