return 0;
}
+/* FUSE fsync/fsyncdir handler: commit the open file's state to stable
+   storage. Writes back the node's directory-entry metadata, flushes the
+   dirty clusters bitmap, then syncs the underlying device. */
+static int fuse_exfat_fsync(const char* path, int datasync,
+ struct fuse_file_info *fi)
+{
+ exfat_debug("[%s] %s", __func__, path);
+ /* NOTE(review): datasync is ignored — metadata is flushed even for
+    fdatasync; correct but potentially more work than required. */
+ exfat_flush_node(&ef, get_node(fi));
+ exfat_flush(&ef);
+ /* only the device-level sync result is propagated to the caller */
+ return exfat_fsync(ef.dev);
+}
+
static int fuse_exfat_read(const char* path, char* buffer, size_t size,
off_t offset, struct fuse_file_info* fi)
{
.readdir = fuse_exfat_readdir,
.open = fuse_exfat_open,
.release = fuse_exfat_release,
+ .fsync = fuse_exfat_fsync,
+ .fsyncdir = fuse_exfat_fsync,
.read = fuse_exfat_read,
.write = fuse_exfat_write,
.unlink = fuse_exfat_unlink,
return EXFAT_CLUSTER_END;
}
-void exfat_flush_cmap(struct exfat* ef)
+/* Write the clusters bitmap back to the device, but only when it has
+   been modified. Renamed from exfat_flush_cmap(); the dirty check now
+   lives here so callers no longer need to test cmap.dirty themselves. */
+void exfat_flush(struct exfat* ef)
{
- exfat_pwrite(ef->dev, ef->cmap.chunk, (ef->cmap.chunk_size + 7) / 8,
- exfat_c2o(ef, ef->cmap.start_cluster));
- ef->cmap.dirty = false;
+ if (ef->cmap.dirty)
+ {
+ /* bitmap is chunk_size bits, rounded up to whole bytes */
+ exfat_pwrite(ef->dev, ef->cmap.chunk, (ef->cmap.chunk_size + 7) / 8,
+ exfat_c2o(ef, ef->cmap.start_cluster));
+ ef->cmap.dirty = false;
+ }
}
static void set_next_cluster(const struct exfat* ef, bool contiguous,
const struct exfat_node* node, cluster_t cluster);
cluster_t exfat_advance_cluster(const struct exfat* ef,
struct exfat_node* node, uint32_t count);
-void exfat_flush_cmap(struct exfat* ef);
+void exfat_flush(struct exfat* ef);
int exfat_truncate(struct exfat* ef, struct exfat_node* node, uint64_t size,
bool erase);
uint32_t exfat_count_free_clusters(const struct exfat* ef);
if (node->references == 0)
{
- if (node->flags & EXFAT_ATTRIB_DIRTY)
- exfat_flush_node(ef, node);
+ exfat_flush_node(ef, node);
if (node->flags & EXFAT_ATTRIB_UNLINKED)
{
/* free all clusters and node structure itself */
exfat_truncate(ef, node, 0, true);
free(node);
}
- if (ef->cmap.dirty)
- exfat_flush_cmap(ef);
+ exfat_flush(ef);
}
}
struct exfat_entry_meta1 meta1;
struct exfat_entry_meta2 meta2;
+ if (!(node->flags & EXFAT_ATTRIB_DIRTY))
+ return; /* no need to flush */
+
if (ef->ro)
exfat_bug("unable to flush node to read-only FS");