// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>
#include <linux/nls.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>
static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	down_read(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);

	return ret;
}
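
/*
 * Called when a previously read-only page is about to become writable.
 * The flow below: bail out early on checkpoint errors, reserve the block
 * backing the faulting page (unless the page belongs to a fully written
 * compressed cluster), wait for any pending writeback, including GCed
 * page writeback via META_MAPPING, then zero the part of the page beyond
 * EOF and mark the page dirty.
 */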
static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	bool need_alloc = true;
	int err = 0;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	if (!f2fs_is_checkpoint_ready(sbi)) {
		err = -ENOSPC;
		goto err;
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_compressed_file(inode)) {
		int ret = f2fs_is_compressed_cluster(inode, page->index);

		if (ret < 0) {
			err = ret;
			goto err;
		} else if (ret) {
			if (ret < F2FS_I(inode)->i_cluster_size) {
				err = -EAGAIN;
				goto err;
			}
			need_alloc = false;
		}
	}
#endif
	/* should do out of any locked page */
	if (need_alloc)
		f2fs_balance_fs(sbi, true);

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	if (need_alloc) {
		/* block allocation */
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_block(&dn, page->index);
		f2fs_put_dnode(&dn);
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
		if (err) {
			unlock_page(page);
			goto out_sem;
		}
	}

	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA, false, true);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto out_sem;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		loff_t offset;

		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
	f2fs_update_time(sbi, REQ_TIME);

	trace_f2fs_vm_page_mkwrite(page, DATA);
out_sem:
	up_read(&F2FS_I(inode)->i_mmap_sem);

	sb_end_pagefault(inode->i_sb);
err:
	return block_page_mkwrite_return(err);
}
static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};
static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}
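
/*
 * Decide whether this fsync can be served by roll-forward recovery alone
 * (writing only the inode's node chain) or whether a full checkpoint is
 * required. Any nonzero cp_reason returned below forces f2fs_sync_fs()
 * in f2fs_do_sync_file().
 */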
static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (f2fs_compressed_file(inode))
		cp_reason = CP_COMPRESSED;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!f2fs_space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
							TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;

	return cp_reason;
}
static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;

	/* even so, we need to catch any pending inode updates */
	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}
static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}
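
/*
 * Core fsync path. After flushing the data pages, either issue a full
 * checkpoint (when need_do_checkpoint() says so) or write this inode's
 * node pages with an fsync mark so roll-forward recovery can replay
 * them after a sudden power-off, then issue a flush/FUA unless
 * fsync_mode=nobarrier is in effect.
 */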
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	unsigned int seq_id = 0;

	if (unlikely(f2fs_readonly(inode->i_sb) ||
				is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	if (S_ISDIR(inode->i_mode))
		goto go_write;

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time writing recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both fdatasync() and fsync() can be recovered from a
	 * sudden power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. The following
		 * pino fixup will be used only for fsynced inodes after
		 * checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
	atomic_dec(&sbi->wb_sync_req[NODE]);
	if (ret)
		goto out;

	/* if cp_error is set, avoid looping here forever */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (f2fs_need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's an atomic_write, it's fine to keep only write ordering. So
	 * here we don't need to wait for node write completion, since we use
	 * a node chain which serializes node blocks. If one of the node
	 * writes is reordered, we can see a simply broken chain, which stops
	 * roll-forward recovery. It means we'll recover all or none of the
	 * node blocks given the fsync mark.
	 */
	if (!atomic) {
		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
		if (ret)
			goto out;
	}

	/* once recovery info is written, we don't need to track this */
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	return f2fs_do_sync_file(file, start, end, datasync, false);
}
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct page *page;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY,
				      1, &page);
	if (!nr_pages)
		return ULONG_MAX;
	pgofs = page->index;
	put_page(page);
	return pgofs;
}
static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
				pgoff_t dirty, pgoff_t pgofs, int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			__is_valid_data_blkaddr(blkaddr))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}
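
/*
 * Implement SEEK_DATA/SEEK_HOLE by walking dnode blocks from the
 * requested offset and testing each block address with __found_offset().
 * A dirty (not yet allocated) page found by __get_first_dirty_index()
 * also counts as data.
 */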
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);

			if (__is_valid_data_blkaddr(blkaddr) &&
				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
					blkaddr, DATA_GENERIC_ENHANCE)) {
				f2fs_put_dnode(&dn);
				goto fail;
			}

			if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty,
							pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	vma->vm_ops = &f2fs_file_vm_ops;
	set_inode_flag(inode, FI_MMAP_FILE);
	return 0;
}
static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int err = fscrypt_file_open(inode, filp);

	if (err)
		return err;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	err = fsverity_file_open(inode, filp);
	if (err)
		return err;

	filp->f_mode |= FMODE_NOWAIT;

	return dquot_file_open(inode, filp);
}
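
/*
 * Invalidate up to @count block addresses in the dnode block starting at
 * dn->ofs_in_node. For compressed inodes the walk is tracked per cluster
 * so that i_compr_blocks stays consistent as clusters are released.
 */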
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;
	bool compressed_cluster = false;
	int cluster_index = 0, valid_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	/* Assumption: truncation starts at a cluster boundary */
	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (f2fs_compressed_file(dn->inode) &&
					!(cluster_index & (cluster_size - 1))) {
			if (compressed_cluster)
				f2fs_i_compr_blocks_update(dn->inode,
							valid_blocks, false);
			compressed_cluster = (blkaddr == COMPRESS_ADDR);
			valid_blocks = 0;
		}

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		f2fs_set_data_blkaddr(dn);

		if (__is_valid_data_blkaddr(blkaddr)) {
			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE))
				continue;
			if (compressed_cluster)
				valid_blocks++;
		}

		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);

		f2fs_invalidate_blocks(sbi, blkaddr);
		nr_free++;
	}

	if (compressed_cluster)
		f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate a valid blkaddr in the range
		 * [ofs, ofs + count], we invalidate all blkaddrs in the
		 * whole range.
		 */
		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
}
void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
{
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
}
static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	loff_t offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	if (f2fs_compressed_file(inode))
		return 0;

	page = f2fs_get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
static int do_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		f2fs_truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		f2fs_truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = f2fs_truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}
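
/*
 * Round the truncation point up to a cluster boundary for compressed
 * inodes. For example, with a cluster of 4 pages (i_log_cluster_size
 * == 2), truncating at page 5 frees blocks starting from page 8; the
 * partially truncated cluster is kept intact.
 */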
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	u64 free_from = from;

	/*
	 * for compressed files, only cluster-size-aligned
	 * truncation is supported.
	 */
	if (f2fs_compressed_file(inode)) {
		size_t cluster_shift = PAGE_SHIFT +
					F2FS_I(inode)->i_log_cluster_size;
		size_t cluster_mask = (1 << cluster_shift) - 1;

		free_from = from >> cluster_shift;
		if (from & cluster_mask)
			free_from++;
		free_from <<= cluster_shift;
	}

	return do_truncate_blocks(inode, free_from, lock);
}
int f2fs_truncate(struct inode *inode)
{
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
		return -EIO;
	}

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}
int f2fs_getattr(const struct path *path, struct kstat *stat,
		 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri;
	unsigned int flags;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
	}

	flags = fi->i_flags;
	if (flags & F2FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (IS_ENCRYPTED(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & F2FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & F2FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	if (IS_VERITY(inode))
		stat->attributes |= STATX_ATTR_VERITY;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP |
				  STATX_ATTR_VERITY);

	generic_fillattr(inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

	return 0;
}
#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME) {
		inode->i_atime = timestamp_truncate(attr->ia_atime,
						  inode);
	}
	if (ia_valid & ATTR_MTIME) {
		inode->i_mtime = timestamp_truncate(attr->ia_mtime,
						  inode);
	}
	if (ia_valid & ATTR_CTIME) {
		inode->i_ctime = timestamp_truncate(attr->ia_ctime,
						  inode);
	}
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if ((attr->ia_valid & ATTR_SIZE) &&
		!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	err = fsverity_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
		!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
		!gid_eq(attr->ia_gid, inode->i_gid))) {
		f2fs_lock_op(F2FS_I_SB(inode));
		err = dquot_transfer(inode, attr);
		if (err) {
			set_sbi_flag(F2FS_I_SB(inode),
					SBI_QUOTA_NEED_REPAIR);
			f2fs_unlock_op(F2FS_I_SB(inode));
			return err;
		}
		/*
		 * update uid/gid under lock_op(), so that dquot and inode can
		 * be updated atomically.
		 */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_unlock_op(F2FS_I_SB(inode));
	}

	if (attr->ia_valid & ATTR_SIZE) {
		loff_t old_size = i_size_read(inode);

		if (attr->ia_size > MAX_INLINE_DATA(inode)) {
			/*
			 * convert the inline inode before i_size_write() so
			 * that i_size never exceeds the inline_data capacity
			 * while the inline flag is still set.
			 */
			err = f2fs_convert_inline_inode(inode);
			if (err)
				return err;
		}

		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		down_write(&F2FS_I(inode)->i_mmap_sem);

		truncate_setsize(inode, attr->ia_size);

		if (attr->ia_size <= old_size)
			err = f2fs_truncate(inode);
		/*
		 * do not trim all blocks after i_size if target size is
		 * larger than i_size.
		 */
		up_write(&F2FS_I(inode)->i_mmap_sem);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		down_write(&F2FS_I(inode)->i_sem);
		inode->i_mtime = inode->i_ctime = current_time(inode);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		up_write(&F2FS_I(inode)->i_sem);
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, true);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}
const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.listxattr	= f2fs_listxattr,
#endif
	.fiemap		= f2fs_fiemap,
};
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = f2fs_get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start = f2fs_get_next_page_offset(&dn,
								pg_start);
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		f2fs_truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}
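
/*
 * FALLOC_FL_PUNCH_HOLE backend: zero the partial pages at each end of the
 * range with fill_zero() and drop the whole pages in between via
 * f2fs_truncate_hole(), holding i_gc_rwsem and i_mmap_sem to keep GC and
 * page faults out of the range being punched.
 */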
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);

			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		}
	}

	return ret;
}
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
						dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);

		if (__is_valid_data_blkaddr(*blkaddr) &&
			!f2fs_is_valid_blkaddr(sbi, *blkaddr,
					DATA_GENERIC_ENHANCE)) {
			f2fs_put_dnode(&dn);
			return -EFSCORRUPTED;
		}

		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {

			if (test_opt(sbi, LFS)) {
				f2fs_put_dnode(&dn);
				return -EOPNOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}
static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			f2fs_invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}
static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			ret = f2fs_get_node_info(sbi, dn.nid, &ni);
			if (ret) {
				f2fs_put_dnode(&dn);
				return ret;
			}

			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = datablock_addr(dn.inode,
						dn.node_page, dn.ofs_in_node);
				f2fs_truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
							1, false, false);
					f2fs_i_blocks_write(dst_inode,
							1, true, false);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (loff_t)(dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = f2fs_get_lock_data_page(src_inode,
							src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = f2fs_truncate_hole(src_inode,
						src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}
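
/*
 * Move @len blocks from @src to @dst in batches of at most
 * 4 * ADDRS_PER_BLOCK(). __read_out_blkaddrs() snapshots the source
 * addresses, __clone_blkaddrs() rewires or copies them into place, and
 * __roll_back_blkaddrs() restores the source mapping if either step
 * fails part way through.
 */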
static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);

		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(block_t)),
					GFP_KERNEL);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(int)),
					GFP_KERNEL);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}
static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t end = (offset + len) >> PAGE_SHIFT;
	int ret;

	f2fs_balance_fs(sbi, true);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	f2fs_lock_op(sbi);
	f2fs_drop_extent_tree(inode);
	truncate_pagecache(inode, offset);
	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);

	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	return ret;
}
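
/*
 * FALLOC_FL_COLLAPSE_RANGE backend: shift every block after the collapsed
 * range toward the start of the file via f2fs_do_collapse(), then truncate
 * the now-duplicated tail and shrink i_size by @len.
 */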
static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to the block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	ret = f2fs_do_collapse(inode, offset, len);
	if (ret)
		return ret;

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	truncate_pagecache(inode, new_size);

	ret = f2fs_truncate_blocks(inode, new_size, true);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (datablock_addr(dn->inode, dn->node_page,
					dn->ofs_in_node) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = f2fs_reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
		/*
		 * f2fs_reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			f2fs_set_data_blkaddr(dn);
		}
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_pagecache_range(inode,
				(loff_t)index << PAGE_SHIFT,
				((loff_t)pg_end << PAGE_SHIFT) - 1);

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				up_write(&F2FS_I(inode)->i_mmap_sem);
				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				goto out;
			}

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);

			f2fs_unlock_op(sbi);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

			f2fs_balance_fs(sbi, dn.node_changed);

			if (ret)
				goto out;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}
	return ret;
}
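
/*
 * FALLOC_FL_INSERT_RANGE backend: the mirror image of collapse. Blocks
 * are shifted toward the end of the file, walking backwards from the
 * last page in chunks of at most @delta pages, so that a chunk is never
 * exchanged with itself.
 */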
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nr, pg_start, pg_end, delta, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);
	if (ret)
		return ret;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to the block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);
	truncate_pagecache(inode, offset);

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		if (nr > delta)
			nr = delta;
		idx -= nr;

		f2fs_lock_op(sbi);
		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);
	}
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);
	up_write(&F2FS_I(inode)->i_mmap_sem);

	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}
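
/*
 * Default fallocate (and FALLOC_FL_KEEP_SIZE) backend: preallocate blocks
 * with f2fs_map_blocks(). Pinned files are allocated a segment at a time
 * from CURSEG_COLD_DATA_PINNED so their blocks stay contiguous and GC
 * leaves them in place, triggering foreground GC first when free sections
 * run low.
 */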
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
			.m_may_create = true };
	pgoff_t pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	int err;

	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	f2fs_balance_fs(sbi, true);

	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
	map.m_len = pg_end - map.m_lblk;
	if (off_end)
		map.m_len++;

	if (!map.m_len)
		return 0;

	if (f2fs_is_pinned_file(inode)) {
		block_t len = (map.m_len >> sbi->log_blocks_per_seg) <<
					sbi->log_blocks_per_seg;
		block_t done = 0;

		if (map.m_len % sbi->blocks_per_seg)
			len += sbi->blocks_per_seg;

		map.m_len = sbi->blocks_per_seg;
next_alloc:
		if (has_not_enough_free_secs(sbi, 0,
			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
			down_write(&sbi->gc_lock);
			err = f2fs_gc(sbi, true, false, NULL_SEGNO);
			if (err && err != -ENODATA && err != -EAGAIN)
				goto out_err;
		}

		down_write(&sbi->pin_sem);
		map.m_seg_type = CURSEG_COLD_DATA_PINNED;
		f2fs_allocate_new_segments(sbi, CURSEG_COLD_DATA);
		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
		up_write(&sbi->pin_sem);

		done += map.m_len;
		len -= map.m_len;
		map.m_lblk += map.m_len;
		if (!err && len)
			goto next_alloc;

		map.m_len = done;
	} else {
		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	}
out_err:
	if (err) {
		pgoff_t last_off;

		if (!map.m_len)
			return err;

		last_off = map.m_lblk + map.m_len - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}

	return err;
}
static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
		return -ENOSPC;
	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (f2fs_compressed_file(inode) &&
		(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
			FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, false);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}
static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file is called on every close. We should not drop
	 * any in-memory pages on a close issued by another process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
	}
	return 0;
}
static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	/*
	 * If the process that opened a transaction crashes, we must roll
	 * back. Otherwise, other readers/writers can see a corrupted
	 * database until all the writers close their files. Since this
	 * must happen before the file lock is dropped, it has to be done
	 * in ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		f2fs_drop_inmem_pages(inode);
	return 0;
}
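
/*
 * Apply the masked subset of @iflags to the inode's on-disk flags.
 * The casefold and compression flags get extra validation: casefold can
 * only be toggled on an empty directory, and compression can only be
 * enabled on an empty regular file.
 */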
static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	/* Is it a quota file? Do not allow the user to mess with it */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	if ((iflags ^ fi->i_flags) & F2FS_CASEFOLD_FL) {
		if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
			return -EOPNOTSUPP;
		if (!f2fs_empty_dir(inode))
			return -ENOTEMPTY;
	}

	if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
		if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
			return -EOPNOTSUPP;
		if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
			return -EINVAL;
	}

	if ((iflags ^ fi->i_flags) & F2FS_COMPR_FL) {
		if (S_ISREG(inode->i_mode) &&
			(fi->i_flags & F2FS_COMPR_FL || i_size_read(inode) ||
						F2FS_HAS_BLOCKS(inode)))
			return -EINVAL;
		if (iflags & F2FS_NOCOMP_FL)
			return -EINVAL;
		if (iflags & F2FS_COMPR_FL) {
			int err = f2fs_convert_inline_inode(inode);

			if (err)
				return err;

			if (!f2fs_may_compress(inode))
				return -EINVAL;

			set_compress_context(inode);
		}
	}
	if ((iflags ^ fi->i_flags) & F2FS_NOCOMP_FL) {
		if (fi->i_flags & F2FS_COMPR_FL)
			return -EINVAL;
	}

	fi->i_flags = iflags | (fi->i_flags & ~mask);
	f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
					(fi->i_flags & F2FS_NOCOMP_FL));

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
	return 0;
}
/* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */

/*
 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
 * F2FS_GETTABLE_FS_FL. To also make it settable via FS_IOC_SETFLAGS, also add
 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
 */

static const struct {
	u32 iflag;
	u32 fsflag;
} f2fs_fsflags_map[] = {
	{ F2FS_COMPR_FL,	FS_COMPR_FL },
	{ F2FS_SYNC_FL,		FS_SYNC_FL },
	{ F2FS_IMMUTABLE_FL,	FS_IMMUTABLE_FL },
	{ F2FS_APPEND_FL,	FS_APPEND_FL },
	{ F2FS_NODUMP_FL,	FS_NODUMP_FL },
	{ F2FS_NOATIME_FL,	FS_NOATIME_FL },
	{ F2FS_NOCOMP_FL,	FS_NOCOMP_FL },
	{ F2FS_INDEX_FL,	FS_INDEX_FL },
	{ F2FS_DIRSYNC_FL,	FS_DIRSYNC_FL },
	{ F2FS_PROJINHERIT_FL,	FS_PROJINHERIT_FL },
	{ F2FS_CASEFOLD_FL,	FS_CASEFOLD_FL },
};
#define F2FS_GETTABLE_FS_FL (		\
		FS_COMPR_FL |		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_NOCOMP_FL |		\
		FS_INDEX_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL |	\
		FS_ENCRYPT_FL |		\
		FS_INLINE_DATA_FL |	\
		FS_NOCOW_FL |		\
		FS_VERITY_FL |		\
		FS_CASEFOLD_FL)

#define F2FS_SETTABLE_FS_FL (		\
		FS_COMPR_FL |		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_NOCOMP_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL |	\
		FS_CASEFOLD_FL)
/* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
{
	u32 fsflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (iflags & f2fs_fsflags_map[i].iflag)
			fsflags |= f2fs_fsflags_map[i].fsflag;

	return fsflags;
}

/* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
{
	u32 iflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (fsflags & f2fs_fsflags_map[i].fsflag)
			iflags |= f2fs_fsflags_map[i].iflag;

	return iflags;
}
static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);

	if (IS_ENCRYPTED(inode))
		fsflags |= FS_ENCRYPT_FL;
	if (IS_VERITY(inode))
		fsflags |= FS_VERITY_FL;
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
		fsflags |= FS_INLINE_DATA_FL;
	if (is_inode_flag_set(inode, FI_PIN_FILE))
		fsflags |= FS_NOCOW_FL;

	fsflags &= F2FS_GETTABLE_FS_FL;

	return put_user(fsflags, (int __user *)arg);
}
static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 fsflags, old_fsflags;
	u32 iflags;
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(fsflags, (int __user *)arg))
		return -EFAULT;

	if (fsflags & ~F2FS_GETTABLE_FS_FL)
		return -EOPNOTSUPP;
	fsflags &= F2FS_SETTABLE_FS_FL;

	iflags = f2fs_fsflags_to_iflags(fsflags);
	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
		return -EOPNOTSUPP;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
	ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
	if (ret)
		goto out;

	ret = f2fs_setflags_common(inode, iflags,
			f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}
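
/*
 * F2FS_IOC_START_ATOMIC_WRITE: flush the inode's existing dirty pages,
 * then link the inode on sbi->inode_list[ATOMIC_FILE] and set
 * FI_ATOMIC_FILE so that subsequent writes are staged in memory until
 * commit or abort.
 */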
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (filp->f_flags & O_DIRECT)
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	f2fs_disable_compressed_file(inode);

	if (f2fs_is_atomic_file(inode)) {
		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
			ret = -EINVAL;
		goto out;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/*
	 * Should wait end_io to count F2FS_WB_CP_DATA correctly by
	 * f2fs_is_atomic_file.
	 */
	if (get_dirty_pages(inode))
		f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
			  inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret) {
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		goto out;
	}

	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
	if (list_empty(&fi->inmem_ilist))
		list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
	sbi->atomic_files++;
	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);

	/* add inode to inmem_list first and then set atomic_file */
	set_inode_flag(inode, FI_ATOMIC_FILE);
	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	F2FS_I(inode)->inmem_task = current;
	stat_update_max_atomic_write(inode);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode)) {
		ret = -EINVAL;
		goto err_out;
	}

	if (f2fs_is_atomic_file(inode)) {
		ret = f2fs_commit_inmem_pages(inode);
		if (ret)
			goto err_out;

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
		if (!ret)
			f2fs_drop_inmem_pages(inode);
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}
err_out:
	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
		ret = -EINVAL;
	}
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_is_volatile_file(inode))
		goto out;

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}
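
/*
 * F2FS_IOC_SHUTDOWN: stop checkpointing according to the requested mode
 * (full sync via freeze_bdev, metadata sync, no sync, meta flush, or
 * mark-for-fsck), then stop the GC and discard threads so no further
 * writes reach the device.
 */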
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	if (in != F2FS_GOING_DOWN_FULLSYNC) {
		ret = mnt_want_write_file(filp);
		if (ret)
			return ret;
	}

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		sb = freeze_bdev(sb->s_bdev);
		if (IS_ERR(sb)) {
			ret = PTR_ERR(sb);
			goto out;
		}
		if (sb) {
			f2fs_stop_checkpoint(sbi, false);
			set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
			thaw_bdev(sb->s_bdev, sb);
		}
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		if (ret)
			goto out;
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NEED_FSCK:
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
		set_sbi_flag(sbi, SBI_IS_DIRTY);
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		goto out;
	default:
		ret = -EINVAL;
		goto out;
	}

	f2fs_stop_gc_thread(sbi);
	f2fs_stop_discard_thread(sbi);

	f2fs_drop_discard_cmd(sbi);
	clear_opt(sbi, DISCARD);

	f2fs_update_time(sbi, REQ_TIME);
out:
	if (in != F2FS_GOING_DOWN_FULLSYNC)
		mnt_drop_write_file(filp);

	trace_f2fs_shutdown(sbi, in, ret);

	return ret;
}
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!f2fs_hw_support_discard(F2FS_SB(sb)))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	range.minlen = max((unsigned int)range.minlen,
				q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return 0;
}
static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}
static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}

static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;
	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}
static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_encrypt(sbi))
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	down_write(&sbi->sb_lock);

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		goto out_err;
	}
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		err = -EFAULT;
out_err:
	up_write(&sbi->sb_lock);
	mnt_drop_write_file(filp);
	return err;
}
static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
					     unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
}

static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_add_key(filp, (void __user *)arg);
}

static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
}

static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
						    unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_key_status(struct file *filp,
					      unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
}
static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		if (!down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		down_write(&sbi->gc_lock);
	}

	ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_gc_range range;
	u64 end;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	end = range.start + range.len;
	if (end < range.start || range.start < MAIN_BLKADDR(sbi) ||
					end >= MAX_BLKADDR(sbi))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

do_more:
	if (!range.sync) {
		if (!down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		down_write(&sbi->gc_lock);
	}

	ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
	range.start += BLKS_PER_SEC(sbi);
	if (range.start <= end)
		goto do_more;
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}
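
/*
 * Defragment [range->start, range->start + range->len): first scan the
 * mapping to see whether the physical blocks are already contiguous; if
 * not, redirty the data pages a segment's worth at a time and write them
 * back so the LFS allocator relocates them contiguously.
 */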
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_extent = NULL,
					.m_seg_type = NO_CHECK_TYPE,
					.m_may_create = false };
	struct extent_info ei = {0, 0, 0};
	pgoff_t pg_start, pg_end, next_pgofs;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (f2fs_should_update_inplace(inode, NULL))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;
	map.m_next_pgofs = &next_pgofs;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk)
			fragmented = true;

		/* record total count of blocks that we're going to move */
		total += map.m_len;

		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented) {
		total = 0;
		goto out;
	}

	sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));

	/*
	 * make sure there are enough free sections for LFS allocation; this
	 * avoids running defragmentation in SSR mode when free sections are
	 * being allocated intensively
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;
	total = 0;

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			goto check;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = f2fs_get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;
check:
		if (map.m_lblk < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}
2688 static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
2690 struct inode *inode = file_inode(filp);
2691 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2692 struct f2fs_defragment range;
2695 if (!capable(CAP_SYS_ADMIN))
2698 if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2701 if (f2fs_readonly(sbi->sb))
2704 if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
2708 /* verify alignment of offset & size */
2709 if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
2712 if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2713 sbi->max_file_blocks))
2716 err = mnt_want_write_file(filp);
2720 err = f2fs_defragment_range(sbi, filp, &range);
2721 mnt_drop_write_file(filp);
2723 f2fs_update_time(sbi, REQ_TIME);
2727 if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
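/*
 * Move the block-aligned range [pos_in, pos_in + len) of file_in into
 * file_out at pos_out. Both files must be regular, unencrypted and on
 * the same f2fs mount, and overlapping ranges of the same file are
 * rejected.
 */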
2734 static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
2735 struct file *file_out, loff_t pos_out, size_t len)
2737 struct inode *src = file_inode(file_in);
2738 struct inode *dst = file_inode(file_out);
2739 struct f2fs_sb_info *sbi = F2FS_I_SB(src);
2740 size_t olen = len, dst_max_i_size = 0;
2744 if (file_in->f_path.mnt != file_out->f_path.mnt ||
2745 src->i_sb != dst->i_sb)
2748 if (unlikely(f2fs_readonly(src->i_sb)))
2751 if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
2754 if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
2758 if (pos_in == pos_out)
2760 if (pos_out > pos_in && pos_out < pos_in + len)
2767 if (!inode_trylock(dst))
2772 if (pos_in + len > src->i_size || pos_in + len < pos_in)
2774 if (len == 0)
2775 olen = len = src->i_size - pos_in;
2776 if (pos_in + len == src->i_size)
2777 len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
2783 dst_osize = dst->i_size;
2784 if (pos_out + olen > dst->i_size)
2785 dst_max_i_size = pos_out + olen;
2787 /* verify the end result is block aligned */
2788 if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
2789 !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
2790 !IS_ALIGNED(pos_out, F2FS_BLKSIZE))
2793 ret = f2fs_convert_inline_inode(src);
2797 ret = f2fs_convert_inline_inode(dst);
2801 /* write out all dirty pages from offset */
2802 ret = filemap_write_and_wait_range(src->i_mapping,
2803 pos_in, pos_in + len);
2807 ret = filemap_write_and_wait_range(dst->i_mapping,
2808 pos_out, pos_out + len);
2812 f2fs_balance_fs(sbi, true);
2814 down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2817 if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2822 ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
2823 pos_out >> F2FS_BLKSIZE_BITS,
2824 len >> F2FS_BLKSIZE_BITS, false);
2828 f2fs_i_size_write(dst, dst_max_i_size);
2829 else if (dst_osize != dst->i_size)
2830 f2fs_i_size_write(dst, dst_osize);
2832 f2fs_unlock_op(sbi);
2835 up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
2837 up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
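/*
 * Illustrative userspace usage (a sketch; struct f2fs_move_range and
 * F2FS_IOC_MOVE_RANGE are not UAPI here, so userspace carries copies):
 *
 *	struct f2fs_move_range mr = {
 *		.dst_fd = dst_fd,	// destination, opened for writing
 *		.pos_in = 0,		// source offset (block-aligned)
 *		.pos_out = 0,		// destination offset (block-aligned)
 *		.len = 1 << 20,		// bytes to move
 *	};
 *	ret = ioctl(src_fd, F2FS_IOC_MOVE_RANGE, &mr);
 *
 * The calling fd must be open for both read and write.
 */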
2846 static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2848 struct f2fs_move_range range;
2852 if (!(filp->f_mode & FMODE_READ) ||
2853 !(filp->f_mode & FMODE_WRITE))
2856 if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2860 dst = fdget(range.dst_fd);
2864 if (!(dst.file->f_mode & FMODE_WRITE)) {
2869 err = mnt_want_write_file(filp);
2873 err = f2fs_move_file_range(filp, range.pos_in, dst.file,
2874 range.pos_out, range.len);
2876 mnt_drop_write_file(filp);
2880 if (copy_to_user((struct f2fs_move_range __user *)arg,
2881 &range, sizeof(range)))
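/*
 * F2FS_IOC_FLUSH_DEVICE: empty one device of a multi-device volume by
 * running foreground GC over that device's segment range only,
 * range.segments at a time, resuming from last_victim[FLUSH_DEVICE].
 */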
2888 static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
2890 struct inode *inode = file_inode(filp);
2891 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2892 struct sit_info *sm = SIT_I(sbi);
2893 unsigned int start_segno = 0, end_segno = 0;
2894 unsigned int dev_start_segno = 0, dev_end_segno = 0;
2895 struct f2fs_flush_device range;
2898 if (!capable(CAP_SYS_ADMIN))
2901 if (f2fs_readonly(sbi->sb))
2904 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2907 if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
2911 if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
2912 __is_large_section(sbi)) {
2913 f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
2914 range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
2918 ret = mnt_want_write_file(filp);
2922 if (range.dev_num != 0)
2923 dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
2924 dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
2926 start_segno = sm->last_victim[FLUSH_DEVICE];
2927 if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
2928 start_segno = dev_start_segno;
2929 end_segno = min(start_segno + range.segments, dev_end_segno);
2931 while (start_segno < end_segno) {
2932 if (!down_write_trylock(&sbi->gc_lock)) {
2936 sm->last_victim[GC_CB] = end_segno + 1;
2937 sm->last_victim[GC_GREEDY] = end_segno + 1;
2938 sm->last_victim[ALLOC_NEXT] = end_segno + 1;
2939 ret = f2fs_gc(sbi, true, true, start_segno);
2947 mnt_drop_write_file(filp);
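/*
 * Illustrative usage (a sketch; struct f2fs_flush_device is private to
 * f2fs here). Migrates up to 512 segments' worth of data off device 1
 * of a multi-device volume; only valid when segs_per_sec == 1:
 *
 *	struct f2fs_flush_device fdr = { .dev_num = 1, .segments = 512 };
 *	ret = ioctl(fd, F2FS_IOC_FLUSH_DEVICE, &fdr);
 */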
2951 static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
2953 struct inode *inode = file_inode(filp);
2954 u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
2956 /* Always claim atomic write support, matching SQLite's expectation on Android. */
2957 sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
2959 return put_user(sb_feature, (u32 __user *)arg);
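/*
 * Transfer the inode's quota charges to the project identified by
 * kprojid. If the transfer fails partway, the quota file may be left
 * inconsistent, so SBI_QUOTA_NEED_REPAIR is flagged for a later repair.
 */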
2963 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
2965 struct dquot *transfer_to[MAXQUOTAS] = {};
2966 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2967 struct super_block *sb = sbi->sb;
2970 transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
2971 if (!IS_ERR(transfer_to[PRJQUOTA])) {
2972 err = __dquot_transfer(inode, transfer_to);
2973 if (err)
2974 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2975 dqput(transfer_to[PRJQUOTA]);
2980 static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
2982 struct inode *inode = file_inode(filp);
2983 struct f2fs_inode_info *fi = F2FS_I(inode);
2984 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2989 if (!f2fs_sb_has_project_quota(sbi)) {
2990 if (projid != F2FS_DEF_PROJID)
2996 if (!f2fs_has_extra_attr(inode))
2999 kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
3001 if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
3005 /* Is it a quota file? Do not allow a user to mess with it. */
3006 if (IS_NOQUOTA(inode))
3009 ipage = f2fs_get_node_page(sbi, inode->i_ino);
3011 return PTR_ERR(ipage);
3013 if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
3016 f2fs_put_page(ipage, 1);
3019 f2fs_put_page(ipage, 1);
3021 err = dquot_initialize(inode);
3026 err = f2fs_transfer_project_quota(inode, kprojid);
3030 F2FS_I(inode)->i_projid = kprojid;
3031 inode->i_ctime = current_time(inode);
3032 f2fs_mark_inode_dirty_sync(inode, true);
3034 f2fs_unlock_op(sbi);
3038 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3043 static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
3045 if (projid != F2FS_DEF_PROJID)
3051 /* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */
3054 * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and settable
3055 * via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[], and add its
3056 * FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS.
3059 static const struct {
3062 } f2fs_xflags_map[] = {
3063 { F2FS_SYNC_FL, FS_XFLAG_SYNC },
3064 { F2FS_IMMUTABLE_FL, FS_XFLAG_IMMUTABLE },
3065 { F2FS_APPEND_FL, FS_XFLAG_APPEND },
3066 { F2FS_NODUMP_FL, FS_XFLAG_NODUMP },
3067 { F2FS_NOATIME_FL, FS_XFLAG_NOATIME },
3068 { F2FS_PROJINHERIT_FL, FS_XFLAG_PROJINHERIT },
3071 #define F2FS_SUPPORTED_XFLAGS ( \
3072 FS_XFLAG_SYNC | \
3073 FS_XFLAG_IMMUTABLE | \
3074 FS_XFLAG_APPEND | \
3075 FS_XFLAG_NODUMP | \
3076 FS_XFLAG_NOATIME | \
3077 FS_XFLAG_PROJINHERIT)
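/*
 * Illustrative userspace round-trip (a sketch; struct fsxattr and the
 * FS_IOC_FSGETXATTR/FS_IOC_FSSETXATTR ioctls come from <linux/fs.h>;
 * the project ID of 42 is just an example value):
 *
 *	struct fsxattr fa;
 *	ioctl(fd, FS_IOC_FSGETXATTR, &fa);
 *	fa.fsx_xflags |= FS_XFLAG_PROJINHERIT;
 *	fa.fsx_projid = 42;
 *	ioctl(fd, FS_IOC_FSSETXATTR, &fa);
 */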
3079 /* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */
3080 static inline u32 f2fs_iflags_to_xflags(u32 iflags)
3085 for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
3086 if (iflags & f2fs_xflags_map[i].iflag)
3087 xflags |= f2fs_xflags_map[i].xflag;
3092 /* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */
3093 static inline u32 f2fs_xflags_to_iflags(u32 xflags)
3098 for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
3099 if (xflags & f2fs_xflags_map[i].xflag)
3100 iflags |= f2fs_xflags_map[i].iflag;
3105 static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
3107 struct f2fs_inode_info *fi = F2FS_I(inode);
3109 simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags));
3111 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
3112 fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
3115 static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
3117 struct inode *inode = file_inode(filp);
3120 f2fs_fill_fsxattr(inode, &fa);
3122 if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
3127 static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
3129 struct inode *inode = file_inode(filp);
3130 struct fsxattr fa, old_fa;
3134 if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
3137 /* Make sure caller has proper permission */
3138 if (!inode_owner_or_capable(inode))
3141 if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS)
3144 iflags = f2fs_xflags_to_iflags(fa.fsx_xflags);
3145 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
3148 err = mnt_want_write_file(filp);
3154 f2fs_fill_fsxattr(inode, &old_fa);
3155 err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
3159 err = f2fs_setflags_common(inode, iflags,
3160 f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
3164 err = f2fs_ioc_setproject(filp, fa.fsx_projid);
3166 inode_unlock(inode);
3167 mnt_drop_write_file(filp);
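/*
 * A pinned file (FI_PIN_FILE) keeps its on-disk block addresses stable:
 * GC skips it instead of migrating its blocks. Each skipped GC attempt
 * bumps i_gc_failures[GC_FAILURE_PIN]; once that count passes
 * gc_pin_file_threshold, the pin is dropped so GC can make progress.
 */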
3171 int f2fs_pin_file_control(struct inode *inode, bool inc)
3173 struct f2fs_inode_info *fi = F2FS_I(inode);
3174 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3176 /* Use i_gc_failures of a regular file as a risk signal. */
3178 f2fs_i_gc_failures_write(inode,
3179 fi->i_gc_failures[GC_FAILURE_PIN] + 1);
3181 if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
3182 f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
3183 __func__, inode->i_ino,
3184 fi->i_gc_failures[GC_FAILURE_PIN]);
3185 clear_inode_flag(inode, FI_PIN_FILE);
3191 static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
3193 struct inode *inode = file_inode(filp);
3197 if (get_user(pin, (__u32 __user *)arg))
3200 if (!S_ISREG(inode->i_mode))
3203 if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3206 ret = mnt_want_write_file(filp);
3212 if (f2fs_should_update_outplace(inode, NULL)) {
3218 clear_inode_flag(inode, FI_PIN_FILE);
3219 f2fs_i_gc_failures_write(inode, 0);
3223 if (f2fs_pin_file_control(inode, false)) {
3228 ret = f2fs_convert_inline_inode(inode);
3232 if (f2fs_disable_compressed_file(inode)) {
3237 set_inode_flag(inode, FI_PIN_FILE);
3238 ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3240 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3242 inode_unlock(inode);
3243 mnt_drop_write_file(filp);
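/*
 * Illustrative usage (a sketch; the F2FS_IOC_*_PIN_FILE numbers are
 * private to f2fs here): pin a regular file so GC leaves its block
 * addresses alone, then read back the accumulated GC-failure count.
 *
 *	__u32 pin = 1, failures = 0;
 *	ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin);
 *	ioctl(fd, F2FS_IOC_GET_PIN_FILE, &failures);
 */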
3247 static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3249 struct inode *inode = file_inode(filp);
3252 if (is_inode_flag_set(inode, FI_PIN_FILE))
3253 pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3254 return put_user(pin, (u32 __user *)arg);
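/*
 * Walk the whole file with F2FS_GET_BLOCK_PRECACHE so every mapped
 * extent is loaded into the extent cache up front, letting later
 * block-address lookups hit the cache instead of the node pages.
 */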
3257 int f2fs_precache_extents(struct inode *inode)
3259 struct f2fs_inode_info *fi = F2FS_I(inode);
3260 struct f2fs_map_blocks map;
3261 pgoff_t m_next_extent;
3265 if (is_inode_flag_set(inode, FI_NO_EXTENT))
3269 map.m_next_pgofs = NULL;
3270 map.m_next_extent = &m_next_extent;
3271 map.m_seg_type = NO_CHECK_TYPE;
3272 map.m_may_create = false;
3273 end = F2FS_I_SB(inode)->max_file_blocks;
3275 while (map.m_lblk < end) {
3276 map.m_len = end - map.m_lblk;
3278 down_write(&fi->i_gc_rwsem[WRITE]);
3279 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
3280 up_write(&fi->i_gc_rwsem[WRITE]);
3284 map.m_lblk = m_next_extent;
3290 static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
3292 return f2fs_precache_extents(file_inode(filp));
3295 static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3297 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3301 if (!capable(CAP_SYS_ADMIN))
3304 if (f2fs_readonly(sbi->sb))
3307 if (copy_from_user(&block_count, (void __user *)arg,
3308 sizeof(block_count)))
3311 ret = f2fs_resize_fs(sbi, block_count);
3316 static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
3318 struct inode *inode = file_inode(filp);
3320 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3322 if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3323 f2fs_warn(F2FS_I_SB(inode),
3324 "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem.\n",
3329 return fsverity_ioctl_enable(filp, (const void __user *)arg);
3332 static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
3334 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3337 return fsverity_ioctl_measure(filp, (void __user *)arg);
3340 static int f2fs_get_volume_name(struct file *filp, unsigned long arg)
3342 struct inode *inode = file_inode(filp);
3343 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3348 vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
3352 down_read(&sbi->sb_lock);
3353 count = utf16s_to_utf8s(sbi->raw_super->volume_name,
3354 ARRAY_SIZE(sbi->raw_super->volume_name),
3355 UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3356 up_read(&sbi->sb_lock);
3358 if (copy_to_user((char __user *)arg, vbuf,
3359 min(FSLABEL_MAX, count)))
3366 static int f2fs_set_volume_name(struct file *filp, unsigned long arg)
3368 struct inode *inode = file_inode(filp);
3369 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3373 if (!capable(CAP_SYS_ADMIN))
3376 vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
3378 return PTR_ERR(vbuf);
3380 err = mnt_want_write_file(filp);
3384 down_write(&sbi->sb_lock);
3386 memset(sbi->raw_super->volume_name, 0,
3387 sizeof(sbi->raw_super->volume_name));
3388 utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
3389 sbi->raw_super->volume_name,
3390 ARRAY_SIZE(sbi->raw_super->volume_name));
3392 err = f2fs_commit_super(sbi, false);
3394 up_write(&sbi->sb_lock);
3396 mnt_drop_write_file(filp);
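/*
 * Top-level ioctl dispatcher: every command is rejected up front when a
 * checkpoint error has been detected or a checkpoint is not yet ready,
 * before any per-command handling runs.
 */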
3402 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
3404 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
3406 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
3410 case F2FS_IOC_GETFLAGS:
3411 return f2fs_ioc_getflags(filp, arg);
3412 case F2FS_IOC_SETFLAGS:
3413 return f2fs_ioc_setflags(filp, arg);
3414 case F2FS_IOC_GETVERSION:
3415 return f2fs_ioc_getversion(filp, arg);
3416 case F2FS_IOC_START_ATOMIC_WRITE:
3417 return f2fs_ioc_start_atomic_write(filp);
3418 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
3419 return f2fs_ioc_commit_atomic_write(filp);
3420 case F2FS_IOC_START_VOLATILE_WRITE:
3421 return f2fs_ioc_start_volatile_write(filp);
3422 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
3423 return f2fs_ioc_release_volatile_write(filp);
3424 case F2FS_IOC_ABORT_VOLATILE_WRITE:
3425 return f2fs_ioc_abort_volatile_write(filp);
3426 case F2FS_IOC_SHUTDOWN:
3427 return f2fs_ioc_shutdown(filp, arg);
3429 return f2fs_ioc_fitrim(filp, arg);
3430 case F2FS_IOC_SET_ENCRYPTION_POLICY:
3431 return f2fs_ioc_set_encryption_policy(filp, arg);
3432 case F2FS_IOC_GET_ENCRYPTION_POLICY:
3433 return f2fs_ioc_get_encryption_policy(filp, arg);
3434 case F2FS_IOC_GET_ENCRYPTION_PWSALT:
3435 return f2fs_ioc_get_encryption_pwsalt(filp, arg);
3436 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
3437 return f2fs_ioc_get_encryption_policy_ex(filp, arg);
3438 case FS_IOC_ADD_ENCRYPTION_KEY:
3439 return f2fs_ioc_add_encryption_key(filp, arg);
3440 case FS_IOC_REMOVE_ENCRYPTION_KEY:
3441 return f2fs_ioc_remove_encryption_key(filp, arg);
3442 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
3443 return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
3444 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
3445 return f2fs_ioc_get_encryption_key_status(filp, arg);
3446 case F2FS_IOC_GARBAGE_COLLECT:
3447 return f2fs_ioc_gc(filp, arg);
3448 case F2FS_IOC_GARBAGE_COLLECT_RANGE:
3449 return f2fs_ioc_gc_range(filp, arg);
3450 case F2FS_IOC_WRITE_CHECKPOINT:
3451 return f2fs_ioc_write_checkpoint(filp, arg);
3452 case F2FS_IOC_DEFRAGMENT:
3453 return f2fs_ioc_defragment(filp, arg);
3454 case F2FS_IOC_MOVE_RANGE:
3455 return f2fs_ioc_move_range(filp, arg);
3456 case F2FS_IOC_FLUSH_DEVICE:
3457 return f2fs_ioc_flush_device(filp, arg);
3458 case F2FS_IOC_GET_FEATURES:
3459 return f2fs_ioc_get_features(filp, arg);
3460 case F2FS_IOC_FSGETXATTR:
3461 return f2fs_ioc_fsgetxattr(filp, arg);
3462 case F2FS_IOC_FSSETXATTR:
3463 return f2fs_ioc_fssetxattr(filp, arg);
3464 case F2FS_IOC_GET_PIN_FILE:
3465 return f2fs_ioc_get_pin_file(filp, arg);
3466 case F2FS_IOC_SET_PIN_FILE:
3467 return f2fs_ioc_set_pin_file(filp, arg);
3468 case F2FS_IOC_PRECACHE_EXTENTS:
3469 return f2fs_ioc_precache_extents(filp, arg);
3470 case F2FS_IOC_RESIZE_FS:
3471 return f2fs_ioc_resize_fs(filp, arg);
3472 case FS_IOC_ENABLE_VERITY:
3473 return f2fs_ioc_enable_verity(filp, arg);
3474 case FS_IOC_MEASURE_VERITY:
3475 return f2fs_ioc_measure_verity(filp, arg);
3476 case F2FS_IOC_GET_VOLUME_NAME:
3477 return f2fs_get_volume_name(filp, arg);
3478 case F2FS_IOC_SET_VOLUME_NAME:
3479 return f2fs_set_volume_name(filp, arg);
3485 static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
3487 struct file *file = iocb->ki_filp;
3488 struct inode *inode = file_inode(file);
3490 if (!f2fs_is_compress_backend_ready(inode))
3493 return generic_file_read_iter(iocb, iter);
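/*
 * Write path: unless FI_NO_PREALLOC is set, blocks covering the whole
 * write are preallocated first so that, on a short or failed write,
 * f2fs_truncate() can release anything beyond the resulting i_size.
 */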
3496 static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
3498 struct file *file = iocb->ki_filp;
3499 struct inode *inode = file_inode(file);
3502 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
3507 if (!f2fs_is_compress_backend_ready(inode))
3510 if (iocb->ki_flags & IOCB_NOWAIT) {
3511 if (!inode_trylock(inode)) {
3519 ret = generic_write_checks(iocb, from);
3521 bool preallocated = false;
3522 size_t target_size = 0;
3525 if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
3526 set_inode_flag(inode, FI_NO_PREALLOC);
3528 if ((iocb->ki_flags & IOCB_NOWAIT)) {
3529 if (!f2fs_overwrite_io(inode, iocb->ki_pos,
3530 iov_iter_count(from)) ||
3531 f2fs_has_inline_data(inode) ||
3532 f2fs_force_buffered_io(inode, iocb, from)) {
3533 clear_inode_flag(inode, FI_NO_PREALLOC);
3534 inode_unlock(inode);
3541 if (is_inode_flag_set(inode, FI_NO_PREALLOC))
3544 if (iocb->ki_flags & IOCB_DIRECT) {
3546 * Convert inline data for Direct I/O before entering
3547 * f2fs_direct_IO().
3549 err = f2fs_convert_inline_inode(inode);
3553 * If f2fs_force_buffered_io() is true, we have to allocate
3554 * blocks all the time, since f2fs_direct_IO will fall
3555 * back to buffered IO.
3557 if (!f2fs_force_buffered_io(inode, iocb, from) &&
3558 allow_outplace_dio(inode, iocb, from))
3561 preallocated = true;
3562 target_size = iocb->ki_pos + iov_iter_count(from);
3564 err = f2fs_preallocate_blocks(iocb, from);
3567 clear_inode_flag(inode, FI_NO_PREALLOC);
3568 inode_unlock(inode);
3573 ret = __generic_file_write_iter(iocb, from);
3574 clear_inode_flag(inode, FI_NO_PREALLOC);
3576 /* if we couldn't write data, we should deallocate blocks. */
3577 if (preallocated && i_size_read(inode) < target_size)
3578 f2fs_truncate(inode);
3581 f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
3583 inode_unlock(inode);
3585 trace_f2fs_file_write_iter(inode, iocb->ki_pos,
3586 iov_iter_count(from), ret);
3588 ret = generic_write_sync(iocb, ret);
3592 #ifdef CONFIG_COMPAT
3593 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3596 case F2FS_IOC32_GETFLAGS:
3597 cmd = F2FS_IOC_GETFLAGS;
3599 case F2FS_IOC32_SETFLAGS:
3600 cmd = F2FS_IOC_SETFLAGS;
3602 case F2FS_IOC32_GETVERSION:
3603 cmd = F2FS_IOC_GETVERSION;
3605 case F2FS_IOC_START_ATOMIC_WRITE:
3606 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
3607 case F2FS_IOC_START_VOLATILE_WRITE:
3608 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
3609 case F2FS_IOC_ABORT_VOLATILE_WRITE:
3610 case F2FS_IOC_SHUTDOWN:
3612 case F2FS_IOC_SET_ENCRYPTION_POLICY:
3613 case F2FS_IOC_GET_ENCRYPTION_PWSALT:
3614 case F2FS_IOC_GET_ENCRYPTION_POLICY:
3615 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
3616 case FS_IOC_ADD_ENCRYPTION_KEY:
3617 case FS_IOC_REMOVE_ENCRYPTION_KEY:
3618 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
3619 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
3620 case F2FS_IOC_GARBAGE_COLLECT:
3621 case F2FS_IOC_GARBAGE_COLLECT_RANGE:
3622 case F2FS_IOC_WRITE_CHECKPOINT:
3623 case F2FS_IOC_DEFRAGMENT:
3624 case F2FS_IOC_MOVE_RANGE:
3625 case F2FS_IOC_FLUSH_DEVICE:
3626 case F2FS_IOC_GET_FEATURES:
3627 case F2FS_IOC_FSGETXATTR:
3628 case F2FS_IOC_FSSETXATTR:
3629 case F2FS_IOC_GET_PIN_FILE:
3630 case F2FS_IOC_SET_PIN_FILE:
3631 case F2FS_IOC_PRECACHE_EXTENTS:
3632 case F2FS_IOC_RESIZE_FS:
3633 case FS_IOC_ENABLE_VERITY:
3634 case FS_IOC_MEASURE_VERITY:
3635 case F2FS_IOC_GET_VOLUME_NAME:
3636 case F2FS_IOC_SET_VOLUME_NAME:
3639 return -ENOIOCTLCMD;
3641 return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
3645 const struct file_operations f2fs_file_operations = {
3646 .llseek = f2fs_llseek,
3647 .read_iter = f2fs_file_read_iter,
3648 .write_iter = f2fs_file_write_iter,
3649 .open = f2fs_file_open,
3650 .release = f2fs_release_file,
3651 .mmap = f2fs_file_mmap,
3652 .flush = f2fs_file_flush,
3653 .fsync = f2fs_sync_file,
3654 .fallocate = f2fs_fallocate,
3655 .unlocked_ioctl = f2fs_ioctl,
3656 #ifdef CONFIG_COMPAT
3657 .compat_ioctl = f2fs_compat_ioctl,
3659 .splice_read = generic_file_splice_read,
3660 .splice_write = iter_file_splice_write,