1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2017-2018 HUAWEI, Inc.
4 * https://www.huawei.com/
5 * Copyright (C) 2021, Alibaba Cloud
9 #include <trace/events/erofs.h>
/*
 * read the on-disk inode (compact or extended layout) for nid vi->nid and
 * initialize the in-memory inode/erofs_inode fields from it; if the inode
 * is successfully read, return its mapped inode block (or sometimes the
 * inode payload block if it's an extended inode crossing a block boundary)
 * in order to fill inline data if possible, and set *ofs to the offset
 * just past the on-disk inode within that block.
 */
16 static void *erofs_read_inode(struct erofs_buf *buf,
17 struct inode *inode, unsigned int *ofs)
19 struct super_block *sb = inode->i_sb;
20 struct erofs_sb_info *sbi = EROFS_SB(sb);
21 struct erofs_inode *vi = EROFS_I(inode);
/* absolute byte address of the on-disk inode derived from its nid */
22 const erofs_off_t inode_loc = iloc(sbi, vi->nid);
24 erofs_blk_t blkaddr, nblks = 0;
26 struct erofs_inode_compact *dic;
27 struct erofs_inode_extended *die, *copied = NULL;
/* split the inode location into block number + in-block offset */
31 blkaddr = erofs_blknr(inode_loc);
32 *ofs = erofs_blkoff(inode_loc);
34 erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u",
35 __func__, vi->nid, *ofs, blkaddr);
/* map the metadata block holding (at least the start of) the inode */
37 kaddr = erofs_read_metabuf(buf, sb, blkaddr, EROFS_KMAP);
39 erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
40 vi->nid, PTR_ERR(kaddr));
/* i_format encodes both the inode version and the data layout */
45 ifmt = le16_to_cpu(dic->i_format);
/* reject reserved i_format bits set by newer/unknown images */
47 if (ifmt & ~EROFS_I_ALL) {
48 erofs_err(inode->i_sb, "unsupported i_format %u of nid %llu",
54 vi->datalayout = erofs_inode_datalayout(ifmt);
55 if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
56 erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
57 vi->datalayout, vi->nid);
62 switch (erofs_inode_version(ifmt)) {
63 case EROFS_INODE_LAYOUT_EXTENDED:
64 vi->inode_isize = sizeof(struct erofs_inode_extended);
65 /* check if the extended inode crosses the block boundary */
66 if (*ofs + vi->inode_isize <= EROFS_BLKSIZ) {
67 *ofs += vi->inode_isize;
68 die = (struct erofs_inode_extended *)dic;
/* first part of the inode lives at the tail of this block */
70 const unsigned int gotten = EROFS_BLKSIZ - *ofs;
/* bounce buffer to stitch the two on-disk halves together */
72 copied = kmalloc(vi->inode_isize, GFP_NOFS);
77 memcpy(copied, dic, gotten);
/* the remainder sits at the start of the next metadata block */
78 kaddr = erofs_read_metabuf(buf, sb, blkaddr + 1,
81 erofs_err(sb, "failed to get inode payload block (nid: %llu), err %ld",
82 vi->nid, PTR_ERR(kaddr));
86 *ofs = vi->inode_isize - gotten;
87 memcpy((u8 *)copied + gotten, kaddr, *ofs);
90 vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);
92 inode->i_mode = le16_to_cpu(die->i_mode);
93 switch (inode->i_mode & S_IFMT) {
97 vi->raw_blkaddr = le32_to_cpu(die->i_u.raw_blkaddr);
/* for device nodes, i_u holds the encoded rdev instead */
102 new_decode_dev(le32_to_cpu(die->i_u.rdev));
/* extended inodes carry full 32-bit uid/gid/nlink */
111 i_uid_write(inode, le32_to_cpu(die->i_uid));
112 i_gid_write(inode, le32_to_cpu(die->i_gid));
113 set_nlink(inode, le32_to_cpu(die->i_nlink));
115 /* extended inode has its own timestamp */
116 inode->i_ctime.tv_sec = le64_to_cpu(die->i_ctime);
117 inode->i_ctime.tv_nsec = le32_to_cpu(die->i_ctime_nsec);
119 inode->i_size = le64_to_cpu(die->i_size);
121 /* total blocks for compressed files */
122 if (erofs_inode_is_data_compressed(vi->datalayout))
123 nblks = le32_to_cpu(die->i_u.compressed_blocks);
124 else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
125 /* fill chunked inode summary info */
126 vi->chunkformat = le16_to_cpu(die->i_u.c.format);
130 case EROFS_INODE_LAYOUT_COMPACT:
/* compact inodes are smaller and never cross a block boundary */
131 vi->inode_isize = sizeof(struct erofs_inode_compact);
132 *ofs += vi->inode_isize;
133 vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);
135 inode->i_mode = le16_to_cpu(dic->i_mode);
136 switch (inode->i_mode & S_IFMT) {
140 vi->raw_blkaddr = le32_to_cpu(dic->i_u.raw_blkaddr);
145 new_decode_dev(le32_to_cpu(dic->i_u.rdev));
/* compact inodes only store 16-bit uid/gid/nlink on disk */
154 i_uid_write(inode, le16_to_cpu(dic->i_uid));
155 i_gid_write(inode, le16_to_cpu(dic->i_gid));
156 set_nlink(inode, le16_to_cpu(dic->i_nlink));
158 /* use build time for compact inodes */
159 inode->i_ctime.tv_sec = sbi->build_time;
160 inode->i_ctime.tv_nsec = sbi->build_time_nsec;
162 inode->i_size = le32_to_cpu(dic->i_size);
163 if (erofs_inode_is_data_compressed(vi->datalayout))
164 nblks = le32_to_cpu(dic->i_u.compressed_blocks);
165 else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
166 vi->chunkformat = le16_to_cpu(dic->i_u.c.format);
169 erofs_err(inode->i_sb,
170 "unsupported on-disk inode version %u of nid %llu",
171 erofs_inode_version(ifmt), vi->nid);
/* chunk-based layout: validate and decode the chunk format word */
176 if (vi->datalayout == EROFS_INODE_CHUNK_BASED) {
177 if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) {
178 erofs_err(inode->i_sb,
179 "unsupported chunk format %x of nid %llu",
180 vi->chunkformat, vi->nid);
/* chunkbits = log2(chunk size): block bits + per-inode shift */
184 vi->chunkbits = LOG_BLOCK_SIZE +
185 (vi->chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
/* atime/mtime simply mirror ctime on this (read-only) filesystem */
187 inode->i_mtime.tv_sec = inode->i_ctime.tv_sec;
188 inode->i_atime.tv_sec = inode->i_ctime.tv_sec;
189 inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec;
190 inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec;
/* DAX is only allowed for uncompressed flat-plain regular files */
192 inode->i_flags &= ~S_DAX;
193 if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
194 vi->datalayout == EROFS_INODE_FLAT_PLAIN)
195 inode->i_flags |= S_DAX;
197 /* measure inode.i_blocks as generic filesystems */
198 inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9;
/* ... but use the on-disk block count when nblks is known */
200 inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK;
204 erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu",
205 inode->i_mode, vi->nid);
/* NOTE(review): presumably the error path — on success the metabuf is
 * kept mapped so callers can read inline data; confirm in full source. */
210 erofs_put_metabuf(buf);
/*
 * set up symlink handling for @inode: small inline symlinks use the fast
 * (in-memory) scheme, everything else falls back to page-based symlinks.
 * @kaddr is the mapped inode block returned by erofs_read_inode().
 */
214 static int erofs_fill_symlink(struct inode *inode, void *kaddr,
217 struct erofs_inode *vi = EROFS_I(inode);
220 /* if it cannot be handled with fast symlink scheme */
221 if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
222 inode->i_size >= EROFS_BLKSIZ) {
223 inode->i_op = &erofs_symlink_iops;
/* +1 for the trailing NUL appended below */
227 lnk = kmalloc(inode->i_size + 1, GFP_KERNEL);
/* inline target follows the on-disk inode and its xattr body */
231 m_pofs += vi->xattr_isize;
232 /* inline symlink data shouldn't cross block boundary */
233 if (m_pofs + inode->i_size > EROFS_BLKSIZ) {
235 erofs_err(inode->i_sb,
236 "inline data cross block boundary @ nid %llu",
/* NOTE(review): lnk presumably freed before this return — confirm */
239 return -EFSCORRUPTED;
241 memcpy(lnk, kaddr + m_pofs, inode->i_size);
242 lnk[inode->i_size] = '\0';
/* NOTE(review): lnk is presumably published via inode->i_link for
 * simple_get_link(); the assignment is elsewhere — confirm. */
245 inode->i_op = &erofs_fast_symlink_iops;
/*
 * fill a freshly allocated VFS inode: read its on-disk metadata, then
 * wire up the inode/file operations and address space by file type.
 */
249 static int erofs_fill_inode(struct inode *inode, int isdir)
251 struct erofs_inode *vi = EROFS_I(inode);
252 struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
257 trace_erofs_fill_inode(inode, isdir);
259 /* read inode base data from disk */
260 kaddr = erofs_read_inode(&buf, inode, &ofs);
262 return PTR_ERR(kaddr);
264 /* setup the new inode */
265 switch (inode->i_mode & S_IFMT) {
267 inode->i_op = &erofs_generic_iops;
/* compressed regular files go through the generic read-only fops */
268 if (erofs_inode_is_data_compressed(vi->datalayout))
269 inode->i_fop = &generic_ro_fops;
271 inode->i_fop = &erofs_file_fops;
274 inode->i_op = &erofs_dir_iops;
275 inode->i_fop = &erofs_dir_fops;
/* may copy inline symlink data straight out of the mapped buffer */
278 err = erofs_fill_symlink(inode, kaddr, ofs);
281 inode_nohighmem(inode);
/* char/block/fifo/socket nodes use the standard special-inode setup */
287 inode->i_op = &erofs_generic_iops;
288 init_special_inode(inode, inode->i_mode, inode->i_rdev);
/* compressed data needs the z_erofs address space initialization */
295 if (erofs_inode_is_data_compressed(vi->datalayout)) {
296 err = z_erofs_fill_inode(inode);
299 inode->i_mapping->a_ops = &erofs_raw_access_aops;
/* drop the metadata buffer taken by erofs_read_inode() */
302 erofs_put_metabuf(&buf);
307 * erofs nid is 64bits, but i_ino is 'unsigned long', therefore
308 * we should do more for 32-bit platform to find the right inode.
/* iget5_locked() test callback: match on the full 64-bit nid, since the
 * i_ino hash alone may collide on 32-bit platforms */
310 static int erofs_ilookup_test_actor(struct inode *inode, void *opaque)
312 const erofs_nid_t nid = *(erofs_nid_t *)opaque;
314 return EROFS_I(inode)->nid == nid;
/* iget5_locked() set callback: derive i_ino from the nid hash (which may
 * truncate the 64-bit nid on 32-bit platforms) */
317 static int erofs_iget_set_actor(struct inode *inode, void *opaque)
319 const erofs_nid_t nid = *(erofs_nid_t *)opaque;
321 inode->i_ino = erofs_inode_hash(nid);
/* look up — or allocate and lock — the in-core inode keyed by @nid */
325 static inline struct inode *erofs_iget_locked(struct super_block *sb,
328 const unsigned long hashval = erofs_inode_hash(nid);
330 return iget5_locked(sb, hashval, erofs_ilookup_test_actor,
331 erofs_iget_set_actor, &nid);
/*
 * get a fully initialized inode for @nid: returns the cached inode when
 * present, otherwise reads it from disk; returns an ERR_PTR on failure.
 */
334 struct inode *erofs_iget(struct super_block *sb,
338 struct inode *inode = erofs_iget_locked(sb, nid);
341 return ERR_PTR(-ENOMEM);
/* freshly allocated inode: fill it in from the on-disk metadata */
343 if (inode->i_state & I_NEW) {
345 struct erofs_inode *vi = EROFS_I(inode);
349 err = erofs_fill_inode(inode, isdir);
/* publish the now-initialized inode to concurrent lookups */
351 unlock_new_inode(inode);
/* NOTE(review): on error the inode is presumably released (iget_failed)
 * before this assignment — confirm against the full source. */
354 inode = ERR_PTR(err);
/* ->getattr: report erofs-specific statx attributes, then fill generics */
360 int erofs_getattr(struct user_namespace *mnt_userns, const struct path *path,
361 struct kstat *stat, u32 request_mask,
362 unsigned int query_flags)
364 struct inode *const inode = d_inode(path->dentry);
366 if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout))
367 stat->attributes |= STATX_ATTR_COMPRESSED;
/* the filesystem is read-only, so every inode is immutable */
369 stat->attributes |= STATX_ATTR_IMMUTABLE;
/* advertise which attribute bits this filesystem actually supports */
370 stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
371 STATX_ATTR_IMMUTABLE);
373 generic_fillattr(&init_user_ns, inode, stat);
/* inode operations shared by regular files and special inodes */
377 const struct inode_operations erofs_generic_iops = {
378 .getattr = erofs_getattr,
379 .listxattr = erofs_listxattr,
380 .get_acl = erofs_get_acl,
381 .fiemap = erofs_fiemap,
/* inode operations for page-backed (non-inline) symlinks */
384 const struct inode_operations erofs_symlink_iops = {
385 .get_link = page_get_link,
386 .getattr = erofs_getattr,
387 .listxattr = erofs_listxattr,
388 .get_acl = erofs_get_acl,
/* inode operations for fast symlinks whose target lives in i_link */
391 const struct inode_operations erofs_fast_symlink_iops = {
392 .get_link = simple_get_link,
393 .getattr = erofs_getattr,
394 .listxattr = erofs_listxattr,
395 .get_acl = erofs_get_acl,