/*
 * Copyright (c) 2004-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/compat.h>
#include <linux/ioctl.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_fsops.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_attr.h"
#include "xfs_ioctl.h"
#include "xfs_ioctl32.h"
#include "xfs_trace.h"
/*
 * Rebuild a native ioctl command number from a 32-bit compat one by
 * substituting the native structure's size into the encoded size field;
 * direction, type and number are preserved.
 */
#define _NATIVE_IOC(cmd, type) \
	_IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type))
43 #ifdef BROKEN_X86_ALIGNMENT
45 xfs_compat_flock64_copyin(
47 compat_xfs_flock64_t __user *arg32)
49 if (get_user(bf->l_type, &arg32->l_type) ||
50 get_user(bf->l_whence, &arg32->l_whence) ||
51 get_user(bf->l_start, &arg32->l_start) ||
52 get_user(bf->l_len, &arg32->l_len) ||
53 get_user(bf->l_sysid, &arg32->l_sysid) ||
54 get_user(bf->l_pid, &arg32->l_pid) ||
55 copy_from_user(bf->l_pad, &arg32->l_pad, 4*sizeof(u32)))
61 xfs_compat_ioc_fsgeometry_v1(
63 compat_xfs_fsop_geom_v1_t __user *arg32)
65 xfs_fsop_geom_t fsgeo;
68 error = xfs_fs_geometry(mp, &fsgeo, 3);
71 /* The 32-bit variant simply has some padding at the end */
72 if (copy_to_user(arg32, &fsgeo, sizeof(struct compat_xfs_fsop_geom_v1)))
78 xfs_compat_growfs_data_copyin(
79 struct xfs_growfs_data *in,
80 compat_xfs_growfs_data_t __user *arg32)
82 if (get_user(in->newblocks, &arg32->newblocks) ||
83 get_user(in->imaxpct, &arg32->imaxpct))
89 xfs_compat_growfs_rt_copyin(
90 struct xfs_growfs_rt *in,
91 compat_xfs_growfs_rt_t __user *arg32)
93 if (get_user(in->newblocks, &arg32->newblocks) ||
94 get_user(in->extsize, &arg32->extsize))
100 xfs_inumbers_fmt_compat(
101 void __user *ubuffer,
102 const struct xfs_inogrp *buffer,
106 compat_xfs_inogrp_t __user *p32 = ubuffer;
109 for (i = 0; i < count; i++) {
110 if (put_user(buffer[i].xi_startino, &p32[i].xi_startino) ||
111 put_user(buffer[i].xi_alloccount, &p32[i].xi_alloccount) ||
112 put_user(buffer[i].xi_allocmask, &p32[i].xi_allocmask))
115 *written = count * sizeof(*p32);
120 #define xfs_inumbers_fmt_compat xfs_inumbers_fmt
121 #endif /* BROKEN_X86_ALIGNMENT */
124 xfs_ioctl32_bstime_copyin(
125 xfs_bstime_t *bstime,
126 compat_xfs_bstime_t __user *bstime32)
128 compat_time_t sec32; /* tv_sec differs on 64 vs. 32 */
130 if (get_user(sec32, &bstime32->tv_sec) ||
131 get_user(bstime->tv_nsec, &bstime32->tv_nsec))
133 bstime->tv_sec = sec32;
137 /* xfs_bstat_t has differing alignment on intel, & bstime_t sizes everywhere */
139 xfs_ioctl32_bstat_copyin(
141 compat_xfs_bstat_t __user *bstat32)
143 if (get_user(bstat->bs_ino, &bstat32->bs_ino) ||
144 get_user(bstat->bs_mode, &bstat32->bs_mode) ||
145 get_user(bstat->bs_nlink, &bstat32->bs_nlink) ||
146 get_user(bstat->bs_uid, &bstat32->bs_uid) ||
147 get_user(bstat->bs_gid, &bstat32->bs_gid) ||
148 get_user(bstat->bs_rdev, &bstat32->bs_rdev) ||
149 get_user(bstat->bs_blksize, &bstat32->bs_blksize) ||
150 get_user(bstat->bs_size, &bstat32->bs_size) ||
151 xfs_ioctl32_bstime_copyin(&bstat->bs_atime, &bstat32->bs_atime) ||
152 xfs_ioctl32_bstime_copyin(&bstat->bs_mtime, &bstat32->bs_mtime) ||
153 xfs_ioctl32_bstime_copyin(&bstat->bs_ctime, &bstat32->bs_ctime) ||
154 get_user(bstat->bs_blocks, &bstat32->bs_size) ||
155 get_user(bstat->bs_xflags, &bstat32->bs_size) ||
156 get_user(bstat->bs_extsize, &bstat32->bs_extsize) ||
157 get_user(bstat->bs_extents, &bstat32->bs_extents) ||
158 get_user(bstat->bs_gen, &bstat32->bs_gen) ||
159 get_user(bstat->bs_projid_lo, &bstat32->bs_projid_lo) ||
160 get_user(bstat->bs_projid_hi, &bstat32->bs_projid_hi) ||
161 get_user(bstat->bs_forkoff, &bstat32->bs_forkoff) ||
162 get_user(bstat->bs_dmevmask, &bstat32->bs_dmevmask) ||
163 get_user(bstat->bs_dmstate, &bstat32->bs_dmstate) ||
164 get_user(bstat->bs_aextents, &bstat32->bs_aextents))
169 /* XFS_IOC_FSBULKSTAT and friends */
172 xfs_bstime_store_compat(
173 compat_xfs_bstime_t __user *p32,
174 const xfs_bstime_t *p)
179 if (put_user(sec32, &p32->tv_sec) ||
180 put_user(p->tv_nsec, &p32->tv_nsec))
185 /* Return 0 on success or positive error (to xfs_bulkstat()) */
187 xfs_bulkstat_one_fmt_compat(
188 void __user *ubuffer,
191 const xfs_bstat_t *buffer)
193 compat_xfs_bstat_t __user *p32 = ubuffer;
195 if (ubsize < sizeof(*p32))
198 if (put_user(buffer->bs_ino, &p32->bs_ino) ||
199 put_user(buffer->bs_mode, &p32->bs_mode) ||
200 put_user(buffer->bs_nlink, &p32->bs_nlink) ||
201 put_user(buffer->bs_uid, &p32->bs_uid) ||
202 put_user(buffer->bs_gid, &p32->bs_gid) ||
203 put_user(buffer->bs_rdev, &p32->bs_rdev) ||
204 put_user(buffer->bs_blksize, &p32->bs_blksize) ||
205 put_user(buffer->bs_size, &p32->bs_size) ||
206 xfs_bstime_store_compat(&p32->bs_atime, &buffer->bs_atime) ||
207 xfs_bstime_store_compat(&p32->bs_mtime, &buffer->bs_mtime) ||
208 xfs_bstime_store_compat(&p32->bs_ctime, &buffer->bs_ctime) ||
209 put_user(buffer->bs_blocks, &p32->bs_blocks) ||
210 put_user(buffer->bs_xflags, &p32->bs_xflags) ||
211 put_user(buffer->bs_extsize, &p32->bs_extsize) ||
212 put_user(buffer->bs_extents, &p32->bs_extents) ||
213 put_user(buffer->bs_gen, &p32->bs_gen) ||
214 put_user(buffer->bs_projid, &p32->bs_projid) ||
215 put_user(buffer->bs_projid_hi, &p32->bs_projid_hi) ||
216 put_user(buffer->bs_forkoff, &p32->bs_forkoff) ||
217 put_user(buffer->bs_dmevmask, &p32->bs_dmevmask) ||
218 put_user(buffer->bs_dmstate, &p32->bs_dmstate) ||
219 put_user(buffer->bs_aextents, &p32->bs_aextents))
222 *ubused = sizeof(*p32);
227 xfs_bulkstat_one_compat(
228 xfs_mount_t *mp, /* mount point for filesystem */
229 xfs_ino_t ino, /* inode number to get data for */
230 void __user *buffer, /* buffer to place output in */
231 int ubsize, /* size of buffer */
232 int *ubused, /* bytes used by me */
233 int *stat) /* BULKSTAT_RV_... */
235 return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
236 xfs_bulkstat_one_fmt_compat,
240 /* copied from xfs_ioctl.c */
242 xfs_compat_ioc_bulkstat(
245 compat_xfs_fsop_bulkreq_t __user *p32)
248 xfs_fsop_bulkreq_t bulkreq;
249 int count; /* # of records returned */
250 xfs_ino_t inlast; /* last inode number */
254 /* done = 1 if there are more stats to get and if bulkstat */
255 /* should be called again (unused here, but used in dmapi) */
257 if (!capable(CAP_SYS_ADMIN))
260 if (XFS_FORCED_SHUTDOWN(mp))
263 if (get_user(addr, &p32->lastip))
265 bulkreq.lastip = compat_ptr(addr);
266 if (get_user(bulkreq.icount, &p32->icount) ||
267 get_user(addr, &p32->ubuffer))
269 bulkreq.ubuffer = compat_ptr(addr);
270 if (get_user(addr, &p32->ocount))
272 bulkreq.ocount = compat_ptr(addr);
274 if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64)))
277 if ((count = bulkreq.icount) <= 0)
280 if (bulkreq.ubuffer == NULL)
283 if (cmd == XFS_IOC_FSINUMBERS_32) {
284 error = xfs_inumbers(mp, &inlast, &count,
285 bulkreq.ubuffer, xfs_inumbers_fmt_compat);
286 } else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE_32) {
289 error = xfs_bulkstat_one_compat(mp, inlast, bulkreq.ubuffer,
290 sizeof(compat_xfs_bstat_t), NULL, &res);
291 } else if (cmd == XFS_IOC_FSBULKSTAT_32) {
292 error = xfs_bulkstat(mp, &inlast, &count,
293 xfs_bulkstat_one_compat, sizeof(compat_xfs_bstat_t),
294 bulkreq.ubuffer, &done);
300 if (bulkreq.ocount != NULL) {
301 if (copy_to_user(bulkreq.lastip, &inlast,
305 if (copy_to_user(bulkreq.ocount, &count, sizeof(count)))
313 xfs_compat_handlereq_copyin(
314 xfs_fsop_handlereq_t *hreq,
315 compat_xfs_fsop_handlereq_t __user *arg32)
317 compat_xfs_fsop_handlereq_t hreq32;
319 if (copy_from_user(&hreq32, arg32, sizeof(compat_xfs_fsop_handlereq_t)))
322 hreq->fd = hreq32.fd;
323 hreq->path = compat_ptr(hreq32.path);
324 hreq->oflags = hreq32.oflags;
325 hreq->ihandle = compat_ptr(hreq32.ihandle);
326 hreq->ihandlen = hreq32.ihandlen;
327 hreq->ohandle = compat_ptr(hreq32.ohandle);
328 hreq->ohandlen = compat_ptr(hreq32.ohandlen);
333 STATIC struct dentry *
334 xfs_compat_handlereq_to_dentry(
335 struct file *parfilp,
336 compat_xfs_fsop_handlereq_t *hreq)
338 return xfs_handle_to_dentry(parfilp,
339 compat_ptr(hreq->ihandle), hreq->ihandlen);
343 xfs_compat_attrlist_by_handle(
344 struct file *parfilp,
348 attrlist_cursor_kern_t *cursor;
349 compat_xfs_fsop_attrlist_handlereq_t __user *p = arg;
350 compat_xfs_fsop_attrlist_handlereq_t al_hreq;
351 struct dentry *dentry;
354 if (!capable(CAP_SYS_ADMIN))
356 if (copy_from_user(&al_hreq, arg,
357 sizeof(compat_xfs_fsop_attrlist_handlereq_t)))
359 if (al_hreq.buflen < sizeof(struct attrlist) ||
360 al_hreq.buflen > XFS_XATTR_LIST_MAX)
364 * Reject flags, only allow namespaces.
366 if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE))
369 dentry = xfs_compat_handlereq_to_dentry(parfilp, &al_hreq.hreq);
371 return PTR_ERR(dentry);
374 kbuf = kmem_zalloc_large(al_hreq.buflen, KM_SLEEP);
378 cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
379 error = xfs_attr_list(XFS_I(d_inode(dentry)), kbuf, al_hreq.buflen,
380 al_hreq.flags, cursor);
384 if (copy_to_user(&p->pos, cursor, sizeof(attrlist_cursor_kern_t))) {
389 if (copy_to_user(compat_ptr(al_hreq.buffer), kbuf, al_hreq.buflen))
400 xfs_compat_attrmulti_by_handle(
401 struct file *parfilp,
405 compat_xfs_attr_multiop_t *ops;
406 compat_xfs_fsop_attrmulti_handlereq_t am_hreq;
407 struct dentry *dentry;
408 unsigned int i, size;
409 unsigned char *attr_name;
411 if (!capable(CAP_SYS_ADMIN))
413 if (copy_from_user(&am_hreq, arg,
414 sizeof(compat_xfs_fsop_attrmulti_handlereq_t)))
418 if (am_hreq.opcount >= INT_MAX / sizeof(compat_xfs_attr_multiop_t))
421 dentry = xfs_compat_handlereq_to_dentry(parfilp, &am_hreq.hreq);
423 return PTR_ERR(dentry);
426 size = am_hreq.opcount * sizeof(compat_xfs_attr_multiop_t);
427 if (!size || size > 16 * PAGE_SIZE)
430 ops = memdup_user(compat_ptr(am_hreq.ops), size);
432 error = PTR_ERR(ops);
437 attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
442 for (i = 0; i < am_hreq.opcount; i++) {
443 ops[i].am_error = strncpy_from_user((char *)attr_name,
444 compat_ptr(ops[i].am_attrname),
446 if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
448 if (ops[i].am_error < 0)
451 switch (ops[i].am_opcode) {
453 ops[i].am_error = xfs_attrmulti_attr_get(
454 d_inode(dentry), attr_name,
455 compat_ptr(ops[i].am_attrvalue),
456 &ops[i].am_length, ops[i].am_flags);
459 ops[i].am_error = mnt_want_write_file(parfilp);
462 ops[i].am_error = xfs_attrmulti_attr_set(
463 d_inode(dentry), attr_name,
464 compat_ptr(ops[i].am_attrvalue),
465 ops[i].am_length, ops[i].am_flags);
466 mnt_drop_write_file(parfilp);
469 ops[i].am_error = mnt_want_write_file(parfilp);
472 ops[i].am_error = xfs_attrmulti_attr_remove(
473 d_inode(dentry), attr_name,
475 mnt_drop_write_file(parfilp);
478 ops[i].am_error = -EINVAL;
482 if (copy_to_user(compat_ptr(am_hreq.ops), ops, size))
494 xfs_compat_fssetdm_by_handle(
495 struct file *parfilp,
499 struct fsdmidata fsd;
500 compat_xfs_fsop_setdm_handlereq_t dmhreq;
501 struct dentry *dentry;
503 if (!capable(CAP_MKNOD))
505 if (copy_from_user(&dmhreq, arg,
506 sizeof(compat_xfs_fsop_setdm_handlereq_t)))
509 dentry = xfs_compat_handlereq_to_dentry(parfilp, &dmhreq.hreq);
511 return PTR_ERR(dentry);
513 if (IS_IMMUTABLE(d_inode(dentry)) || IS_APPEND(d_inode(dentry))) {
518 if (copy_from_user(&fsd, compat_ptr(dmhreq.data), sizeof(fsd))) {
523 error = xfs_set_dmattrs(XFS_I(d_inode(dentry)), fsd.fsd_dmevmask,
532 xfs_file_compat_ioctl(
537 struct inode *inode = file_inode(filp);
538 struct xfs_inode *ip = XFS_I(inode);
539 struct xfs_mount *mp = ip->i_mount;
540 void __user *arg = (void __user *)p;
544 if (filp->f_mode & FMODE_NOCMTIME)
545 ioflags |= XFS_IO_INVIS;
547 trace_xfs_file_compat_ioctl(ip);
550 /* No size or alignment issues on any arch */
551 case XFS_IOC_DIOINFO:
552 case XFS_IOC_FSGEOMETRY:
553 case XFS_IOC_FSGETXATTR:
554 case XFS_IOC_FSSETXATTR:
555 case XFS_IOC_FSGETXATTRA:
556 case XFS_IOC_FSSETDM:
557 case XFS_IOC_GETBMAP:
558 case XFS_IOC_GETBMAPA:
559 case XFS_IOC_GETBMAPX:
560 case XFS_IOC_FSCOUNTS:
561 case XFS_IOC_SET_RESBLKS:
562 case XFS_IOC_GET_RESBLKS:
563 case XFS_IOC_FSGROWFSLOG:
564 case XFS_IOC_GOINGDOWN:
565 case XFS_IOC_ERROR_INJECTION:
566 case XFS_IOC_ERROR_CLEARALL:
567 return xfs_file_ioctl(filp, cmd, p);
568 #ifndef BROKEN_X86_ALIGNMENT
569 /* These are handled fine if no alignment issues */
570 case XFS_IOC_ALLOCSP:
573 case XFS_IOC_UNRESVSP:
574 case XFS_IOC_ALLOCSP64:
575 case XFS_IOC_FREESP64:
576 case XFS_IOC_RESVSP64:
577 case XFS_IOC_UNRESVSP64:
578 case XFS_IOC_FSGEOMETRY_V1:
579 case XFS_IOC_FSGROWFSDATA:
580 case XFS_IOC_FSGROWFSRT:
581 case XFS_IOC_ZERO_RANGE:
582 return xfs_file_ioctl(filp, cmd, p);
584 case XFS_IOC_ALLOCSP_32:
585 case XFS_IOC_FREESP_32:
586 case XFS_IOC_ALLOCSP64_32:
587 case XFS_IOC_FREESP64_32:
588 case XFS_IOC_RESVSP_32:
589 case XFS_IOC_UNRESVSP_32:
590 case XFS_IOC_RESVSP64_32:
591 case XFS_IOC_UNRESVSP64_32:
592 case XFS_IOC_ZERO_RANGE_32: {
593 struct xfs_flock64 bf;
595 if (xfs_compat_flock64_copyin(&bf, arg))
597 cmd = _NATIVE_IOC(cmd, struct xfs_flock64);
598 return xfs_ioc_space(ip, inode, filp, ioflags, cmd, &bf);
600 case XFS_IOC_FSGEOMETRY_V1_32:
601 return xfs_compat_ioc_fsgeometry_v1(mp, arg);
602 case XFS_IOC_FSGROWFSDATA_32: {
603 struct xfs_growfs_data in;
605 if (xfs_compat_growfs_data_copyin(&in, arg))
607 error = mnt_want_write_file(filp);
610 error = xfs_growfs_data(mp, &in);
611 mnt_drop_write_file(filp);
614 case XFS_IOC_FSGROWFSRT_32: {
615 struct xfs_growfs_rt in;
617 if (xfs_compat_growfs_rt_copyin(&in, arg))
619 error = mnt_want_write_file(filp);
622 error = xfs_growfs_rt(mp, &in);
623 mnt_drop_write_file(filp);
	/* long changes size, but xfs only copies out 32 bits */
628 case XFS_IOC_GETXFLAGS_32:
629 case XFS_IOC_SETXFLAGS_32:
630 case XFS_IOC_GETVERSION_32:
631 cmd = _NATIVE_IOC(cmd, long);
632 return xfs_file_ioctl(filp, cmd, p);
633 case XFS_IOC_SWAPEXT_32: {
634 struct xfs_swapext sxp;
635 struct compat_xfs_swapext __user *sxu = arg;
637 /* Bulk copy in up to the sx_stat field, then copy bstat */
638 if (copy_from_user(&sxp, sxu,
639 offsetof(struct xfs_swapext, sx_stat)) ||
640 xfs_ioctl32_bstat_copyin(&sxp.sx_stat, &sxu->sx_stat))
642 error = mnt_want_write_file(filp);
645 error = xfs_ioc_swapext(&sxp);
646 mnt_drop_write_file(filp);
649 case XFS_IOC_FSBULKSTAT_32:
650 case XFS_IOC_FSBULKSTAT_SINGLE_32:
651 case XFS_IOC_FSINUMBERS_32:
652 return xfs_compat_ioc_bulkstat(mp, cmd, arg);
653 case XFS_IOC_FD_TO_HANDLE_32:
654 case XFS_IOC_PATH_TO_HANDLE_32:
655 case XFS_IOC_PATH_TO_FSHANDLE_32: {
656 struct xfs_fsop_handlereq hreq;
658 if (xfs_compat_handlereq_copyin(&hreq, arg))
660 cmd = _NATIVE_IOC(cmd, struct xfs_fsop_handlereq);
661 return xfs_find_handle(cmd, &hreq);
663 case XFS_IOC_OPEN_BY_HANDLE_32: {
664 struct xfs_fsop_handlereq hreq;
666 if (xfs_compat_handlereq_copyin(&hreq, arg))
668 return xfs_open_by_handle(filp, &hreq);
670 case XFS_IOC_READLINK_BY_HANDLE_32: {
671 struct xfs_fsop_handlereq hreq;
673 if (xfs_compat_handlereq_copyin(&hreq, arg))
675 return xfs_readlink_by_handle(filp, &hreq);
677 case XFS_IOC_ATTRLIST_BY_HANDLE_32:
678 return xfs_compat_attrlist_by_handle(filp, arg);
679 case XFS_IOC_ATTRMULTI_BY_HANDLE_32:
680 return xfs_compat_attrmulti_by_handle(filp, arg);
681 case XFS_IOC_FSSETDM_BY_HANDLE_32:
682 return xfs_compat_fssetdm_by_handle(filp, arg);