/*
 * linux/fs/fcntl.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/dnotify.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>

#include <asm/poll.h>
#include <asm/siginfo.h>
#include <asm/uaccess.h>

extern int sock_fcntl (struct file *, unsigned int cmd, unsigned long arg);
extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
extern int fcntl_getlease(struct file *filp);

/* Expand files.  Return <0 on error; 0 nothing done; 1 files expanded,
 * we may have blocked.
 *
 * Should be called with the files->file_lock spinlock held for write.
 */
static int expand_files(struct files_struct *files, int nr)
{
	int err, expand = 0;
#ifdef FDSET_DEBUG
	printk (KERN_ERR __FUNCTION__ " %d: nr = %d\n", current->pid, nr);
#endif

	if (nr >= files->max_fdset) {
		expand = 1;
		if ((err = expand_fdset(files, nr)))
			goto out;
	}
	if (nr >= files->max_fds) {
		expand = 1;
		if ((err = expand_fd_array(files, nr)))
			goto out;
	}
	err = expand;
out:
#ifdef FDSET_DEBUG
	if (err)
		printk (KERN_ERR __FUNCTION__ " %d: return %d\n", current->pid, err);
#endif
	return err;
}

/*
 * locate_fd finds a free file descriptor in the open_fds fdset,
 * expanding the fd arrays if necessary.  The files write lock will be
 * held on exit to ensure that the fd can be entered atomically.
 */
static int locate_fd(struct files_struct *files,
			    struct file *file, int orig_start)
{
	unsigned int newfd;
	int error;
	int start;

	write_lock(&files->file_lock);

repeat:
	/*
	 * Someone might have closed fd's in the range
	 * orig_start..files->next_fd
	 */
	start = orig_start;
	if (start < files->next_fd)
		start = files->next_fd;

	newfd = start;
	if (start < files->max_fdset) {
		newfd = find_next_zero_bit(files->open_fds->fds_bits,
					   files->max_fdset, start);
	}

	error = -EMFILE;
	if (newfd >= current->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out;

	error = expand_files(files, newfd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fs array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = newfd + 1;

	error = newfd;

out:
	return error;
}
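/*
 * Locking note: locate_fd() returns with files->file_lock held for
 * writing on both the success and the failure path.  The caller must
 * either install the descriptor and drop the lock, which is what
 * allocate_fd() below does, or drop the lock itself, as dupfd() does
 * when locate_fd() fails.  Sketch of the expected calling pattern
 * (illustrative only; it simply mirrors dupfd()):
 *
 *	fd = locate_fd(files, file, start);
 *	if (fd < 0) {
 *		write_unlock(&files->file_lock);
 *		fput(file);
 *		return fd;
 *	}
 *	allocate_fd(files, file, fd);	(installs file, drops file_lock)
 */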
static inline void allocate_fd(struct files_struct *files,
					struct file *file, int fd)
{
	FD_SET(fd, files->open_fds);
	FD_CLR(fd, files->close_on_exec);
	write_unlock(&files->file_lock);
	fd_install(fd, file);
}

static int dupfd(struct file *file, int start)
{
	struct files_struct * files = current->files;
	int ret;

	ret = locate_fd(files, file, start);
	if (ret < 0)
		goto out_putf;
	allocate_fd(files, file, ret);
	return ret;

out_putf:
	write_unlock(&files->file_lock);
	fput(file);
	return ret;
}

asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd)
{
	int err = -EBADF;
	struct file * file, *tofree;
	struct files_struct * files = current->files;

	write_lock(&files->file_lock);
	if (!(file = fcheck(oldfd)))
		goto out_unlock;
	err = newfd;
	if (newfd == oldfd)
		goto out_unlock;
	err = -EBADF;
	if (newfd >= current->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out_unlock;
	get_file(file);			/* We are now finished with oldfd */

	err = expand_files(files, newfd);
	if (err < 0)
		goto out_fput;

	/* To avoid races with open() and dup(), we will mark the fd as
	 * in-use in the open-file bitmap throughout the entire dup2()
	 * process.  This is quite safe: do_close() uses the fd array
	 * entry, not the bitmap, to decide what work needs to be
	 * done.  --sct */
	/* Doesn't work. open() might be there first. --AV */

	/* Yes. It's a race. In user space. Nothing sane to do */
	err = -EBUSY;
	tofree = files->fd[newfd];
	if (!tofree && FD_ISSET(newfd, files->open_fds))
		goto out_fput;

	files->fd[newfd] = file;
	FD_SET(newfd, files->open_fds);
	FD_CLR(newfd, files->close_on_exec);
	write_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);
	err = newfd;
out:
	return err;
out_unlock:
	write_unlock(&files->file_lock);
	goto out;
out_fput:
	write_unlock(&files->file_lock);
	fput(file);
	goto out;
}

asmlinkage long sys_dup(unsigned int fildes)
{
	int ret = -EBADF;
	struct file * file = fget(fildes);

	if (file)
		ret = dupfd(file, 0);
	return ret;
}

#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | FASYNC)

static int setfl(int fd, struct file * filp, unsigned long arg)
{
	struct inode * inode = filp->f_dentry->d_inode;
	int error;

	/*
	 * In the case of an append-only file, O_APPEND
	 * cannot be cleared
	 */
	if (!(arg & O_APPEND) && IS_APPEND(inode))
		return -EPERM;

	/* Did FASYNC state change? */
	if ((arg ^ filp->f_flags) & FASYNC) {
		if (filp->f_op && filp->f_op->fasync) {
			error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
			if (error < 0)
				return error;
		}
	}

	/* required for strict SunOS emulation */
	if (O_NONBLOCK != O_NDELAY)
		if (arg & O_NDELAY)
			arg |= O_NONBLOCK;

	filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
	return 0;
}
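/*
 * Note on F_SETOWN/F_GETOWN (see do_fcntl() below and send_sigio()
 * further down): f_owner.pid holds a process id when positive and a
 * process group when negative.  An illustrative user-space call, not
 * part of this file, would be
 *
 *	fcntl(fd, F_SETOWN, -getpgrp());
 *
 * after which the queued SIGIO (or the signal set with F_SETSIG) is
 * delivered to every member of the caller's process group, and
 * F_GETOWN returns the stored negative value, which, as the XXX
 * comment under F_GETOWN notes, user space can mistake for an error.
 */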
static long do_fcntl(unsigned int fd, unsigned int cmd,
		     unsigned long arg, struct file * filp)
{
	long err = -EINVAL;

	switch (cmd) {
	case F_DUPFD:
		if (arg < NR_OPEN) {
			get_file(filp);
			err = dupfd(filp, arg);
		}
		break;
	case F_GETFD:
		err = get_close_on_exec(fd);
		break;
	case F_SETFD:
		err = 0;
		set_close_on_exec(fd, arg&1);
		break;
	case F_GETFL:
		err = filp->f_flags;
		break;
	case F_SETFL:
		lock_kernel();
		err = setfl(fd, filp, arg);
		unlock_kernel();
		break;
	case F_GETLK:
		err = fcntl_getlk(fd, (struct flock *) arg);
		break;
	case F_SETLK:
	case F_SETLKW:
		err = fcntl_setlk(fd, cmd, (struct flock *) arg);
		break;
	case F_GETOWN:
		/*
		 * XXX If f_owner is a process group, the
		 * negative return value will get converted
		 * into an error.  Oops.  If we keep the
		 * current syscall conventions, the only way
		 * to fix this will be in libc.
		 */
		err = filp->f_owner.pid;
		break;
	case F_SETOWN:
		lock_kernel();
		filp->f_owner.pid = arg;
		filp->f_owner.uid = current->uid;
		filp->f_owner.euid = current->euid;
		err = 0;
		if (S_ISSOCK (filp->f_dentry->d_inode->i_mode))
			err = sock_fcntl (filp, F_SETOWN, arg);
		unlock_kernel();
		break;
	case F_GETSIG:
		err = filp->f_owner.signum;
		break;
	case F_SETSIG:
		/* arg == 0 restores default behaviour. */
		if (arg < 0 || arg > _NSIG) {
			break;
		}
		err = 0;
		filp->f_owner.signum = arg;
		break;
	case F_GETLEASE:
		err = fcntl_getlease(filp);
		break;
	case F_SETLEASE:
		err = fcntl_setlease(fd, filp, arg);
		break;
	case F_NOTIFY:
		err = fcntl_dirnotify(fd, filp, arg);
		break;
	default:
		/* sockets need a few special fcntls. */
		err = -EINVAL;
		if (S_ISSOCK (filp->f_dentry->d_inode->i_mode))
			err = sock_fcntl (filp, cmd, arg);
		break;
	}

	return err;
}

asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	struct file * filp;
	long err = -EBADF;

	filp = fget(fd);
	if (!filp)
		goto out;

	err = do_fcntl(fd, cmd, arg, filp);

	fput(filp);
out:
	return err;
}

#if BITS_PER_LONG == 32
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	struct file * filp;
	long err;

	err = -EBADF;
	filp = fget(fd);
	if (!filp)
		goto out;

	lock_kernel();
	switch (cmd) {
	case F_GETLK64:
		err = fcntl_getlk64(fd, (struct flock64 *) arg);
		break;
	case F_SETLK64:
		err = fcntl_setlk64(fd, cmd, (struct flock64 *) arg);
		break;
	case F_SETLKW64:
		err = fcntl_setlk64(fd, cmd, (struct flock64 *) arg);
		break;
	default:
		err = do_fcntl(fd, cmd, arg, filp);
		break;
	}
	unlock_kernel();
	fput(filp);
out:
	return err;
}
#endif

/* Table to convert sigio signal codes into poll band bitmaps */

static long band_table[NSIGPOLL] = {
	POLLIN | POLLRDNORM,			/* POLL_IN */
	POLLOUT | POLLWRNORM | POLLWRBAND,	/* POLL_OUT */
	POLLIN | POLLRDNORM | POLLMSG,		/* POLL_MSG */
	POLLERR,				/* POLL_ERR */
	POLLPRI | POLLRDBAND,			/* POLL_PRI */
	POLLHUP | POLLERR			/* POLL_HUP */
};

static void send_sigio_to_task(struct task_struct *p,
			       struct fown_struct *fown,
			       int fd, int reason)
{
	if ((fown->euid != 0) &&
	    (fown->euid ^ p->suid) && (fown->euid ^ p->uid) &&
	    (fown->uid ^ p->suid) && (fown->uid ^ p->uid))
		return;
	switch (fown->signum) {
		siginfo_t si;
		default:
			/* Queue a rt signal with the appropriate fd as its
			   value.  We use SI_SIGIO as the source, not
			   SI_KERNEL, since kernel signals always get
			   delivered even if we can't queue.  Failure to
			   queue in this case _should_ be reported; we fall
			   back to SIGIO in that case. --sct */
			si.si_signo = fown->signum;
			si.si_errno = 0;
			si.si_code  = reason & ~__SI_MASK;
			/* Make sure we are called with one of the POLL_*
			   reasons, otherwise we could leak kernel stack into
			   userspace.  */
			if ((reason & __SI_MASK) != __SI_POLL)
				BUG();
			if (reason - POLL_IN >= NSIGPOLL)
				si.si_band = ~0L;
			else
				si.si_band = band_table[reason - POLL_IN];
			si.si_fd = fd;
			if (!send_sig_info(fown->signum, &si, p))
				break;
		/* fall-through: fall back on the old plain SIGIO signal */
		case 0:
			send_sig(SIGIO, p, 1);
	}
}

void send_sigio(struct fown_struct *fown, int fd, int band)
{
	struct task_struct * p;
	int pid = fown->pid;

	read_lock(&tasklist_lock);
	if ( (pid > 0) && (p = find_task_by_pid(pid)) ) {
		send_sigio_to_task(p, fown, fd, band);
		goto out;
	}
	for_each_task(p) {
		int match = p->pid;
		if (pid < 0)
			match = -p->pgrp;
		if (pid != match)
			continue;
		send_sigio_to_task(p, fown, fd, band);
	}
out:
	read_unlock(&tasklist_lock);
}

static rwlock_t fasync_lock = RW_LOCK_UNLOCKED;
static kmem_cache_t *fasync_cache;
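/*
 * Typical driver-side use of the fasync helpers below (sketch only;
 * the my_* names are hypothetical and not part of this file): a
 * character driver's fasync file operation wraps fasync_helper()
 * around its own queue head, and its data-ready path calls
 * kill_fasync():
 *
 *	static struct fasync_struct *my_queue;
 *
 *	static int my_fasync(int fd, struct file *filp, int on)
 *	{
 *		return fasync_helper(fd, filp, on, &my_queue);
 *	}
 *
 *	... and when new data arrives:
 *		kill_fasync(&my_queue, SIGIO, POLL_IN);
 */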
/*
 * fasync_helper() is used by some character device drivers (mainly mice)
 * to set up the fasync queue.  It returns negative on error, 0 if it did
 * no changes and positive if it added/deleted the entry.
 */
int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
{
	struct fasync_struct *fa, **fp;
	struct fasync_struct *new = NULL;
	int result = 0;

	if (on) {
		new = kmem_cache_alloc(fasync_cache, SLAB_KERNEL);
		if (!new)
			return -ENOMEM;
	}
	write_lock_irq(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file == filp) {
			if(on) {
				fa->fa_fd = fd;
				kmem_cache_free(fasync_cache, new);
			} else {
				*fp = fa->fa_next;
				kmem_cache_free(fasync_cache, fa);
				result = 1;
			}
			goto out;
		}
	}

	if (on) {
		new->magic = FASYNC_MAGIC;
		new->fa_file = filp;
		new->fa_fd = fd;
		new->fa_next = *fapp;
		*fapp = new;
		result = 1;
	}
out:
	write_unlock_irq(&fasync_lock);
	return result;
}

void __kill_fasync(struct fasync_struct *fa, int sig, int band)
{
	while (fa) {
		struct fown_struct * fown;
		if (fa->magic != FASYNC_MAGIC) {
			printk(KERN_ERR "kill_fasync: bad magic number in "
			       "fasync_struct!\n");
			return;
		}
		fown = &fa->fa_file->f_owner;
		/* Don't send SIGURG to processes which have not set a
		   queued signum: SIGURG has its own default signalling
		   mechanism. */
		if (fown->pid && !(sig == SIGURG && fown->signum == 0))
			send_sigio(fown, fa->fa_fd, band);
		fa = fa->fa_next;
	}
}

void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
	read_lock(&fasync_lock);
	__kill_fasync(*fp, sig, band);
	read_unlock(&fasync_lock);
}

static int __init fasync_init(void)
{
	fasync_cache = kmem_cache_create("fasync cache",
		sizeof(struct fasync_struct), 0, 0, NULL, NULL);
	if (!fasync_cache)
		panic("cannot create fasync slab cache");
	return 0;
}

module_init(fasync_init)