/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 *  4 February 1994
 *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 *     flag set in its personality we do *not* modify the given timeout
 *     parameter to reflect time remaining.
 *
 *  24 January 2000
 *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
 */

#include <linux/malloc.h>
#include <linux/smp_lock.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <asm/uaccess.h>

/* Round x up to the next multiple of y (integer arithmetic, y != 0). */
#define ROUND_UP(x,y) (((x)+(y)-1)/(y))
/* Mask reported for files whose f_op lacks a ->poll method: such files
 * are treated as always readable and writable. */
#define DEFAULT_POLLMASK (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)

/*
 * One registered wait: the file we took a reference on in __pollwait(),
 * our wait-queue entry, and the queue head it was added to (kept so
 * poll_freewait() can remove the entry again).
 */
struct poll_table_entry {
	struct file * filp;
	wait_queue_t wait;
	wait_queue_head_t * wait_address;
};

/*
 * A page-sized container of poll_table_entry slots.  Pages are chained
 * through ->next; ->entry points one past the last used slot in the
 * trailing zero-length array entries[], which fills the rest of the page.
 */
struct poll_table_page {
	struct poll_table_page * next;
	struct poll_table_entry * entry;
	struct poll_table_entry entries[0];
};

/* True when no free entry slot remains in this page. */
#define POLL_TABLE_FULL(table) \
	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))

/*
 * Ok, Peter made a complicated, but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: This code may not be easy to
 * follow, but it should be free of race-conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait() make all the
 * work. poll_wait() is an inline-function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table. 
*/ 55 void poll_freewait(poll_table* pt) { struct poll_table_page * p = pt->table; 58 while (p) { struct poll_table_entry * entry; struct poll_table_page *old; entry = p->entry; 63 do { entry--; remove_wait_queue(entry->wait_address,&entry->wait); fput(entry->filp); 67 } while (entry > p->entries); old = p; p = p->next; free_page((unsigned long) old); } } 74 void __pollwait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p) { struct poll_table_page *table = p->table; 78 if (!table || POLL_TABLE_FULL(table)) { struct poll_table_page *new_table; new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL); 82 if (!new_table) { p->error = -ENOMEM; 84 __set_current_state(TASK_RUNNING); 85 return; } new_table->entry = new_table->entries; new_table->next = table; p->table = new_table; table = new_table; } /* Add a new entry */ { struct poll_table_entry * entry = table->entry; table->entry = entry+1; get_file(filp); entry->filp = filp; entry->wait_address = wait_address; init_waitqueue_entry(&entry->wait, current); add_wait_queue(wait_address,&entry->wait); } } #define __IN(fds, n) (fds->in + n) #define __OUT(fds, n) (fds->out + n) #define __EX(fds, n) (fds->ex + n) #define __RES_IN(fds, n) (fds->res_in + n) #define __RES_OUT(fds, n) (fds->res_out + n) #define __RES_EX(fds, n) (fds->res_ex + n) #define BITS(fds, n) (*__IN(fds, n)|*__OUT(fds, n)|*__EX(fds, n)) 114 static int max_select_fd(unsigned long n, fd_set_bits *fds) { unsigned long *open_fds; unsigned long set; int max; /* handle last in-complete long-word first */ set = ~(~0UL << (n & (__NFDBITS-1))); n /= __NFDBITS; open_fds = current->files->open_fds->fds_bits+n; max = 0; 125 if (set) { set &= BITS(fds, n); 127 if (set) { 128 if (!(set & ~*open_fds)) 129 goto get_max; 130 return -EBADF; } } 133 while (n) { open_fds--; n--; set = BITS(fds, n); 137 if (!set) 138 continue; 139 if (set & ~*open_fds) 140 return -EBADF; 141 if (max) 142 continue; get_max: 144 do { max++; set >>= 1; 147 } while 
(set); max += n * __NFDBITS; } 151 return max; } #define BIT(i) (1UL << ((i)&(__NFDBITS-1))) #define MEM(i,m) ((m)+(unsigned)(i)/__NFDBITS) #define ISSET(i,m) (((i)&*(m)) != 0) #define SET(i,m) (*(m) |= (i)) #define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR) #define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR) #define POLLEX_SET (POLLPRI) 163 int do_select(int n, fd_set_bits *fds, long *timeout) { poll_table table, *wait; int retval, i, off; long __timeout = *timeout; read_lock(¤t->files->file_lock); retval = max_select_fd(n, fds); 171 read_unlock(¤t->files->file_lock); 173 if (retval < 0) 174 return retval; n = retval; poll_initwait(&table); wait = &table; 179 if (!__timeout) wait = NULL; retval = 0; 182 for (;;) { 183 set_current_state(TASK_INTERRUPTIBLE); 184 for (i = 0 ; i < n; i++) { unsigned long bit = BIT(i); unsigned long mask; struct file *file; off = i / __NFDBITS; 190 if (!(bit & BITS(fds, off))) 191 continue; file = fget(i); mask = POLLNVAL; 194 if (file) { mask = DEFAULT_POLLMASK; 196 if (file->f_op && file->f_op->poll) mask = file->f_op->poll(file, wait); fput(file); } 200 if ((mask & POLLIN_SET) && ISSET(bit, __IN(fds,off))) { SET(bit, __RES_IN(fds,off)); retval++; wait = NULL; } 205 if ((mask & POLLOUT_SET) && ISSET(bit, __OUT(fds,off))) { SET(bit, __RES_OUT(fds,off)); retval++; wait = NULL; } 210 if ((mask & POLLEX_SET) && ISSET(bit, __EX(fds,off))) { SET(bit, __RES_EX(fds,off)); retval++; wait = NULL; } } wait = NULL; 217 if (retval || !__timeout || signal_pending(current)) 218 break; 219 if(table.error) { retval = table.error; 221 break; } __timeout = schedule_timeout(__timeout); } current->state = TASK_RUNNING; poll_freewait(&table); /* * Up-to-date the caller timeout. 
*/ *timeout = __timeout; 233 return retval; } 236 static void *select_bits_alloc(int size) { 238 return kmalloc(6 * size, GFP_KERNEL); } 241 static void select_bits_free(void *bits, int size) { kfree(bits); } /* * We can actually return ERESTARTSYS instead of EINTR, but I'd * like to be certain this leads to no problems. So I return * EINTR just for safety. * * Update: ERESTARTSYS breaks at least the xview clock binary, so * I'm trying ERESTARTNOHAND which restart only when you want to. */ #define MAX_SELECT_SECONDS \ ((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1) asmlinkage long 258 sys_select(int n, fd_set *inp, fd_set *outp, fd_set *exp, struct timeval *tvp) { fd_set_bits fds; char *bits; long timeout; int ret, size; timeout = MAX_SCHEDULE_TIMEOUT; 266 if (tvp) { time_t sec, usec; if ((ret = verify_area(VERIFY_READ, tvp, sizeof(*tvp))) || (ret = __get_user(sec, &tvp->tv_sec)) 271 || (ret = __get_user(usec, &tvp->tv_usec))) 272 goto out_nofds; ret = -EINVAL; 275 if (sec < 0 || usec < 0) 276 goto out_nofds; 278 if ((unsigned long) sec < MAX_SELECT_SECONDS) { timeout = ROUND_UP(usec, 1000000/HZ); timeout += sec * (unsigned long) HZ; } } ret = -EINVAL; 285 if (n < 0) 286 goto out_nofds; 288 if (n > current->files->max_fdset) n = current->files->max_fdset; /* * We need 6 bitmaps (in/out/ex for both incoming and outgoing), * since we used fdset we need to allocate memory in units of * long-words. 
*/ ret = -ENOMEM; size = FDS_BYTES(n); bits = select_bits_alloc(size); 299 if (!bits) 300 goto out_nofds; fds.in = (unsigned long *) bits; fds.out = (unsigned long *) (bits + size); fds.ex = (unsigned long *) (bits + 2*size); fds.res_in = (unsigned long *) (bits + 3*size); fds.res_out = (unsigned long *) (bits + 4*size); fds.res_ex = (unsigned long *) (bits + 5*size); if ((ret = get_fd_set(n, inp, fds.in)) || (ret = get_fd_set(n, outp, fds.out)) || 310 (ret = get_fd_set(n, exp, fds.ex))) 311 goto out; zero_fd_set(n, fds.res_in); zero_fd_set(n, fds.res_out); zero_fd_set(n, fds.res_ex); ret = do_select(n, &fds, &timeout); 318 if (tvp && !(current->personality & STICKY_TIMEOUTS)) { time_t sec = 0, usec = 0; 320 if (timeout) { sec = timeout / HZ; usec = timeout % HZ; usec *= (1000000/HZ); } put_user(sec, &tvp->tv_sec); put_user(usec, &tvp->tv_usec); } 329 if (ret < 0) 330 goto out; 331 if (!ret) { ret = -ERESTARTNOHAND; 333 if (signal_pending(current)) 334 goto out; ret = 0; } set_fd_set(n, inp, fds.res_in); set_fd_set(n, outp, fds.res_out); set_fd_set(n, exp, fds.res_ex); out: select_bits_free(bits, size); out_nofds: 345 return ret; } #define POLLFD_PER_PAGE ((PAGE_SIZE) / sizeof(struct pollfd)) 350 static void do_pollfd(unsigned int num, struct pollfd * fdpage, poll_table ** pwait, int *count) { int i; 355 for (i = 0; i < num; i++) { int fd; unsigned int mask; struct pollfd *fdp; mask = 0; fdp = fdpage+i; fd = fdp->fd; 363 if (fd >= 0) { struct file * file = fget(fd); mask = POLLNVAL; 366 if (file != NULL) { mask = DEFAULT_POLLMASK; 368 if (file->f_op && file->f_op->poll) mask = file->f_op->poll(file, *pwait); mask &= fdp->events | POLLERR | POLLHUP; fput(file); } 373 if (mask) { *pwait = NULL; (*count)++; } } fdp->revents = mask; } } 382 static int do_poll(unsigned int nfds, unsigned int nchunks, unsigned int nleft, struct pollfd *fds[], poll_table *wait, long timeout) { int count; poll_table* pt = wait; 388 for (;;) { unsigned int i; 391 
set_current_state(TASK_INTERRUPTIBLE); count = 0; 393 for (i=0; i < nchunks; i++) do_pollfd(POLLFD_PER_PAGE, fds[i], &pt, &count); 395 if (nleft) do_pollfd(nleft, fds[nchunks], &pt, &count); pt = NULL; 398 if (count || !timeout || signal_pending(current)) 399 break; count = wait->error; 401 if (count) 402 break; timeout = schedule_timeout(timeout); } current->state = TASK_RUNNING; 406 return count; } 409 asmlinkage long sys_poll(struct pollfd * ufds, unsigned int nfds, long timeout) { int i, j, fdcount, err; struct pollfd **fds; poll_table table, *wait; int nchunks, nleft; /* Do a sanity check on nfds ... */ 417 if (nfds > current->files->max_fds) 418 return -EINVAL; 420 if (timeout) { /* Careful about overflow in the intermediate values */ 422 if ((unsigned long) timeout < MAX_SCHEDULE_TIMEOUT / HZ) timeout = (unsigned long)(timeout*HZ+999)/1000+1; 424 else /* Negative or overflow */ timeout = MAX_SCHEDULE_TIMEOUT; } poll_initwait(&table); wait = &table; 430 if (!timeout) wait = NULL; err = -ENOMEM; fds = NULL; 435 if (nfds != 0) { fds = (struct pollfd **)kmalloc( (1 + (nfds - 1) / POLLFD_PER_PAGE) * sizeof(struct pollfd *), GFP_KERNEL); 439 if (fds == NULL) 440 goto out; } nchunks = 0; nleft = nfds; 445 while (nleft > POLLFD_PER_PAGE) { /* allocate complete PAGE_SIZE chunks */ fds[nchunks] = (struct pollfd *)__get_free_page(GFP_KERNEL); 447 if (fds[nchunks] == NULL) 448 goto out_fds; nchunks++; nleft -= POLLFD_PER_PAGE; } 452 if (nleft) { /* allocate last PAGE_SIZE chunk, only nleft elements used */ fds[nchunks] = (struct pollfd *)__get_free_page(GFP_KERNEL); 454 if (fds[nchunks] == NULL) 455 goto out_fds; } err = -EFAULT; 459 for (i=0; i < nchunks; i++) 460 if (copy_from_user(fds[i], ufds + i*POLLFD_PER_PAGE, PAGE_SIZE)) 461 goto out_fds1; 462 if (nleft) { if (copy_from_user(fds[nchunks], ufds + nchunks*POLLFD_PER_PAGE, 464 nleft * sizeof(struct pollfd))) 465 goto out_fds1; } fdcount = do_poll(nfds, nchunks, nleft, fds, wait, timeout); /* OK, now copy the 
revents fields back to user space. */ 471 for(i=0; i < nchunks; i++) 472 for (j=0; j < POLLFD_PER_PAGE; j++, ufds++) __put_user((fds[i] + j)->revents, &ufds->revents); 474 if (nleft) 475 for (j=0; j < nleft; j++, ufds++) __put_user((fds[nchunks] + j)->revents, &ufds->revents); err = fdcount; 479 if (!fdcount && signal_pending(current)) err = -EINTR; out_fds1: 483 if (nleft) free_page((unsigned long)(fds[nchunks])); out_fds: 486 for (i=0; i < nchunks; i++) free_page((unsigned long)(fds[i])); 488 if (nfds != 0) kfree(fds); out: poll_freewait(&table); 492 return err; }