/*
 *  linux/fs/file_table.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/eventpoll.h>
#include <linux/mount.h>
#include <linux/cdev.h>

/* sysctl tunables... */
struct files_stat_struct files_stat = {
	.max_files = NR_FILE
};

/* public *and* exported. Not pretty! */
spinlock_t __cacheline_aligned_in_smp files_lock = SPIN_LOCK_UNLOCKED;

static spinlock_t filp_count_lock = SPIN_LOCK_UNLOCKED;

/* slab constructors and destructors are called from arbitrary
 * context and must be fully threaded - use a local spinlock
 * to protect files_stat.nr_files
 */
void filp_ctor(void * objp, struct kmem_cache_s *cachep, unsigned long cflags)
{
	if ((cflags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR) {
		unsigned long flags;
		spin_lock_irqsave(&filp_count_lock, flags);
		files_stat.nr_files++;
		spin_unlock_irqrestore(&filp_count_lock, flags);
	}
}

void filp_dtor(void * objp, struct kmem_cache_s *cachep, unsigned long dflags)
{
	unsigned long flags;
	spin_lock_irqsave(&filp_count_lock, flags);
	files_stat.nr_files--;
	spin_unlock_irqrestore(&filp_count_lock, flags);
}

static inline void file_free(struct file *f)
{
	kmem_cache_free(filp_cachep, f);
}

/* Find an unused file structure and return a pointer to it.
 * Returns NULL if there are no more free file structures or
 * we run out of memory.
 */
struct file *get_empty_filp(void)
{
	static int old_max = 0;
	struct file * f;

	/*
	 * Privileged users can go above max_files
	 */
	if (files_stat.nr_files < files_stat.max_files ||
	    capable(CAP_SYS_ADMIN)) {
		f = kmem_cache_alloc(filp_cachep, GFP_KERNEL);
		if (f) {
			memset(f, 0, sizeof(*f));
			if (security_file_alloc(f)) {
				file_free(f);
				goto fail;
			}
			eventpoll_init_file(f);
			atomic_set(&f->f_count, 1);
			f->f_uid = current->fsuid;
			f->f_gid = current->fsgid;
			f->f_owner.lock = RW_LOCK_UNLOCKED;
			/* f->f_version: 0 */
			INIT_LIST_HEAD(&f->f_list);
			return f;
		}
	}

	/* Ran out of filps - report that */
	if (files_stat.max_files >= old_max) {
		printk(KERN_INFO "VFS: file-max limit %d reached\n",
					files_stat.max_files);
		old_max = files_stat.max_files;
	} else {
		/* Big problems... */
		printk(KERN_WARNING "VFS: filp allocation failed\n");
	}
fail:
	return NULL;
}
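/*
 * A minimal usage sketch for get_empty_filp(), assuming a caller in the
 * style of dentry_open() in fs/open.c; the variable names here are
 * illustrative only, not part of this file:
 *
 *	struct file *f = get_empty_filp();
 *	if (!f)
 *		return ERR_PTR(-ENFILE);
 *	f->f_flags = flags;
 *	f->f_mode = (flags+1) & O_ACCMODE;
 *	f->f_dentry = dentry;
 *	f->f_vfsmnt = mnt;
 *	f->f_op = fops_get(dentry->d_inode->i_fop);
 *
 * The caller owns the single reference set up above (f_count == 1) and
 * drops it with fput() when done.
 */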
/*
 * Clear and initialize a (private) struct file for the given dentry,
 * allocate the security structure, and call the open function (if any).
 * The file should be released using close_private_file.
 */
int open_private_file(struct file *filp, struct dentry *dentry, int flags)
{
	int error;

	memset(filp, 0, sizeof(*filp));
	eventpoll_init_file(filp);
	filp->f_flags = flags;
	filp->f_mode = (flags+1) & O_ACCMODE;
	atomic_set(&filp->f_count, 1);
	filp->f_dentry = dentry;
	filp->f_uid = current->fsuid;
	filp->f_gid = current->fsgid;
	filp->f_op = dentry->d_inode->i_fop;
	INIT_LIST_HEAD(&filp->f_list);
	error = security_file_alloc(filp);
	if (!error)
		if (filp->f_op && filp->f_op->open) {
			error = filp->f_op->open(dentry->d_inode, filp);
			if (error)
				security_file_free(filp);
		}
	return error;
}

/*
 * Release a private file by calling the release function (if any) and
 * freeing the security structure.
 */
void close_private_file(struct file *file)
{
	struct inode * inode = file->f_dentry->d_inode;

	if (file->f_op && file->f_op->release)
		file->f_op->release(inode, file);
	security_file_free(file);
}

void fput(struct file *file)
{
	if (atomic_dec_and_test(&file->f_count))
		__fput(file);
}

/* __fput is called from task context when aio completion releases the
 * last use of a struct file *.  Do not use otherwise.
 */
void __fput(struct file *file)
{
	struct dentry *dentry = file->f_dentry;
	struct vfsmount *mnt = file->f_vfsmnt;
	struct inode *inode = dentry->d_inode;

	/*
	 * The function eventpoll_release() should be the first called
	 * in the file cleanup chain.
	 */
	eventpoll_release(file);
	locks_remove_flock(file);

	if (file->f_op && file->f_op->release)
		file->f_op->release(inode, file);
	security_file_free(file);
	if (unlikely(inode->i_cdev != NULL))
		cdev_put(inode->i_cdev);
	fops_put(file->f_op);
	if (file->f_mode & FMODE_WRITE)
		put_write_access(inode);
	file->f_dentry = NULL;
	file->f_vfsmnt = NULL;
	file_kill(file);
	file_free(file);
	dput(dentry);
	mntput(mnt);
}

struct file *fget(unsigned int fd)
{
	struct file *file;
	struct files_struct *files = current->files;

	spin_lock(&files->file_lock);
	file = fcheck(fd);
	if (file)
		get_file(file);
	spin_unlock(&files->file_lock);
	return file;
}

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 * You can use this only if it is guaranteed that the current task already
 * holds a refcnt to that file.  That check has to be done at fget() only
 * and a flag is returned to be passed to the corresponding fput_light().
 * There must not be a cloning between an fget_light/fput_light pair.
 */
struct file *fget_light(unsigned int fd, int *fput_needed)
{
	struct file *file;
	struct files_struct *files = current->files;

	*fput_needed = 0;
	if (likely((atomic_read(&files->count) == 1))) {
		file = fcheck(fd);
	} else {
		spin_lock(&files->file_lock);
		file = fcheck(fd);
		if (file) {
			get_file(file);
			*fput_needed = 1;
		}
		spin_unlock(&files->file_lock);
	}
	return file;
}
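/*
 * A minimal usage sketch for the fget_light()/fput_light() pair, assuming
 * an fd-based syscall caller in the style of sys_read(); do_stuff() is a
 * placeholder, not a real helper:
 *
 *	int fput_needed;
 *	struct file *file = fget_light(fd, &fput_needed);
 *
 *	if (file) {
 *		ret = do_stuff(file);
 *		fput_light(file, fput_needed);
 *	}
 *
 * fput_light() (see include/linux/file.h) calls fput() only when
 * fput_needed was set, i.e. only when the fd table was shared and a real
 * reference was taken above.
 */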
void put_filp(struct file *file)
{
	if (atomic_dec_and_test(&file->f_count)) {
		security_file_free(file);
		file_kill(file);
		file_free(file);
	}
}

void file_move(struct file *file, struct list_head *list)
{
	if (!list)
		return;
	file_list_lock();
	list_move(&file->f_list, list);
	file_list_unlock();
}

void file_kill(struct file *file)
{
	if (!list_empty(&file->f_list)) {
		file_list_lock();
		list_del_init(&file->f_list);
		file_list_unlock();
	}
}

int fs_may_remount_ro(struct super_block *sb)
{
	struct list_head *p;

	/* Check that no files are currently opened for writing. */
	file_list_lock();
	list_for_each(p, &sb->s_files) {
		struct file *file = list_entry(p, struct file, f_list);
		struct inode *inode = file->f_dentry->d_inode;

		/* File with pending delete? */
		if (inode->i_nlink == 0)
			goto too_bad;

		/* Writeable file? */
		if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
			goto too_bad;
	}
	file_list_unlock();
	return 1; /* Tis' cool bro. */
too_bad:
	file_list_unlock();
	return 0;
}

void __init files_init(unsigned long mempages)
{
	int n;

	/* One file with associated inode and dcache is very roughly 1K.
	 * By default don't use more than 10% of our memory for files.
	 */
	n = (mempages * (PAGE_SIZE / 1024)) / 10;
	files_stat.max_files = n;
	if (files_stat.max_files < NR_FILE)
		files_stat.max_files = NR_FILE;
}
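/*
 * Worked example for the sizing heuristic above, assuming 4K pages and
 * 128MB of memory (mempages = 32768):
 *
 *	n = (32768 * (4096 / 1024)) / 10 = 13107
 *
 * i.e. roughly 13K files at about 1K each, or 10% of memory.  On machines
 * small enough that n falls below NR_FILE, the NR_FILE floor applies
 * instead.
 */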