/*
	kmod, the new module loader (replaces kerneld)
	Kirk Petersen

	Reorganized not to be a daemon by Adam Richter, with guidance
	from Greg Zornetzer.

	Modified to avoid chroot and file sharing problems.
	Mikael Pettersson

	Limit the concurrent number of kmod modprobes to catch loops from
	"modprobe needs a service that is in a module".
	Keith Owens <kaos@ocs.com.au> December 1999

	Unblock all signals when we exec a usermode process.
	Shuu Yamaguchi <shuu@wondernetworkresources.com> December 2000
*/

#define __KERNEL_SYSCALLS__

#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
#include <linux/smp_lock.h>
#include <linux/completion.h>

#include <asm/uaccess.h>

extern int max_threads;

static inline void use_init_fs_context(void)
{
	struct fs_struct *our_fs, *init_fs;
	struct dentry *root, *pwd;
	struct vfsmount *rootmnt, *pwdmnt;

	/*
	 * Make modprobe's fs context be a copy of init's.
	 *
	 * We cannot use the user's fs context, because it
	 * may have a different root than init.
	 * Since init was created with CLONE_FS, we can grab
	 * its fs context from "init_task".
	 *
	 * The fs context has to be a copy. If it is shared
	 * with init, then any chdir() call in modprobe will
	 * also affect init and the other threads sharing
	 * init_task's fs context.
	 *
	 * We created the exec_modprobe thread without CLONE_FS,
	 * so we can update the fields in our fs context freely.
	 */
	init_fs = init_task.fs;
	read_lock(&init_fs->lock);
	rootmnt = mntget(init_fs->rootmnt);
	root = dget(init_fs->root);
	pwdmnt = mntget(init_fs->pwdmnt);
	pwd = dget(init_fs->pwd);
	read_unlock(&init_fs->lock);

	/* FIXME - unsafe ->fs access */
	our_fs = current->fs;
	our_fs->umask = init_fs->umask;
	set_fs_root(our_fs, rootmnt, root);
	set_fs_pwd(our_fs, pwdmnt, pwd);
	write_lock(&our_fs->lock);
	if (our_fs->altroot) {
		struct vfsmount *mnt = our_fs->altrootmnt;
		struct dentry *dentry = our_fs->altroot;
		our_fs->altrootmnt = NULL;
		our_fs->altroot = NULL;
		write_unlock(&our_fs->lock);
		dput(dentry);
		mntput(mnt);
	} else
		write_unlock(&our_fs->lock);
	dput(root);
	mntput(rootmnt);
	dput(pwd);
	mntput(pwdmnt);
}

int exec_usermodehelper(char *program_path, char *argv[], char *envp[])
{
	int i;
	struct task_struct *curtask = current;

	curtask->session = 1;
	curtask->pgrp = 1;

	use_init_fs_context();

	/* Prevent parent user process from sending signals to child.
	   Otherwise, if the modprobe program does not exist, it might
	   be possible to get a user defined signal handler to execute
	   as the super user right after the execve fails if you time
	   the signal just right. */
	spin_lock_irq(&curtask->sigmask_lock);
	sigemptyset(&curtask->blocked);
	flush_signals(curtask);
	flush_signal_handlers(curtask);
	recalc_sigpending(curtask);
	spin_unlock_irq(&curtask->sigmask_lock);

	for (i = 0; i < curtask->files->max_fds; i++) {
		if (curtask->files->fd[i]) close(i);
	}

	/* Drop the "current user" thing */
	{
		struct user_struct *user = curtask->user;
		curtask->user = INIT_USER;
		atomic_inc(&INIT_USER->__count);
		atomic_inc(&INIT_USER->processes);
		atomic_dec(&user->processes);
		free_uid(user);
	}

	/* Give kmod all effective privileges.. */
	curtask->euid = curtask->fsuid = 0;
	curtask->egid = curtask->fsgid = 0;
	cap_set_full(curtask->cap_effective);

	/* Allow execve args to be in kernel space. */
	set_fs(KERNEL_DS);

	/* Go, go, go... */
	if (execve(program_path, argv, envp) < 0)
		return -errno;
	return 0;
}

#ifdef CONFIG_KMOD

/* modprobe_path is set via /proc/sys. */
char modprobe_path[256] = "/sbin/modprobe";

static int exec_modprobe(void * module_name)
{
	static char * envp[] = { "HOME=/", "TERM=linux",
				 "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
	char *argv[] = { modprobe_path, "-s", "-k", "--",
			 (char*)module_name, NULL };
	int ret;

	ret = exec_usermodehelper(modprobe_path, argv, envp);
	if (ret) {
		printk(KERN_ERR
		       "kmod: failed to exec %s -s -k %s, errno = %d\n",
		       modprobe_path, (char*) module_name, errno);
	}
	return ret;
}

/**
 * request_module - try to load a kernel module
 * @module_name: Name of module
 *
 * Load a module using the user mode module loader. The function returns
 * zero on success or a negative errno code on failure. Note that a
 * successful module load does not mean the module did not then unload
 * and exit on an error of its own. Callers must check that the service
 * they requested is now available, not blindly invoke it.
 *
 * If module auto-loading support is disabled then this function
 * becomes a no-operation.
 */
int request_module(const char * module_name)
{
	pid_t pid;
	int waitpid_result;
	sigset_t tmpsig;
	int i;
	static atomic_t kmod_concurrent = ATOMIC_INIT(0);
#define MAX_KMOD_CONCURRENT 50	/* Completely arbitrary value - KAO */
	static int kmod_loop_msg;

	/* Don't allow request_module() before the root fs is mounted! */
	if ( ! current->fs->root ) {
		printk(KERN_ERR "request_module[%s]: Root fs not mounted\n",
		       module_name);
		return -EPERM;
	}

	/* If modprobe needs a service that is in a module, we get a recursive
	 * loop.  Limit the number of running kmod threads to max_threads/2 or
	 * MAX_KMOD_CONCURRENT, whichever is the smaller.  A cleaner method
	 * would be to run the parents of this process, counting how many times
	 * kmod was invoked.  That would mean accessing the internals of the
	 * process tables to get the command line, proc_pid_cmdline is static
	 * and it is not worth changing the proc code just to handle this case.
	 * KAO.
	 */
	i = max_threads/2;
	if (i > MAX_KMOD_CONCURRENT)
		i = MAX_KMOD_CONCURRENT;
	atomic_inc(&kmod_concurrent);
	if (atomic_read(&kmod_concurrent) > i) {
		if (kmod_loop_msg++ < 5)
			printk(KERN_ERR
			       "kmod: runaway modprobe loop assumed and stopped\n");
		atomic_dec(&kmod_concurrent);
		return -ENOMEM;
	}

	pid = kernel_thread(exec_modprobe, (void*) module_name, 0);
	if (pid < 0) {
		printk(KERN_ERR "request_module[%s]: fork failed, errno %d\n",
		       module_name, -pid);
		atomic_dec(&kmod_concurrent);
		return pid;
	}

	/* Block everything but SIGKILL/SIGSTOP */
	spin_lock_irq(&current->sigmask_lock);
	tmpsig = current->blocked;
	siginitsetinv(&current->blocked, sigmask(SIGKILL) | sigmask(SIGSTOP));
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);

	waitpid_result = waitpid(pid, NULL, __WCLONE);
	atomic_dec(&kmod_concurrent);

	/* Allow signals again.. */
	spin_lock_irq(&current->sigmask_lock);
	current->blocked = tmpsig;
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);

	if (waitpid_result != pid) {
		printk(KERN_ERR "request_module[%s]: waitpid(%d,...) failed, errno %d\n",
		       module_name, pid, -waitpid_result);
	}
	return 0;
}
#endif /* CONFIG_KMOD */
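/*
 * Illustrative only -- a minimal sketch, not part of this file, of the
 * caller pattern the request_module() documentation above asks for:
 * request the module, then look the service up again rather than assuming
 * the load succeeded.  The foo_proto table, foo_find_proto() and the
 * "foo-proto-%d" module naming scheme are hypothetical stand-ins.
 */
#if 0	/* example, not compiled */
static struct foo_proto *foo_get_proto(int id)
{
	struct foo_proto *p = foo_find_proto(id);	/* hypothetical lookup */

#ifdef CONFIG_KMOD
	if (p == NULL) {
		char name[32];

		sprintf(name, "foo-proto-%d", id);
		request_module(name);	/* return value alone proves nothing */
		p = foo_find_proto(id);	/* must re-check availability */
	}
#endif
	return p;			/* may still be NULL */
}
#endif	/* example */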
#ifdef CONFIG_HOTPLUG
/*
	hotplug path is set via /proc/sys
	invoked by hotplug-aware bus drivers,
	with exec_usermodehelper and some thread-spawner

	argv [0] = hotplug_path;
	argv [1] = "usb", "scsi", "pci", "network", etc;
	... plus optional type-specific parameters
	argv [n] = 0;

	envp [*] = HOME, PATH; optional type-specific parameters

	a hotplug bus should invoke this for device add/remove
	events.  the command is expected to load drivers when
	necessary, and may perform additional system setup.
*/
char hotplug_path[256] = "/sbin/hotplug";

EXPORT_SYMBOL(hotplug_path);

#endif /* CONFIG_HOTPLUG */
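/*
 * Illustrative only -- a rough sketch of how a hotplug-aware bus might
 * build the argv/envp layout described above and hand it to
 * call_usermodehelper() (defined below).  The bus name "foo" and the
 * ACTION/DEVICE variables are placeholders for whatever type-specific
 * parameters a real bus passes.  call_usermodehelper() does not return
 * until keventd has spawned the helper (CLONE_VFORK), so stack buffers
 * are safe to pass here.
 */
#if 0	/* example, not compiled */
static void foo_run_hotplug(char *action, char *device)
{
	char *argv[3], *envp[5];
	char action_buf[32], device_buf[64];

	if (!hotplug_path[0])
		return;

	argv[0] = hotplug_path;
	argv[1] = "foo";		/* "usb", "scsi", "pci", ... */
	argv[2] = NULL;

	envp[0] = "HOME=/";
	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	sprintf(action_buf, "ACTION=%s", action);
	envp[2] = action_buf;
	sprintf(device_buf, "DEVICE=%s", device);
	envp[3] = device_buf;
	envp[4] = NULL;

	call_usermodehelper(argv[0], argv, envp);
}
#endif	/* example */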
struct subprocess_info {
	struct completion *complete;
	char *path;
	char **argv;
	char **envp;
	pid_t retval;
};

/*
 * This is the task which runs the usermode application
 */
static int ____call_usermodehelper(void *data)
{
	struct subprocess_info *sub_info = data;
	int retval;

	retval = -EPERM;
	if (current->fs->root)
		retval = exec_usermodehelper(sub_info->path,
					     sub_info->argv, sub_info->envp);

	/* Exec failed? */
	sub_info->retval = (pid_t)retval;
	do_exit(0);
}

/*
 * This is run by keventd.
 */
static void __call_usermodehelper(void *data)
{
	struct subprocess_info *sub_info = data;
	pid_t pid;

	/*
	 * CLONE_VFORK: wait until the usermode helper has execve'd successfully
	 * We need the data structures to stay around until that is done.
	 */
	pid = kernel_thread(____call_usermodehelper, sub_info,
			    CLONE_VFORK | SIGCHLD);
	if (pid < 0)
		sub_info->retval = pid;
	complete(sub_info->complete);
}

/**
 * call_usermodehelper - start a usermode application
 * @path: pathname for the application
 * @argv: null-terminated argument list
 * @envp: null-terminated environment list
 *
 * Runs a user-space application.  The application is started
 * asynchronously.  It runs as a child of keventd.  It runs with full root
 * capabilities.  keventd silently reaps the child when it exits.
 *
 * Must be called from process context.  Returns zero on success, else a
 * negative error code.
 */
int call_usermodehelper(char *path, char **argv, char **envp)
{
	DECLARE_COMPLETION(work);
	struct subprocess_info sub_info = {
		complete:	&work,
		path:		path,
		argv:		argv,
		envp:		envp,
		retval:		0,
	};
	struct tq_struct tqs = {
		routine:	__call_usermodehelper,
		data:		&sub_info,
	};

	if (path[0] == '\0')
		goto out;

	if (current_is_keventd()) {
		/* We can't wait on keventd! */
		__call_usermodehelper(&sub_info);
	} else {
		schedule_task(&tqs);
		wait_for_completion(&work);
	}
out:
	return sub_info.retval;
}

/*
 * This is for the serialisation of device probe() functions
 * against device open() functions
 */
static DECLARE_MUTEX(dev_probe_sem);

void dev_probe_lock(void)
{
	down(&dev_probe_sem);
}

void dev_probe_unlock(void)
{
	up(&dev_probe_sem);
}

EXPORT_SYMBOL(exec_usermodehelper);
EXPORT_SYMBOL(call_usermodehelper);

#ifdef CONFIG_KMOD
EXPORT_SYMBOL(request_module);
#endif
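/*
 * Illustrative only -- a minimal sketch of the probe()/open() serialisation
 * that dev_probe_lock()/dev_probe_unlock() provide.  foo_bus_probe(),
 * foo_chrdev_open(), foo_setup() and foo_lookup() are hypothetical; the
 * point is that both paths bracket their work with the same lock, so an
 * open() never sees a half-probed device.
 */
#if 0	/* example, not compiled */
static void foo_bus_probe(struct foo_device *dev)
{
	dev_probe_lock();
	foo_setup(dev);				/* hypothetical binding work */
	dev_probe_unlock();
}

static int foo_chrdev_open(struct inode *inode, struct file *file)
{
	struct foo_device *dev;

	dev_probe_lock();
	dev = foo_lookup(MINOR(inode->i_rdev));	/* hypothetical lookup */
	dev_probe_unlock();

	return dev ? 0 : -ENODEV;
}
#endif	/* example */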