author     Ingo Molnar <mingo@elte.hu>  2008-10-17 12:25:43 +0200
committer  Ingo Molnar <mingo@elte.hu>  2008-10-17 12:25:43 +0200
commit     0f1f6dec9593c8319714c927dafb443548a1452a (patch)
tree       74f887dc86747523e82f08900501073f451996cc /kernel
parent     8cd162ce230b154e564a1285bb5f89fcf73f0dce (diff)
parent     2e532d68a2b3e2aa6b19731501222069735c741c (diff)
Merge branch 'linus' into sched/urgent
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/acct.c            |   2
-rw-r--r--  kernel/auditsc.c         |   9
-rw-r--r--  kernel/cgroup.c          |   4
-rw-r--r--  kernel/compat.c          |  58
-rw-r--r--  kernel/dma-coherent.c    |   2
-rw-r--r--  kernel/dma.c             |   2
-rw-r--r--  kernel/exit.c            |   9
-rw-r--r--  kernel/fork.c            |   5
-rw-r--r--  kernel/irq/manage.c      |   9
-rw-r--r--  kernel/kallsyms.c        |   1
-rw-r--r--  kernel/kmod.c            |  67
-rw-r--r--  kernel/kprobes.c         |   2
-rw-r--r--  kernel/ksysfs.c          |  35
-rw-r--r--  kernel/module.c          |  45
-rw-r--r--  kernel/panic.c           |  65
-rw-r--r--  kernel/power/disk.c      |  11
-rw-r--r--  kernel/power/main.c      |   7
-rw-r--r--  kernel/power/user.c      |  10
-rw-r--r--  kernel/printk.c          |  42
-rw-r--r--  kernel/profile.c         |  41
-rw-r--r--  kernel/resource.c        | 150
-rw-r--r--  kernel/sched_debug.c     |   2
-rw-r--r--  kernel/softirq.c         |  13
-rw-r--r--  kernel/softlockup.c      |   2
-rw-r--r--  kernel/sys.c             |  38
-rw-r--r--  kernel/sys_ni.c          |   6
-rw-r--r--  kernel/sysctl.c          | 123
-rw-r--r--  kernel/time/Kconfig      |   1
-rw-r--r--  kernel/time/tick-sched.c |  13
-rw-r--r--  kernel/timer.c           |   1
-rw-r--r--  kernel/utsname_sysctl.c  |   5
-rw-r--r--  kernel/wait.c            |  14
-rw-r--r--  kernel/workqueue.c       |   2
33 files changed, 551 insertions(+), 245 deletions(-)
diff --git a/kernel/acct.c b/kernel/acct.c
index dd68b905941..f6006a60df5 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -548,7 +548,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
#endif
spin_lock_irq(&current->sighand->siglock);
- tty = current->signal->tty;
+ tty = current->signal->tty; /* Safe as we hold the siglock */
ac.ac_tty = tty ? old_encode_dev(tty_devnum(tty)) : 0;
ac.ac_utime = encode_comp_t(jiffies_to_AHZ(cputime_to_jiffies(pacct->ac_utime)));
ac.ac_stime = encode_comp_t(jiffies_to_AHZ(cputime_to_jiffies(pacct->ac_stime)));
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 59cedfb040e..cf5bc2f5f9c 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -246,8 +246,8 @@ static int audit_match_perm(struct audit_context *ctx, int mask)
unsigned n;
if (unlikely(!ctx))
return 0;
-
n = ctx->major;
+
switch (audit_classify_syscall(ctx->arch, n)) {
case 0: /* native */
if ((mask & AUDIT_PERM_WRITE) &&
@@ -1204,13 +1204,13 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
(context->return_valid==AUDITSC_SUCCESS)?"yes":"no",
context->return_code);
- mutex_lock(&tty_mutex);
- read_lock(&tasklist_lock);
+ spin_lock_irq(&tsk->sighand->siglock);
if (tsk->signal && tsk->signal->tty && tsk->signal->tty->name)
tty = tsk->signal->tty->name;
else
tty = "(none)";
- read_unlock(&tasklist_lock);
+ spin_unlock_irq(&tsk->sighand->siglock);
+
audit_log_format(ab,
" a0=%lx a1=%lx a2=%lx a3=%lx items=%d"
" ppid=%d pid=%d auid=%u uid=%u gid=%u"
@@ -1230,7 +1230,6 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
context->egid, context->sgid, context->fsgid, tty,
tsk->sessionid);
- mutex_unlock(&tty_mutex);
audit_log_task_info(ab, tsk);
if (context->filterkey) {
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index a0123d75ec9..8c6e1c17e6d 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2735,6 +2735,8 @@ void cgroup_fork_callbacks(struct task_struct *child)
* Called on every change to mm->owner. mm_init_owner() does not
* invoke this routine, since it assigns the mm->owner the first time
* and does not change it.
+ *
+ * The callbacks are invoked with mmap_sem held in read mode.
*/
void cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new)
{
@@ -2750,7 +2752,7 @@ void cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new)
if (oldcgrp == newcgrp)
continue;
if (ss->mm_owner_changed)
- ss->mm_owner_changed(ss, oldcgrp, newcgrp);
+ ss->mm_owner_changed(ss, oldcgrp, newcgrp, new);
}
}
}
diff --git a/kernel/compat.c b/kernel/compat.c
index 32c254a8ab9..143990e48cb 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -26,6 +26,64 @@
#include <asm/uaccess.h>
+/*
+ * Note that the native side is already converted to a timespec, because
+ * that's what we want anyway.
+ */
+static int compat_get_timeval(struct timespec *o,
+ struct compat_timeval __user *i)
+{
+ long usec;
+
+ if (get_user(o->tv_sec, &i->tv_sec) ||
+ get_user(usec, &i->tv_usec))
+ return -EFAULT;
+ o->tv_nsec = usec * 1000;
+ return 0;
+}
+
+static int compat_put_timeval(struct compat_timeval __user *o,
+ struct timeval *i)
+{
+ return (put_user(i->tv_sec, &o->tv_sec) ||
+ put_user(i->tv_usec, &o->tv_usec)) ? -EFAULT : 0;
+}
+
+asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv,
+ struct timezone __user *tz)
+{
+ if (tv) {
+ struct timeval ktv;
+ do_gettimeofday(&ktv);
+ if (compat_put_timeval(tv, &ktv))
+ return -EFAULT;
+ }
+ if (tz) {
+ if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
+ struct timezone __user *tz)
+{
+ struct timespec kts;
+ struct timezone ktz;
+
+ if (tv) {
+ if (compat_get_timeval(&kts, tv))
+ return -EFAULT;
+ }
+ if (tz) {
+ if (copy_from_user(&ktz, tz, sizeof(ktz)))
+ return -EFAULT;
+ }
+
+ return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
+}
+
int get_compat_timespec(struct timespec *ts, const struct compat_timespec __user *cts)
{
return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) ||
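The new compat_get_timeval() above copies the seconds field as-is and scales the 32-bit microsecond field to nanoseconds before do_sys_settimeofday() sees a native timespec. A minimal userspace sketch of that same conversion, using the standard struct timeval/timespec rather than the kernel's compat types (names here are illustrative, not kernel API):

#include <stdio.h>
#include <sys/time.h>   /* struct timeval  */
#include <time.h>       /* struct timespec */

/* Mirror of the compat_get_timeval() conversion: copy seconds, usec -> nsec. */
static void timeval_to_timespec(const struct timeval *tv, struct timespec *ts)
{
	ts->tv_sec  = tv->tv_sec;
	ts->tv_nsec = tv->tv_usec * 1000L;	/* 1 us = 1000 ns */
}

int main(void)
{
	struct timeval tv;
	struct timespec ts;

	gettimeofday(&tv, NULL);
	timeval_to_timespec(&tv, &ts);
	printf("%ld.%06ld s (timeval) -> %ld.%09ld s (timespec)\n",
	       (long)tv.tv_sec, (long)tv.tv_usec,
	       (long)ts.tv_sec, (long)ts.tv_nsec);
	return 0;
}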
diff --git a/kernel/dma-coherent.c b/kernel/dma-coherent.c
index c1d4d5b4c61..f013a0c2e11 100644
--- a/kernel/dma-coherent.c
+++ b/kernel/dma-coherent.c
@@ -124,6 +124,7 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
}
return (mem != NULL);
}
+EXPORT_SYMBOL(dma_alloc_from_coherent);
/**
* dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
@@ -151,3 +152,4 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
}
return 0;
}
+EXPORT_SYMBOL(dma_release_from_coherent);
diff --git a/kernel/dma.c b/kernel/dma.c
index d2c60a82279..f903189c530 100644
--- a/kernel/dma.c
+++ b/kernel/dma.c
@@ -1,4 +1,4 @@
-/* $Id: dma.c,v 1.7 1994/12/28 03:35:33 root Exp root $
+/*
* linux/kernel/dma.c: A DMA channel allocator. Inspired by linux/kernel/irq.c.
*
* Written by Hennus Bergman, 1992.
diff --git a/kernel/exit.c b/kernel/exit.c
index 85a83c83185..0ef4673e351 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -640,24 +640,23 @@ retry:
assign_new_owner:
BUG_ON(c == p);
get_task_struct(c);
+ read_unlock(&tasklist_lock);
+ down_write(&mm->mmap_sem);
/*
* The task_lock protects c->mm from changing.
* We always want mm->owner->mm == mm
*/
task_lock(c);
- /*
- * Delay read_unlock() till we have the task_lock()
- * to ensure that c does not slip away underneath us
- */
- read_unlock(&tasklist_lock);
if (c->mm != mm) {
task_unlock(c);
+ up_write(&mm->mmap_sem);
put_task_struct(c);
goto retry;
}
cgroup_mm_owner_callbacks(mm->owner, c);
mm->owner = c;
task_unlock(c);
+ up_write(&mm->mmap_sem);
put_task_struct(c);
}
#endif /* CONFIG_MM_OWNER */
diff --git a/kernel/fork.c b/kernel/fork.c
index 7ce2ebe8479..30de644a40c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -802,6 +802,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
sig->leader = 0; /* session leadership doesn't inherit */
sig->tty_old_pgrp = NULL;
+ sig->tty = NULL;
sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
sig->gtime = cputime_zero;
@@ -838,6 +839,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
void __cleanup_signal(struct signal_struct *sig)
{
exit_thread_group_keys(sig);
+ tty_kref_put(sig->tty);
kmem_cache_free(signal_cachep, sig);
}
@@ -1227,7 +1229,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->nsproxy->pid_ns->child_reaper = p;
p->signal->leader_pid = pid;
- p->signal->tty = current->signal->tty;
+ tty_kref_put(p->signal->tty);
+ p->signal->tty = tty_kref_get(current->signal->tty);
set_task_pgrp(p, task_pgrp_nr(current));
set_task_session(p, task_session_nr(current));
attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0314074fa23..60c49e32439 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -89,7 +89,14 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
set_balance_irq_affinity(irq, cpumask);
#ifdef CONFIG_GENERIC_PENDING_IRQ
- set_pending_irq(irq, cpumask);
+ if (desc->status & IRQ_MOVE_PCNTXT) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ desc->chip->set_affinity(irq, cpumask);
+ spin_unlock_irqrestore(&desc->lock, flags);
+ } else
+ set_pending_irq(irq, cpumask);
#else
desc->affinity = cpumask;
desc->chip->set_affinity(irq, cpumask);
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 38fc10ac754..5072cf1685a 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -260,7 +260,6 @@ const char *kallsyms_lookup(unsigned long addr,
/* see if it's in a module */
return module_address_lookup(addr, symbolsize, offset, modname,
namebuf);
- return NULL;
}
int lookup_symbol_name(unsigned long addr, char *symname)
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 2456d1a0bef..3d3c3ea3a02 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -113,7 +113,7 @@ int request_module(const char *fmt, ...)
return ret;
}
EXPORT_SYMBOL(request_module);
-#endif /* CONFIG_KMOD */
+#endif /* CONFIG_MODULES */
struct subprocess_info {
struct work_struct work;
@@ -265,7 +265,7 @@ static void __call_usermodehelper(struct work_struct *work)
}
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
/*
* If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
* (used for preventing user land processes from being created after the user
@@ -288,39 +288,37 @@ static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);
*/
#define RUNNING_HELPERS_TIMEOUT (5 * HZ)
-static int usermodehelper_pm_callback(struct notifier_block *nfb,
- unsigned long action,
- void *ignored)
+/**
+ * usermodehelper_disable - prevent new helpers from being started
+ */
+int usermodehelper_disable(void)
{
long retval;
- switch (action) {
- case PM_HIBERNATION_PREPARE:
- case PM_SUSPEND_PREPARE:
- usermodehelper_disabled = 1;
- smp_mb();
- /*
- * From now on call_usermodehelper_exec() won't start any new
- * helpers, so it is sufficient if running_helpers turns out to
- * be zero at one point (it may be increased later, but that
- * doesn't matter).
- */
- retval = wait_event_timeout(running_helpers_waitq,
+ usermodehelper_disabled = 1;
+ smp_mb();
+ /*
+ * From now on call_usermodehelper_exec() won't start any new
+ * helpers, so it is sufficient if running_helpers turns out to
+ * be zero at one point (it may be increased later, but that
+ * doesn't matter).
+ */
+ retval = wait_event_timeout(running_helpers_waitq,
atomic_read(&running_helpers) == 0,
RUNNING_HELPERS_TIMEOUT);
- if (retval) {
- return NOTIFY_OK;
- } else {
- usermodehelper_disabled = 0;
- return NOTIFY_BAD;
- }
- case PM_POST_HIBERNATION:
- case PM_POST_SUSPEND:
- usermodehelper_disabled = 0;
- return NOTIFY_OK;
- }
+ if (retval)
+ return 0;
- return NOTIFY_DONE;
+ usermodehelper_disabled = 0;
+ return -EAGAIN;
+}
+
+/**
+ * usermodehelper_enable - allow new helpers to be started again
+ */
+void usermodehelper_enable(void)
+{
+ usermodehelper_disabled = 0;
}
static void helper_lock(void)
@@ -334,18 +332,12 @@ static void helper_unlock(void)
if (atomic_dec_and_test(&running_helpers))
wake_up(&running_helpers_waitq);
}
-
-static void register_pm_notifier_callback(void)
-{
- pm_notifier(usermodehelper_pm_callback, 0);
-}
-#else /* CONFIG_PM */
+#else /* CONFIG_PM_SLEEP */
#define usermodehelper_disabled 0
static inline void helper_lock(void) {}
static inline void helper_unlock(void) {}
-static inline void register_pm_notifier_callback(void) {}
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
/**
* call_usermodehelper_setup - prepare to call a usermode helper
@@ -515,5 +507,4 @@ void __init usermodehelper_init(void)
{
khelper_wq = create_singlethread_workqueue("khelper");
BUG_ON(!khelper_wq);
- register_pm_notifier_callback();
}
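The PM-notifier callback above is replaced by an explicit usermodehelper_disable()/usermodehelper_enable() pair that the suspend and hibernation paths now call directly (see the kernel/power diffs below). A rough userspace analogue of the disable logic, assuming pthreads and a polling loop in place of the kernel's wait_event_timeout(); this is a sketch of the pattern, not the kernel code:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int helpers_disabled;
static atomic_int running_helpers;

/* Analogue of a helper invocation: refuse to start while disabled. */
static void *helper_thread(void *arg)
{
	(void)arg;
	if (atomic_load(&helpers_disabled))
		return NULL;
	atomic_fetch_add(&running_helpers, 1);
	sleep(1);				/* pretend to run the usermode helper */
	atomic_fetch_sub(&running_helpers, 1);
	return NULL;
}

/* Analogue of usermodehelper_disable(): block new helpers, wait for old ones. */
static int helpers_disable(void)
{
	int i;

	atomic_store(&helpers_disabled, 1);
	for (i = 0; i < 5; i++) {		/* crude stand-in for wait_event_timeout() */
		if (atomic_load(&running_helpers) == 0)
			return 0;
		sleep(1);
	}
	atomic_store(&helpers_disabled, 0);	/* timed out: back off, like -EAGAIN */
	return -1;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, helper_thread, NULL);
	printf("disable: %s\n", helpers_disable() ? "timed out" : "ok");
	atomic_store(&helpers_disabled, 0);	/* analogue of usermodehelper_enable() */
	pthread_join(t, NULL);
	return 0;
}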
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 75bc2cd9ebc..8b57a2597f2 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -404,7 +404,7 @@ void kretprobe_hash_lock(struct task_struct *tsk,
spin_lock_irqsave(hlist_lock, *flags);
}
-void kretprobe_table_lock(unsigned long hash, unsigned long *flags)
+static void kretprobe_table_lock(unsigned long hash, unsigned long *flags)
{
spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
spin_lock_irqsave(hlist_lock, *flags);
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index e53bc30e9ba..08dd8ed86c7 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kexec.h>
+#include <linux/profile.h>
#include <linux/sched.h>
#define KERNEL_ATTR_RO(_name) \
@@ -53,6 +54,37 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
KERNEL_ATTR_RW(uevent_helper);
#endif
+#ifdef CONFIG_PROFILING
+static ssize_t profiling_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", prof_on);
+}
+static ssize_t profiling_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+
+ if (prof_on)
+ return -EEXIST;
+ /*
+ * This eventually calls into get_option() which
+ * has a ton of callers and is not const. It is
+ * easiest to cast it away here.
+ */
+ profile_setup((char *)buf);
+ ret = profile_init();
+ if (ret)
+ return ret;
+ ret = create_proc_profile();
+ if (ret)
+ return ret;
+ return count;
+}
+KERNEL_ATTR_RW(profiling);
+#endif
+
#ifdef CONFIG_KEXEC
static ssize_t kexec_loaded_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
@@ -109,6 +141,9 @@ static struct attribute * kernel_attrs[] = {
&uevent_seqnum_attr.attr,
&uevent_helper_attr.attr,
#endif
+#ifdef CONFIG_PROFILING
+ &profiling_attr.attr,
+#endif
#ifdef CONFIG_KEXEC
&kexec_loaded_attr.attr,
&kexec_crash_loaded_attr.attr,
diff --git a/kernel/module.c b/kernel/module.c
index 9db11911e04..b7205f67cfa 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -100,7 +100,7 @@ static inline int strong_try_module_get(struct module *mod)
static inline void add_taint_module(struct module *mod, unsigned flag)
{
add_taint(flag);
- mod->taints |= flag;
+ mod->taints |= (1U << flag);
}
/*
@@ -784,6 +784,7 @@ sys_delete_module(const char __user *name_user, unsigned int flags)
mutex_lock(&module_mutex);
/* Store the name of the last unloaded module for diagnostic purposes */
strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
+ unregister_dynamic_debug_module(mod->name);
free_module(mod);
out:
@@ -923,7 +924,7 @@ static const char vermagic[] = VERMAGIC_STRING;
static int try_to_force_load(struct module *mod, const char *symname)
{
#ifdef CONFIG_MODULE_FORCE_LOAD
- if (!(tainted & TAINT_FORCED_MODULE))
+ if (!test_taint(TAINT_FORCED_MODULE))
printk("%s: no version for \"%s\" found: kernel tainted.\n",
mod->name, symname);
add_taint_module(mod, TAINT_FORCED_MODULE);
@@ -1033,7 +1034,7 @@ static unsigned long resolve_symbol(Elf_Shdr *sechdrs,
const unsigned long *crc;
ret = find_symbol(name, &owner, &crc,
- !(mod->taints & TAINT_PROPRIETARY_MODULE), true);
+ !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
if (!IS_ERR_VALUE(ret)) {
/* use_module can fail due to OOM,
or module initialization or unloading */
@@ -1173,7 +1174,7 @@ static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
while (i-- > 0)
sysfs_remove_bin_file(notes_attrs->dir,
&notes_attrs->attrs[i]);
- kobject_del(notes_attrs->dir);
+ kobject_put(notes_attrs->dir);
}
kfree(notes_attrs);
}
@@ -1634,7 +1635,7 @@ static void set_license(struct module *mod, const char *license)
license = "unspecified";
if (!license_is_gpl_compatible(license)) {
- if (!(tainted & TAINT_PROPRIETARY_MODULE))
+ if (!test_taint(TAINT_PROPRIETARY_MODULE))
printk(KERN_WARNING "%s: module license '%s' taints "
"kernel.\n", mod->name, license);
add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
@@ -1783,6 +1784,33 @@ static inline void add_kallsyms(struct module *mod,
}
#endif /* CONFIG_KALLSYMS */
+#ifdef CONFIG_DYNAMIC_PRINTK_DEBUG
+static void dynamic_printk_setup(Elf_Shdr *sechdrs, unsigned int verboseindex)
+{
+ struct mod_debug *debug_info;
+ unsigned long pos, end;
+ unsigned int num_verbose;
+
+ pos = sechdrs[verboseindex].sh_addr;
+ num_verbose = sechdrs[verboseindex].sh_size /
+ sizeof(struct mod_debug);
+ end = pos + (num_verbose * sizeof(struct mod_debug));
+
+ for (; pos < end; pos += sizeof(struct mod_debug)) {
+ debug_info = (struct mod_debug *)pos;
+ register_dynamic_debug_module(debug_info->modname,
+ debug_info->type, debug_info->logical_modname,
+ debug_info->flag_names, debug_info->hash,
+ debug_info->hash2);
+ }
+}
+#else
+static inline void dynamic_printk_setup(Elf_Shdr *sechdrs,
+ unsigned int verboseindex)
+{
+}
+#endif /* CONFIG_DYNAMIC_PRINTK_DEBUG */
+
static void *module_alloc_update_bounds(unsigned long size)
{
void *ret = module_alloc(size);
@@ -1831,6 +1859,7 @@ static noinline struct module *load_module(void __user *umod,
#endif
unsigned int markersindex;
unsigned int markersstringsindex;
+ unsigned int verboseindex;
struct module *mod;
long err = 0;
void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */
@@ -2117,6 +2146,7 @@ static noinline struct module *load_module(void __user *umod,
markersindex = find_sec(hdr, sechdrs, secstrings, "__markers");
markersstringsindex = find_sec(hdr, sechdrs, secstrings,
"__markers_strings");
+ verboseindex = find_sec(hdr, sechdrs, secstrings, "__verbose");
/* Now do relocations. */
for (i = 1; i < hdr->e_shnum; i++) {
@@ -2167,6 +2197,7 @@ static noinline struct module *load_module(void __user *umod,
marker_update_probe_range(mod->markers,
mod->markers + mod->num_markers);
#endif
+ dynamic_printk_setup(sechdrs, verboseindex);
err = module_finalize(hdr, sechdrs, mod);
if (err < 0)
goto cleanup;
@@ -2552,9 +2583,9 @@ static char *module_flags(struct module *mod, char *buf)
mod->state == MODULE_STATE_GOING ||
mod->state == MODULE_STATE_COMING) {
buf[bx++] = '(';
- if (mod->taints & TAINT_PROPRIETARY_MODULE)
+ if (mod->taints & (1 << TAINT_PROPRIETARY_MODULE))
buf[bx++] = 'P';
- if (mod->taints & TAINT_FORCED_MODULE)
+ if (mod->taints & (1 << TAINT_FORCED_MODULE))
buf[bx++] = 'F';
/*
* TAINT_FORCED_RMMOD: could be added.
diff --git a/kernel/panic.c b/kernel/panic.c
index 12c5a0a6c89..f290e8e866f 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -23,7 +23,7 @@
#include <linux/kallsyms.h>
int panic_on_oops;
-int tainted;
+static unsigned long tainted_mask;
static int pause_on_oops;
static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);
@@ -143,6 +143,26 @@ NORET_TYPE void panic(const char * fmt, ...)
EXPORT_SYMBOL(panic);
+
+struct tnt {
+ u8 bit;
+ char true;
+ char false;
+};
+
+static const struct tnt tnts[] = {
+ { TAINT_PROPRIETARY_MODULE, 'P', 'G' },
+ { TAINT_FORCED_MODULE, 'F', ' ' },
+ { TAINT_UNSAFE_SMP, 'S', ' ' },
+ { TAINT_FORCED_RMMOD, 'R', ' ' },
+ { TAINT_MACHINE_CHECK, 'M', ' ' },
+ { TAINT_BAD_PAGE, 'B', ' ' },
+ { TAINT_USER, 'U', ' ' },
+ { TAINT_DIE, 'D', ' ' },
+ { TAINT_OVERRIDDEN_ACPI_TABLE, 'A', ' ' },
+ { TAINT_WARN, 'W', ' ' },
+};
+
/**
* print_tainted - return a string to represent the kernel taint state.
*
@@ -158,32 +178,41 @@ EXPORT_SYMBOL(panic);
*
* The string is overwritten by the next call to print_taint().
*/
-
const char *print_tainted(void)
{
- static char buf[20];
- if (tainted) {
- snprintf(buf, sizeof(buf), "Tainted: %c%c%c%c%c%c%c%c%c%c",
- tainted & TAINT_PROPRIETARY_MODULE ? 'P' : 'G',
- tainted & TAINT_FORCED_MODULE ? 'F' : ' ',
- tainted & TAINT_UNSAFE_SMP ? 'S' : ' ',
- tainted & TAINT_FORCED_RMMOD ? 'R' : ' ',
- tainted & TAINT_MACHINE_CHECK ? 'M' : ' ',
- tainted & TAINT_BAD_PAGE ? 'B' : ' ',
- tainted & TAINT_USER ? 'U' : ' ',
- tainted & TAINT_DIE ? 'D' : ' ',
- tainted & TAINT_OVERRIDDEN_ACPI_TABLE ? 'A' : ' ',
- tainted & TAINT_WARN ? 'W' : ' ');
- }
- else
+ static char buf[ARRAY_SIZE(tnts) + sizeof("Tainted: ") + 1];
+
+ if (tainted_mask) {
+ char *s;
+ int i;
+
+ s = buf + sprintf(buf, "Tainted: ");
+ for (i = 0; i < ARRAY_SIZE(tnts); i++) {
+ const struct tnt *t = &tnts[i];
+ *s++ = test_bit(t->bit, &tainted_mask) ?
+ t->true : t->false;
+ }
+ *s = 0;
+ } else
snprintf(buf, sizeof(buf), "Not tainted");
return(buf);
}
+int test_taint(unsigned flag)
+{
+ return test_bit(flag, &tainted_mask);
+}
+EXPORT_SYMBOL(test_taint);
+
+unsigned long get_taint(void)
+{
+ return tainted_mask;
+}
+
void add_taint(unsigned flag)
{
debug_locks = 0; /* can't trust the integrity of the kernel anymore */
- tainted |= flag;
+ set_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(add_taint);
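With tainted_mask, a taint flag is now a bit number rather than a bit value, which is why the module.c hunks above switch from "mod->taints & TAINT_FOO" to "mod->taints & (1 << TAINT_FOO)". A small self-contained sketch of the new table-driven print_tainted() walk, with a hard-coded subset of flags for illustration:

#include <stdio.h>

/* Taint flags as bit numbers, as in the new scheme (illustrative subset). */
enum { TAINT_PROPRIETARY_MODULE = 0, TAINT_FORCED_MODULE = 1, TAINT_DIE = 7 };

struct tnt { unsigned bit; char set; char unset; };

static const struct tnt tnts[] = {
	{ TAINT_PROPRIETARY_MODULE, 'P', 'G' },
	{ TAINT_FORCED_MODULE,      'F', ' ' },
	{ TAINT_DIE,                'D', ' ' },
};

static unsigned long tainted_mask;

static void add_taint(unsigned flag)	/* stands in for set_bit(flag, &tainted_mask) */
{
	tainted_mask |= 1UL << flag;
}

int main(void)
{
	char buf[sizeof(tnts) / sizeof(tnts[0]) + 1];
	unsigned i;

	add_taint(TAINT_FORCED_MODULE);
	add_taint(TAINT_DIE);
	for (i = 0; i < sizeof(tnts) / sizeof(tnts[0]); i++)
		buf[i] = (tainted_mask & (1UL << tnts[i].bit)) ?
			 tnts[i].set : tnts[i].unset;
	buf[i] = '\0';
	printf("Tainted: %s\n", buf);	/* prints "Tainted: GFD" */
	return 0;
}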
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index bbd85c60f74..331f9836383 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -14,6 +14,7 @@
#include <linux/reboot.h>
#include <linux/string.h>
#include <linux/device.h>
+#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/mount.h>
@@ -520,6 +521,10 @@ int hibernate(void)
if (error)
goto Exit;
+ error = usermodehelper_disable();
+ if (error)
+ goto Exit;
+
/* Allocate memory management structures */
error = create_basic_memory_bitmaps();
if (error)
@@ -558,6 +563,7 @@ int hibernate(void)
thaw_processes();
Finish:
free_basic_memory_bitmaps();
+ usermodehelper_enable();
Exit:
pm_notifier_call_chain(PM_POST_HIBERNATION);
pm_restore_console();
@@ -634,6 +640,10 @@ static int software_resume(void)
if (error)
goto Finish;
+ error = usermodehelper_disable();
+ if (error)
+ goto Finish;
+
error = create_basic_memory_bitmaps();
if (error)
goto Finish;
@@ -656,6 +666,7 @@ static int software_resume(void)
thaw_processes();
Done:
free_basic_memory_bitmaps();
+ usermodehelper_enable();
Finish:
pm_notifier_call_chain(PM_POST_RESTORE);
pm_restore_console();
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 540b16b6856..19122cf6d82 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -14,6 +14,7 @@
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
+#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/cpu.h>
@@ -237,6 +238,10 @@ static int suspend_prepare(void)
if (error)
goto Finish;
+ error = usermodehelper_disable();
+ if (error)
+ goto Finish;
+
if (suspend_freeze_processes()) {
error = -EAGAIN;
goto Thaw;
@@ -256,6 +261,7 @@ static int suspend_prepare(void)
Thaw:
suspend_thaw_processes();
+ usermodehelper_enable();
Finish:
pm_notifier_call_chain(PM_POST_SUSPEND);
pm_restore_console();
@@ -376,6 +382,7 @@ int suspend_devices_and_enter(suspend_state_t state)
static void suspend_finish(void)
{
suspend_thaw_processes();
+ usermodehelper_enable();
pm_notifier_call_chain(PM_POST_SUSPEND);
pm_restore_console();
}
diff --git a/kernel/power/user.c b/kernel/power/user.c
index a6332a31326..005b93d839b 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -212,13 +212,20 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
case SNAPSHOT_FREEZE:
if (data->frozen)
break;
+
printk("Syncing filesystems ... ");
sys_sync();
printk("done.\n");
- error = freeze_processes();
+ error = usermodehelper_disable();
if (error)
+ break;
+
+ error = freeze_processes();
+ if (error) {
thaw_processes();
+ usermodehelper_enable();
+ }
if (!error)
data->frozen = 1;
break;
@@ -227,6 +234,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
if (!data->frozen || data->ready)
break;
thaw_processes();
+ usermodehelper_enable();
data->frozen = 0;
break;
diff --git a/kernel/printk.c b/kernel/printk.c
index b51b1567bb5..6341af77eb6 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -13,7 +13,7 @@
* Fixed SMP synchronization, 08/08/99, Manfred Spraul
* manfred@colorfullife.com
* Rewrote bits to get rid of console_lock
- * 01Mar01 Andrew Morton <andrewm@uow.edu.au>
+ * 01Mar01 Andrew Morton
*/
#include <linux/kernel.h>
@@ -577,9 +577,6 @@ static int have_callable_console(void)
* @fmt: format string
*
* This is printk(). It can be called from any context. We want it to work.
- * Be aware of the fact that if oops_in_progress is not set, we might try to
- * wake klogd up which could deadlock on runqueue lock if printk() is called
- * from scheduler code.
*
* We try to grab the console_sem. If we succeed, it's easy - we log the output and
* call the console drivers. If we fail to get the semaphore we place the output
@@ -593,6 +590,8 @@ static int have_callable_console(void)
*
* See also:
* printf(3)
+ *
+ * See the vsnprintf() documentation for format string extensions over C99.
*/
asmlinkage int printk(const char *fmt, ...)
@@ -982,10 +981,25 @@ int is_console_locked(void)
return console_locked;
}
-void wake_up_klogd(void)
+static DEFINE_PER_CPU(int, printk_pending);
+
+void printk_tick(void)
{
- if (!oops_in_progress && waitqueue_active(&log_wait))
+ if (__get_cpu_var(printk_pending)) {
+ __get_cpu_var(printk_pending) = 0;
wake_up_interruptible(&log_wait);
+ }
+}
+
+int printk_needs_cpu(int cpu)
+{
+ return per_cpu(printk_pending, cpu);
+}
+
+void wake_up_klogd(void)
+{
+ if (waitqueue_active(&log_wait))
+ __raw_get_cpu_var(printk_pending) = 1;
}
/**
@@ -1291,22 +1305,6 @@ static int __init disable_boot_consoles(void)
}
late_initcall(disable_boot_consoles);
-/**
- * tty_write_message - write a message to a certain tty, not just the console.
- * @tty: the destination tty_struct
- * @msg: the message to write
- *
- * This is used for messages that need to be redirected to a specific tty.
- * We don't put it into the syslog queue right now maybe in the future if
- * really needed.
- */
-void tty_write_message(struct tty_struct *tty, char *msg)
-{
- if (tty && tty->ops->write)
- tty->ops->write(tty, msg, strlen(msg));
- return;
-}
-
#if defined CONFIG_PRINTK
/*
diff --git a/kernel/profile.c b/kernel/profile.c
index cd26bed4cc2..a9e422df6bf 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -22,6 +22,8 @@
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <asm/sections.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>
@@ -50,11 +52,11 @@ static DEFINE_PER_CPU(int, cpu_profile_flip);
static DEFINE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP */
-static int __init profile_setup(char *str)
+int profile_setup(char *str)
{
- static char __initdata schedstr[] = "schedule";
- static char __initdata sleepstr[] = "sleep";
- static char __initdata kvmstr[] = "kvm";
+ static char schedstr[] = "schedule";
+ static char sleepstr[] = "sleep";
+ static char kvmstr[] = "kvm";
int par;
if (!strncmp(str, sleepstr, strlen(sleepstr))) {
@@ -100,14 +102,33 @@ static int __init profile_setup(char *str)
__setup("profile=", profile_setup);
-void __init profile_init(void)
+int profile_init(void)
{
+ int buffer_bytes;
if (!prof_on)
- return;
+ return 0;
/* only text is profiled */
prof_len = (_etext - _stext) >> prof_shift;
- prof_buffer = alloc_bootmem(prof_len*sizeof(atomic_t));
+ buffer_bytes = prof_len*sizeof(atomic_t);
+ if (!slab_is_available()) {
+ prof_buffer = alloc_bootmem(buffer_bytes);
+ return 0;
+ }
+
+ prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL);
+ if (prof_buffer)
+ return 0;
+
+ prof_buffer = alloc_pages_exact(buffer_bytes, GFP_KERNEL|__GFP_ZERO);
+ if (prof_buffer)
+ return 0;
+
+ prof_buffer = vmalloc(buffer_bytes);
+ if (prof_buffer)
+ return 0;
+
+ return -ENOMEM;
}
/* Profile event notifications */
@@ -527,7 +548,7 @@ static void __init profile_nop(void *unused)
{
}
-static int __init create_hash_tables(void)
+static int create_hash_tables(void)
{
int cpu;
@@ -575,14 +596,14 @@ out_cleanup:
#define create_hash_tables() ({ 0; })
#endif
-static int __init create_proc_profile(void)
+int create_proc_profile(void)
{
struct proc_dir_entry *entry;
if (!prof_on)
return 0;
if (create_hash_tables())
- return -1;
+ return -ENOMEM;
entry = proc_create("profile", S_IWUSR | S_IRUGO,
NULL, &proc_profile_operations);
if (!entry)
diff --git a/kernel/resource.c b/kernel/resource.c
index 03d796c1b2e..4089d12af6e 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -38,10 +38,6 @@ EXPORT_SYMBOL(iomem_resource);
static DEFINE_RWLOCK(resource_lock);
-#ifdef CONFIG_PROC_FS
-
-enum { MAX_IORES_LEVEL = 5 };
-
static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
struct resource *p = v;
@@ -53,6 +49,10 @@ static void *r_next(struct seq_file *m, void *v, loff_t *pos)
return p->sibling;
}
+#ifdef CONFIG_PROC_FS
+
+enum { MAX_IORES_LEVEL = 5 };
+
static void *r_start(struct seq_file *m, loff_t *pos)
__acquires(resource_lock)
{
@@ -516,6 +516,70 @@ int adjust_resource(struct resource *res, resource_size_t start, resource_size_t
return result;
}
+static void __init __reserve_region_with_split(struct resource *root,
+ resource_size_t start, resource_size_t end,
+ const char *name)
+{
+ struct resource *parent = root;
+ struct resource *conflict;
+ struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);
+
+ if (!res)
+ return;
+
+ res->name = name;
+ res->start = start;
+ res->end = end;
+ res->flags = IORESOURCE_BUSY;
+
+ for (;;) {
+ conflict = __request_resource(parent, res);
+ if (!conflict)
+ break;
+ if (conflict != parent) {
+ parent = conflict;
+ if (!(conflict->flags & IORESOURCE_BUSY))
+ continue;
+ }
+
+ /* Uhhuh, that didn't work out.. */
+ kfree(res);
+ res = NULL;
+ break;
+ }
+
+ if (!res) {
+ /* failed, split and try again */
+
+ /* conflict covered whole area */
+ if (conflict->start <= start && conflict->end >= end)
+ return;
+
+ if (conflict->start > start)
+ __reserve_region_with_split(root, start, conflict->start-1, name);
+ if (!(conflict->flags & IORESOURCE_BUSY)) {
+ resource_size_t common_start, common_end;
+
+ common_start = max(conflict->start, start);
+ common_end = min(conflict->end, end);
+ if (common_start < common_end)
+ __reserve_region_with_split(root, common_start, common_end, name);
+ }
+ if (conflict->end < end)
+ __reserve_region_with_split(root, conflict->end+1, end, name);
+ }
+
+}
+
+void reserve_region_with_split(struct resource *root,
+ resource_size_t start, resource_size_t end,
+ const char *name)
+{
+ write_lock(&resource_lock);
+ __reserve_region_with_split(root, start, end, name);
+ write_unlock(&resource_lock);
+}
+
EXPORT_SYMBOL(adjust_resource);
/**
@@ -562,33 +626,34 @@ struct resource * __request_region(struct resource *parent,
{
struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);
- if (res) {
- res->name = name;
- res->start = start;
- res->end = start + n - 1;
- res->flags = IORESOURCE_BUSY;
+ if (!res)
+ return NULL;
- write_lock(&resource_lock);
+ res->name = name;
+ res->start = start;
+ res->end = start + n - 1;
+ res->flags = IORESOURCE_BUSY;
- for (;;) {
- struct resource *conflict;
+ write_lock(&resource_lock);
- conflict = __request_resource(parent, res);
- if (!conflict)
- break;
- if (conflict != parent) {
- parent = conflict;
- if (!(conflict->flags & IORESOURCE_BUSY))
- continue;
- }
+ for (;;) {
+ struct resource *conflict;
- /* Uhhuh, that didn't work out.. */
- kfree(res);
- res = NULL;
+ conflict = __request_resource(parent, res);
+ if (!conflict)
break;
+ if (conflict != parent) {
+ parent = conflict;
+ if (!(conflict->flags & IORESOURCE_BUSY))
+ continue;
}
- write_unlock(&resource_lock);
+
+ /* Uhhuh, that didn't work out.. */
+ kfree(res);
+ res = NULL;
+ break;
}
+ write_unlock(&resource_lock);
return res;
}
EXPORT_SYMBOL(__request_region);
@@ -763,3 +828,40 @@ static int __init reserve_setup(char *str)
}
__setup("reserve=", reserve_setup);
+
+/*
+ * Check if the requested addr and size spans more than any slot in the
+ * iomem resource tree.
+ */
+int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
+{
+ struct resource *p = &iomem_resource;
+ int err = 0;
+ loff_t l;
+
+ read_lock(&resource_lock);
+ for (p = p->child; p ; p = r_next(NULL, p, &l)) {
+ /*
+ * We can probably skip the resources without
+ * IORESOURCE_IO attribute?
+ */
+ if (p->start >= addr + size)
+ continue;
+ if (p->end < addr)
+ continue;
+ if (p->start <= addr && (p->end >= addr + size - 1))
+ continue;
+ printk(KERN_WARNING "resource map sanity check conflict: "
+ "0x%llx 0x%llx 0x%llx 0x%llx %s\n",
+ (unsigned long long)addr,
+ (unsigned long long)(addr + size - 1),
+ (unsigned long long)p->start,
+ (unsigned long long)p->end,
+ p->name);
+ err = -1;
+ break;
+ }
+ read_unlock(&resource_lock);
+
+ return err;
+}
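iomem_map_sanity_check() above flags a mapping that overlaps a resource without being fully contained in it. The containment test is easy to get wrong by one, so here is a self-contained userspace sketch of the same three-way check over a hypothetical list of inclusive ranges:

#include <stdio.h>

struct range { unsigned long start, end; const char *name; };	/* end is inclusive */

/* Same checks as the loop in iomem_map_sanity_check(). */
static int map_sanity_check(const struct range *res, int nr,
			    unsigned long addr, unsigned long size)
{
	int i;

	for (i = 0; i < nr; i++) {
		const struct range *p = &res[i];

		if (p->start >= addr + size)		/* resource entirely above */
			continue;
		if (p->end < addr)			/* resource entirely below */
			continue;
		if (p->start <= addr && p->end >= addr + size - 1)
			continue;			/* fully contained: fine */
		printf("conflict: [0x%lx-0x%lx] vs %s [0x%lx-0x%lx]\n",
		       addr, addr + size - 1, p->name, p->start, p->end);
		return -1;
	}
	return 0;
}

int main(void)
{
	static const struct range res[] = {
		{ 0x1000, 0x1fff, "devA" },
		{ 0x2000, 0x2fff, "devB" },
	};

	map_sanity_check(res, 2, 0x1800, 0x1000);	/* spans devA and devB: conflict */
	map_sanity_check(res, 2, 0x1100, 0x100);	/* inside devA: no conflict */
	return 0;
}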
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index bbe6b31c3c5..ad958c1ec70 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -333,12 +333,10 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
unsigned long flags;
int num_threads = 1;
- rcu_read_lock();
if (lock_task_sighand(p, &flags)) {
num_threads = atomic_read(&p->signal->count);
unlock_task_sighand(p, &flags);
}
- rcu_read_unlock();
SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid, num_threads);
SEQ_printf(m,
diff --git a/kernel/softirq.c b/kernel/softirq.c
index c506f266a6b..37d67aa2d56 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -46,7 +46,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif
-static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;
+static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
@@ -205,7 +205,18 @@ restart:
do {
if (pending & 1) {
+ int prev_count = preempt_count();
+
h->action(h);
+
+ if (unlikely(prev_count != preempt_count())) {
+ printk(KERN_ERR "huh, entered softirq %td %p"
+ "with preempt_count %08x,"
+ " exited with %08x?\n", h - softirq_vec,
+ h->action, prev_count, preempt_count());
+ preempt_count() = prev_count;
+ }
+
rcu_bh_qsctr_inc(cpu);
}
h++;
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index cb838ee93a8..3953e4aed73 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -226,7 +226,7 @@ static void check_hung_uninterruptible_tasks(int this_cpu)
* If the system crashed already then all bets are off,
* do not report extra hung tasks:
*/
- if ((tainted & TAINT_DIE) || did_panic)
+ if (test_taint(TAINT_DIE) || did_panic)
return;
read_lock(&tasklist_lock);
diff --git a/kernel/sys.c b/kernel/sys.c
index 038a7bc0901..0bc8fa3c228 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1060,9 +1060,7 @@ asmlinkage long sys_setsid(void)
group_leader->signal->leader = 1;
__set_special_pids(sid);
- spin_lock(&group_leader->sighand->siglock);
- group_leader->signal->tty = NULL;
- spin_unlock(&group_leader->sighand->siglock);
+ proc_clear_tty(group_leader);
err = session;
out:
@@ -1351,8 +1349,10 @@ asmlinkage long sys_sethostname(char __user *name, int len)
down_write(&uts_sem);
errno = -EFAULT;
if (!copy_from_user(tmp, name, len)) {
- memcpy(utsname()->nodename, tmp, len);
- utsname()->nodename[len] = 0;
+ struct new_utsname *u = utsname();
+
+ memcpy(u->nodename, tmp, len);
+ memset(u->nodename + len, 0, sizeof(u->nodename) - len);
errno = 0;
}
up_write(&uts_sem);
@@ -1364,15 +1364,17 @@ asmlinkage long sys_sethostname(char __user *name, int len)
asmlinkage long sys_gethostname(char __user *name, int len)
{
int i, errno;
+ struct new_utsname *u;
if (len < 0)
return -EINVAL;
down_read(&uts_sem);
- i = 1 + strlen(utsname()->nodename);
+ u = utsname();
+ i = 1 + strlen(u->nodename);
if (i > len)
i = len;
errno = 0;
- if (copy_to_user(name, utsname()->nodename, i))
+ if (copy_to_user(name, u->nodename, i))
errno = -EFAULT;
up_read(&uts_sem);
return errno;
@@ -1397,8 +1399,10 @@ asmlinkage long sys_setdomainname(char __user *name, int len)
down_write(&uts_sem);
errno = -EFAULT;
if (!copy_from_user(tmp, name, len)) {
- memcpy(utsname()->domainname, tmp, len);
- utsname()->domainname[len] = 0;
+ struct new_utsname *u = utsname();
+
+ memcpy(u->domainname, tmp, len);
+ memset(u->domainname + len, 0, sizeof(u->domainname) - len);
errno = 0;
}
up_write(&uts_sem);
@@ -1452,14 +1456,22 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
return -EINVAL;
if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
return -EFAULT;
- if (new_rlim.rlim_cur > new_rlim.rlim_max)
- return -EINVAL;
old_rlim = current->signal->rlim + resource;
if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
!capable(CAP_SYS_RESOURCE))
return -EPERM;
- if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open)
- return -EPERM;
+
+ if (resource == RLIMIT_NOFILE) {
+ if (new_rlim.rlim_max == RLIM_INFINITY)
+ new_rlim.rlim_max = sysctl_nr_open;
+ if (new_rlim.rlim_cur == RLIM_INFINITY)
+ new_rlim.rlim_cur = sysctl_nr_open;
+ if (new_rlim.rlim_max > sysctl_nr_open)
+ return -EPERM;
+ }
+
+ if (new_rlim.rlim_cur > new_rlim.rlim_max)
+ return -EINVAL;
retval = security_task_setrlimit(resource, &new_rlim);
if (retval)
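The sys_sethostname()/sys_setdomainname() hunks above zero the whole tail of the fixed-size field instead of writing a single terminator, so remnants of an older, longer name cannot linger past the new NUL. A quick userspace demonstration of the difference, using a hypothetical fixed-size buffer:

#include <stdio.h>
#include <string.h>

#define NODENAME_LEN 16

static char nodename[NODENAME_LEN];

static void set_name_nul_only(const char *name, size_t len)
{
	memcpy(nodename, name, len);
	nodename[len] = 0;		/* old behaviour: bytes after the NUL survive */
}

static void set_name_zero_pad(const char *name, size_t len)
{
	memcpy(nodename, name, len);
	memset(nodename + len, 0, sizeof(nodename) - len);	/* new behaviour */
}

static void dump(const char *tag)
{
	size_t i;

	printf("%-8s", tag);
	for (i = 0; i < sizeof(nodename); i++)
		printf(" %02x", (unsigned char)nodename[i]);
	printf("\n");
}

int main(void)
{
	set_name_nul_only("verylonghost", 12);
	set_name_nul_only("ab", 2);
	dump("stale:");		/* "ab", NUL, then leftover "ylonghost" bytes */

	set_name_nul_only("verylonghost", 12);
	set_name_zero_pad("ab", 2);
	dump("padded:");	/* "ab" followed by zeros only */
	return 0;
}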
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 08d6e1bb99a..a77b27b11b0 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -125,6 +125,12 @@ cond_syscall(sys_vm86old);
cond_syscall(sys_vm86);
cond_syscall(compat_sys_ipc);
cond_syscall(compat_sys_sysctl);
+cond_syscall(sys_flock);
+cond_syscall(sys_io_setup);
+cond_syscall(sys_io_destroy);
+cond_syscall(sys_io_submit);
+cond_syscall(sys_io_cancel);
+cond_syscall(sys_io_getevents);
/* arch-specific weak syscall entries */
cond_syscall(sys_pciconfig_read);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 50ec0886fa3..617d41e4d6a 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -80,7 +80,6 @@ extern int pid_max_min, pid_max_max;
extern int sysctl_drop_caches;
extern int percpu_pagelist_fraction;
extern int compat_log;
-extern int maps_protect;
extern int latencytop_enabled;
extern int sysctl_nr_open_min, sysctl_nr_open_max;
#ifdef CONFIG_RCU_TORTURE_TEST
@@ -97,7 +96,7 @@ static int sixty = 60;
static int neg_one = -1;
#endif
-#ifdef CONFIG_MMU
+#if defined(CONFIG_MMU) && defined(CONFIG_FILE_LOCKING)
static int two = 2;
#endif
@@ -118,10 +117,8 @@ extern char modprobe_path[];
extern int sg_big_buff;
#endif
-#ifdef __sparc__
-extern char reboot_command [];
-extern int stop_a_enabled;
-extern int scons_pwroff;
+#ifdef CONFIG_SPARC
+#include <asm/system.h>
#endif
#ifdef __hppa__
@@ -152,7 +149,7 @@ extern int max_lock_depth;
#ifdef CONFIG_PROC_SYSCTL
static int proc_do_cad_pid(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos);
-static int proc_dointvec_taint(struct ctl_table *table, int write, struct file *filp,
+static int proc_taint(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos);
#endif
@@ -382,10 +379,9 @@ static struct ctl_table kern_table[] = {
#ifdef CONFIG_PROC_SYSCTL
{
.procname = "tainted",
- .data = &tainted,
- .maxlen = sizeof(int),
+ .maxlen = sizeof(long),
.mode = 0644,
- .proc_handler = &proc_dointvec_taint,
+ .proc_handler = &proc_taint,
},
#endif
#ifdef CONFIG_LATENCYTOP
@@ -415,7 +411,7 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = &proc_dointvec,
},
-#ifdef __sparc__
+#ifdef CONFIG_SPARC
{
.ctl_name = KERN_SPARC_REBOOT,
.procname = "reboot-cmd",
@@ -810,16 +806,6 @@ static struct ctl_table kern_table[] = {
.proc_handler = &proc_dointvec,
},
#endif
-#ifdef CONFIG_PROC_FS
- {
- .ctl_name = CTL_UNNUMBERED,
- .procname = "maps_protect",
- .data = &maps_protect,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- },
-#endif
{
.ctl_name = CTL_UNNUMBERED,
.procname = "poweroff_cmd",
@@ -1261,6 +1247,7 @@ static struct ctl_table fs_table[] = {
.extra1 = &minolduid,
.extra2 = &maxolduid,
},
+#ifdef CONFIG_FILE_LOCKING
{
.ctl_name = FS_LEASES,
.procname = "leases-enable",
@@ -1269,6 +1256,7 @@ static struct ctl_table fs_table[] = {
.mode = 0644,
.proc_handler = &proc_dointvec,
},
+#endif
#ifdef CONFIG_DNOTIFY
{
.ctl_name = FS_DIR_NOTIFY,
@@ -1280,6 +1268,7 @@ static struct ctl_table fs_table[] = {
},
#endif
#ifdef CONFIG_MMU
+#ifdef CONFIG_FILE_LOCKING
{
.ctl_name = FS_LEASE_TIME,
.procname = "lease-break-time",
@@ -1291,6 +1280,8 @@ static struct ctl_table fs_table[] = {
.extra1 = &zero,
.extra2 = &two,
},
+#endif
+#ifdef CONFIG_AIO
{
.procname = "aio-nr",
.data = &aio_nr,
@@ -1305,6 +1296,7 @@ static struct ctl_table fs_table[] = {
.mode = 0644,
.proc_handler = &proc_doulongvec_minmax,
},
+#endif /* CONFIG_AIO */
#ifdef CONFIG_INOTIFY_USER
{
.ctl_name = FS_INOTIFY,
@@ -1510,7 +1502,6 @@ void register_sysctl_root(struct ctl_table_root *root)
/* Perform the actual read/write of a sysctl table entry. */
static int do_sysctl_strategy(struct ctl_table_root *root,
struct ctl_table *table,
- int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
@@ -1524,8 +1515,7 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
return -EPERM;
if (table->strategy) {
- rc = table->strategy(table, name, nlen, oldval, oldlenp,
- newval, newlen);
+ rc = table->strategy(table, oldval, oldlenp, newval, newlen);
if (rc < 0)
return rc;
if (rc > 0)
@@ -1535,8 +1525,7 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
/* If there is no strategy routine, or if the strategy returns
* zero, proceed with automatic r/w */
if (table->data && table->maxlen) {
- rc = sysctl_data(table, name, nlen, oldval, oldlenp,
- newval, newlen);
+ rc = sysctl_data(table, oldval, oldlenp, newval, newlen);
if (rc < 0)
return rc;
}
@@ -1568,7 +1557,7 @@ repeat:
table = table->child;
goto repeat;
}
- error = do_sysctl_strategy(root, table, name, nlen,
+ error = do_sysctl_strategy(root, table,
oldval, oldlenp,
newval, newlen);
return error;
@@ -2237,49 +2226,39 @@ int proc_dointvec(struct ctl_table *table, int write, struct file *filp,
NULL,NULL);
}
-#define OP_SET 0
-#define OP_AND 1
-#define OP_OR 2
-
-static int do_proc_dointvec_bset_conv(int *negp, unsigned long *lvalp,
- int *valp,
- int write, void *data)
-{
- int op = *(int *)data;
- if (write) {
- int val = *negp ? -*lvalp : *lvalp;
- switch(op) {
- case OP_SET: *valp = val; break;
- case OP_AND: *valp &= val; break;
- case OP_OR: *valp |= val; break;
- }
- } else {
- int val = *valp;
- if (val < 0) {
- *negp = -1;
- *lvalp = (unsigned long)-val;
- } else {
- *negp = 0;
- *lvalp = (unsigned long)val;
- }
- }
- return 0;
-}
-
/*
- * Taint values can only be increased
+ * Taint values can only be increased
+ * This means we can safely use a temporary.
*/
-static int proc_dointvec_taint(struct ctl_table *table, int write, struct file *filp,
+static int proc_taint(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- int op;
+ struct ctl_table t;
+ unsigned long tmptaint = get_taint();
+ int err;
if (write && !capable(CAP_SYS_ADMIN))
return -EPERM;
- op = OP_OR;
- return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
- do_proc_dointvec_bset_conv,&op);
+ t = *table;
+ t.data = &tmptaint;
+ err = proc_doulongvec_minmax(&t, write, filp, buffer, lenp, ppos);
+ if (err < 0)
+ return err;
+
+ if (write) {
+ /*
+ * Poor man's atomic or. Not worth adding a primitive
+ * to everyone's atomic.h for this
+ */
+ int i;
+ for (i = 0; i < BITS_PER_LONG && tmptaint >> i; i++) {
+ if ((tmptaint >> i) & 1)
+ add_taint(i);
+ }
+ }
+
+ return err;
}
struct do_proc_dointvec_minmax_conv_param {
@@ -2727,7 +2706,7 @@ int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write,
*/
/* The generic sysctl data routine (used if no strategy routine supplied) */
-int sysctl_data(struct ctl_table *table, int __user *name, int nlen,
+int sysctl_data(struct ctl_table *table,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
@@ -2761,7 +2740,7 @@ int sysctl_data(struct ctl_table *table, int __user *name, int nlen,
}
/* The generic string strategy routine: */
-int sysctl_string(struct ctl_table *table, int __user *name, int nlen,
+int sysctl_string(struct ctl_table *table,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
@@ -2807,7 +2786,7 @@ int sysctl_string(struct ctl_table *table, int __user *name, int nlen,
* are between the minimum and maximum values given in the arrays
* table->extra1 and table->extra2, respectively.
*/
-int sysctl_intvec(struct ctl_table *table, int __user *name, int nlen,
+int sysctl_intvec(struct ctl_table *table,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
@@ -2843,7 +2822,7 @@ int sysctl_intvec(struct ctl_table *table, int __user *name, int nlen,
}
/* Strategy function to convert jiffies to seconds */
-int sysctl_jiffies(struct ctl_table *table, int __user *name, int nlen,
+int sysctl_jiffies(struct ctl_table *table,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
@@ -2877,7 +2856,7 @@ int sysctl_jiffies(struct ctl_table *table, int __user *name, int nlen,
}
/* Strategy function to convert jiffies to seconds */
-int sysctl_ms_jiffies(struct ctl_table *table, int __user *name, int nlen,
+int sysctl_ms_jiffies(struct ctl_table *table,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
@@ -2932,35 +2911,35 @@ asmlinkage long sys_sysctl(struct __sysctl_args __user *args)
return error;
}
-int sysctl_data(struct ctl_table *table, int __user *name, int nlen,
+int sysctl_data(struct ctl_table *table,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
return -ENOSYS;
}
-int sysctl_string(struct ctl_table *table, int __user *name, int nlen,
+int sysctl_string(struct ctl_table *table,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
return -ENOSYS;
}
-int sysctl_intvec(struct ctl_table *table, int __user *name, int nlen,
+int sysctl_intvec(struct ctl_table *table,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
return -ENOSYS;
}
-int sysctl_jiffies(struct ctl_table *table, int __user *name, int nlen,
+int sysctl_jiffies(struct ctl_table *table,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
return -ENOSYS;
}
-int sysctl_ms_jiffies(struct ctl_table *table, int __user *name, int nlen,
+int sysctl_ms_jiffies(struct ctl_table *table,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 8d53106a0a9..95ed42951e0 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -3,7 +3,6 @@
#
config TICK_ONESHOT
bool
- default n
config NO_HZ
bool "Tickless System (Dynamic Ticks)"
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index cb02324bdb8..b711ffcb106 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -20,6 +20,7 @@
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/tick.h>
+#include <linux/module.h>
#include <asm/irq_regs.h>
@@ -190,9 +191,17 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
- *last_update_time = ktime_to_us(ts->idle_lastupdate);
+ if (!tick_nohz_enabled)
+ return -1;
+
+ if (ts->idle_active)
+ *last_update_time = ktime_to_us(ts->idle_lastupdate);
+ else
+ *last_update_time = ktime_to_us(ktime_get());
+
return ktime_to_us(ts->idle_sleeptime);
}
+EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
/**
* tick_nohz_stop_sched_tick - stop the idle tick from the idle task
@@ -261,7 +270,7 @@ void tick_nohz_stop_sched_tick(int inidle)
next_jiffies = get_next_timer_interrupt(last_jiffies);
delta_jiffies = next_jiffies - last_jiffies;
- if (rcu_needs_cpu(cpu))
+ if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu))
delta_jiffies = 1;
/*
* Do not stop the tick, if we are only one off
diff --git a/kernel/timer.c b/kernel/timer.c
index 03bc7f1f159..510fe69351c 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -978,6 +978,7 @@ void update_process_times(int user_tick)
run_local_timers();
if (rcu_pending(cpu))
rcu_check_callbacks(cpu, user_tick);
+ printk_tick();
scheduler_tick();
run_posix_cpu_timers(p);
}
diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
index 4ab9659d269..3b34b354593 100644
--- a/kernel/utsname_sysctl.c
+++ b/kernel/utsname_sysctl.c
@@ -60,7 +60,7 @@ static int proc_do_uts_string(ctl_table *table, int write, struct file *filp,
#ifdef CONFIG_SYSCTL_SYSCALL
/* The generic string strategy routine: */
-static int sysctl_uts_string(ctl_table *table, int __user *name, int nlen,
+static int sysctl_uts_string(ctl_table *table,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
@@ -69,8 +69,7 @@ static int sysctl_uts_string(ctl_table *table, int __user *name, int nlen,
write = newval && newlen;
memcpy(&uts_table, table, sizeof(uts_table));
uts_table.data = get_uts(table, write);
- r = sysctl_string(&uts_table, name, nlen,
- oldval, oldlenp, newval, newlen);
+ r = sysctl_string(&uts_table, oldval, oldlenp, newval, newlen);
put_uts(table, write, uts_table.data);
return r;
}
diff --git a/kernel/wait.c b/kernel/wait.c
index c275c56cf2d..cd87131f2fc 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -72,12 +72,7 @@ prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
spin_lock_irqsave(&q->lock, flags);
if (list_empty(&wait->task_list))
__add_wait_queue(q, wait);
- /*
- * don't alter the task state if this is just going to
- * queue an async wait queue callback
- */
- if (is_sync_wait(wait))
- set_current_state(state);
+ set_current_state(state);
spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
@@ -91,12 +86,7 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
spin_lock_irqsave(&q->lock, flags);
if (list_empty(&wait->task_list))
__add_wait_queue_tail(q, wait);
- /*
- * don't alter the task state if this is just going to
- * queue an async wait queue callback
- */
- if (is_sync_wait(wait))
- set_current_state(state);
+ set_current_state(state);
spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 4048e92aa04..714afad4653 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -9,7 +9,7 @@
* Derived from the taskqueue/keventd code by:
*
* David Woodhouse <dwmw2@infradead.org>
- * Andrew Morton <andrewm@uow.edu.au>
+ * Andrew Morton
* Kai Petzke <wpp@marie.physik.tu-berlin.de>
* Theodore Ts'o <tytso@mit.edu>
*