-rw-r--r--  Documentation/feature-removal-schedule.txt | 30
-rw-r--r--  Documentation/filesystems/ext3.txt | 16
-rw-r--r--  Documentation/sound/alsa/HD-Audio-Models.txt | 1
-rw-r--r--  MAINTAINERS | 7
-rw-r--r--  Makefile | 46
-rw-r--r--  arch/x86/Kconfig | 11
-rw-r--r--  arch/x86/include/asm/paravirt.h | 28
-rw-r--r--  arch/x86/include/asm/paravirt_types.h | 10
-rw-r--r--  arch/x86/kernel/irq.c | 2
-rw-r--r--  arch/x86/kernel/pci-dma.c | 2
-rw-r--r--  arch/x86/kernel/smp.c | 1
-rw-r--r--  arch/x86/kernel/time.c | 3
-rw-r--r--  arch/x86/kernel/trampoline.c | 12
-rw-r--r--  arch/x86/kernel/trampoline_64.S | 4
-rw-r--r--  arch/x86/kernel/vmi_32.c | 2
-rw-r--r--  block/blk-core.c | 16
-rw-r--r--  block/blk-merge.c | 2
-rw-r--r--  block/blk-settings.c | 2
-rw-r--r--  block/blk-tag.c | 2
-rw-r--r--  block/cfq-iosched.c | 259
-rw-r--r--  block/elevator.c | 4
-rw-r--r--  block/genhd.c | 4
-rw-r--r--  drivers/block/cciss.c | 79
-rw-r--r--  drivers/char/genrtc.c | 1
-rw-r--r--  drivers/char/rtc.c | 1
-rw-r--r--  drivers/char/sonypi.c | 1
-rw-r--r--  drivers/hid/hid-core.c | 2
-rw-r--r--  drivers/hid/hid-twinhan.c | 4
-rw-r--r--  drivers/hid/hidraw.c | 5
-rw-r--r--  drivers/md/dm.c | 16
-rw-r--r--  drivers/mfd/twl4030-core.c | 89
-rw-r--r--  drivers/net/wan/c101.c | 1
-rw-r--r--  drivers/net/wan/n2.c | 1
-rw-r--r--  drivers/net/wan/pci200syn.c | 1
-rw-r--r--  drivers/oprofile/event_buffer.c | 35
-rw-r--r--  drivers/pci/dmar.c | 13
-rw-r--r--  drivers/pci/hotplug/cpqphp.h | 1
-rw-r--r--  drivers/pci/intel-iommu.c | 82
-rw-r--r--  drivers/pci/pci.c | 13
-rw-r--r--  drivers/pci/quirks.c | 13
-rw-r--r--  fs/ext3/super.c | 13
-rw-r--r--  fs/partitions/check.c | 12
-rw-r--r--  include/linux/blkdev.h | 4
-rw-r--r--  include/linux/genhd.h | 21
-rw-r--r--  include/linux/kernel.h | 6
-rw-r--r--  kernel/lockdep.c | 20
-rw-r--r--  kernel/sched.c | 13
-rw-r--r--  kernel/trace/trace.c | 2
-rw-r--r--  kernel/trace/trace_events_filter.c | 3
-rw-r--r--  mm/backing-dev.c | 2
-rw-r--r--  mm/page-writeback.c | 3
-rw-r--r--  mm/percpu.c | 5
-rw-r--r--  scripts/Kbuild.include | 2
-rw-r--r--  scripts/Makefile.lib | 2
-rwxr-xr-x  scripts/checkkconfigsymbols.sh | 4
-rw-r--r--  scripts/headers_install.pl | 2
-rwxr-xr-x  scripts/mkcompile_h | 12
-rw-r--r--  scripts/package/Makefile | 11
-rwxr-xr-x  scripts/package/mkspec | 2
-rw-r--r--  sound/arm/aaci.c | 1
-rw-r--r--  sound/pci/bt87x.c | 2
-rw-r--r--  sound/pci/hda/patch_nvhdmi.c | 31
-rw-r--r--  sound/pci/hda/patch_realtek.c | 2
-rw-r--r--  sound/pci/hda/patch_sigmatel.c | 29
-rw-r--r--  sound/pci/ice1712/amp.c | 8
-rw-r--r--  sound/pci/ice1712/ice1724.c | 2
-rw-r--r--  tools/perf/Makefile | 21
-rw-r--r--  tools/perf/builtin-sched.c | 4
-rw-r--r--  tools/perf/util/parse-events.c | 5
-rw-r--r--  tools/perf/util/trace-event-parse.c | 8
70 files changed, 660 insertions(+), 414 deletions(-)
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 89a47b5aff07..04e6c819b28a 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -451,3 +451,33 @@ Why: OSS sound_core grabs all legacy minors (0-255) of SOUND_MAJOR
will also allow making ALSA OSS emulation independent of
sound_core. The dependency will be broken then too.
Who: Tejun Heo <tj@kernel.org>
+
+----------------------------
+
+What:	Support for VMware's guest paravirtualization technique [VMI] will be
+ dropped.
+When: 2.6.37 or earlier.
+Why:	With the recent innovations in CPU hardware acceleration technologies
+	from Intel and AMD, VMware ran a few experiments to compare these
+	techniques to the guest paravirtualization technique on VMware's
+	platform. The hardware-assisted virtualization techniques outperformed
+	VMI in most of the workloads. VMware expects these hardware features
+	to be ubiquitous in a couple of years; as a result, VMware has started
+	a phased retirement of this feature from the hypervisor. We will be
+	removing this feature from the kernel too. Right now we are targeting
+	2.6.37, but it can be retired earlier if technical reasons (read: the
+	opportunity to remove a major chunk of pvops) arise.
+
+	Please note that VMI has always been an optimization, and non-VMI
+	kernels still work fine on VMware's platform.
+	The latest VMware products that support VMI are Workstation 7.0 and
+	vSphere 4.0 on the ESX side; future maintenance releases for these
+	products will continue supporting VMI.
+
+	For more details about the VMI retirement, see:
+ http://blogs.vmware.com/guestosguide/2009/09/vmi-retirement.html
+
+Who: Alok N Kataria <akataria@vmware.com>
+
+----------------------------
diff --git a/Documentation/filesystems/ext3.txt b/Documentation/filesystems/ext3.txt
index 570f9bd9be2b..05d5cf1d743f 100644
--- a/Documentation/filesystems/ext3.txt
+++ b/Documentation/filesystems/ext3.txt
@@ -123,10 +123,18 @@ resuid=n The user ID which may use the reserved blocks.
sb=n Use alternate superblock at this location.
-quota
-noquota
-grpquota
-usrquota
+quota These options are ignored by the filesystem. They
+noquota are used only by quota tools to recognize volumes
+grpquota where quota should be turned on. See documentation
+usrquota in the quota-tools package for more details
+ (http://sourceforge.net/projects/linuxquota).
+
+jqfmt=<quota type>	These options tell the filesystem details about quota
+usrjquota=<file> so that quota information can be properly updated
+grpjquota=<file> during journal replay. They replace the above
+ quota options. See documentation in the quota-tools
+ package for more details
+ (http://sourceforge.net/projects/linuxquota).
bh (*) ext3 associates buffer heads to data pages to
nobh (a) cache disk block mapping information
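A hypothetical /etc/fstab entry using the journaled quota options documented above — the device, mount point, and quota file names here are illustrative placeholders, not values taken from this patch:

/dev/sdXN  /home  ext3  defaults,jqfmt=vfsv0,usrjquota=aquota.user,grpjquota=aquota.group  0 2

With jqfmt and the per-type quota file options set, quota information can be updated during journal replay, which is the point of the new documentation text above.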
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index 75fddb40f416..4c7f9aee5c4e 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -359,6 +359,7 @@ STAC9227/9228/9229/927x
5stack-no-fp D965 5stack without front panel
dell-3stack Dell Dimension E520
dell-bios Fixes with Dell BIOS setup
+ volknob Fixes with volume-knob widget 0x24
auto BIOS setup (default)
STAC92HD71B*
diff --git a/MAINTAINERS b/MAINTAINERS
index ff968842ce56..6b057778477d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4076,6 +4076,13 @@ M: Peter Zijlstra <a.p.zijlstra@chello.nl>
M: Paul Mackerras <paulus@samba.org>
M: Ingo Molnar <mingo@elte.hu>
S: Supported
+F: kernel/perf_event.c
+F: include/linux/perf_event.h
+F: arch/*/*/kernel/perf_event.c
+F: arch/*/include/asm/perf_event.h
+F: arch/*/lib/perf_event.c
+F: arch/*/kernel/perf_callchain.c
+F: tools/perf/
PERSONALITY HANDLING
M: Christoph Hellwig <hch@infradead.org>
diff --git a/Makefile b/Makefile
index 927d7a32ed9e..326791575b0a 100644
--- a/Makefile
+++ b/Makefile
@@ -179,46 +179,9 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
# Alternatively CROSS_COMPILE can be set in the environment.
# Default value for CROSS_COMPILE is not to prefix executables
# Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile
-#
-# To force ARCH and CROSS_COMPILE settings include kernel.* files
-# in the kernel tree - do not patch this file.
export KBUILD_BUILDHOST := $(SUBARCH)
-
-# Kbuild save the ARCH and CROSS_COMPILE setting in kernel.* files.
-# Restore these settings and check that user did not specify
-# conflicting values.
-
-saved_arch := $(shell cat include/generated/kernel.arch 2> /dev/null)
-saved_cross := $(shell cat include/generated/kernel.cross 2> /dev/null)
-
-ifneq ($(CROSS_COMPILE),)
- ifneq ($(saved_cross),)
- ifneq ($(CROSS_COMPILE),$(saved_cross))
- $(error CROSS_COMPILE changed from \
- "$(saved_cross)" to \
- to "$(CROSS_COMPILE)". \
- Use "make mrproper" to fix it up)
- endif
- endif
-else
- CROSS_COMPILE := $(saved_cross)
-endif
-
-ifneq ($(ARCH),)
- ifneq ($(saved_arch),)
- ifneq ($(saved_arch),$(ARCH))
- $(error ARCH changed from \
- "$(saved_arch)" to "$(ARCH)". \
- Use "make mrproper" to fix it up)
- endif
- endif
-else
- ifneq ($(saved_arch),)
- ARCH := $(saved_arch)
- else
- ARCH := $(SUBARCH)
- endif
-endif
+ARCH ?= $(SUBARCH)
+CROSS_COMPILE ?=
# Architecture as present in compile.h
UTS_MACHINE := $(ARCH)
@@ -483,11 +446,6 @@ ifeq ($(config-targets),1)
include $(srctree)/arch/$(SRCARCH)/Makefile
export KBUILD_DEFCONFIG KBUILD_KCONFIG
-# save ARCH & CROSS_COMPILE settings
-$(shell mkdir -p include/generated && \
- echo $(ARCH) > include/generated/kernel.arch && \
- echo $(CROSS_COMPILE) > include/generated/kernel.cross)
-
config: scripts_basic outputmakefile FORCE
$(Q)mkdir -p include/linux include/config
$(Q)$(MAKE) $(build)=scripts/kconfig $@
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c876bace8fdc..07e01149e3bf 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -491,7 +491,7 @@ if PARAVIRT_GUEST
source "arch/x86/xen/Kconfig"
config VMI
- bool "VMI Guest support"
+ bool "VMI Guest support (DEPRECATED)"
select PARAVIRT
depends on X86_32
---help---
@@ -500,6 +500,15 @@ config VMI
at the moment), by linking the kernel to a GPL-ed ROM module
provided by the hypervisor.
+ As of September 2009, VMware has started a phased retirement
+ of this feature from VMware's products. Please see
+ feature-removal-schedule.txt for details. If you are
+ planning to enable this option, please note that you cannot
+	  live migrate a VMI-enabled VM to a future VMware product
+	  that doesn't support VMI. So if you expect your kernel to
+ seamlessly migrate to newer VMware products, keep this
+ disabled.
+
config KVM_CLOCK
bool "KVM paravirtualized clock"
select PARAVIRT
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 8aebcc41041d..efb38994859c 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -840,42 +840,22 @@ static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
static inline unsigned long __raw_local_save_flags(void)
{
- unsigned long f;
-
- asm volatile(paravirt_alt(PARAVIRT_CALL)
- : "=a"(f)
- : paravirt_type(pv_irq_ops.save_fl),
- paravirt_clobber(CLBR_EAX)
- : "memory", "cc");
- return f;
+ return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
}
static inline void raw_local_irq_restore(unsigned long f)
{
- asm volatile(paravirt_alt(PARAVIRT_CALL)
- : "=a"(f)
- : PV_FLAGS_ARG(f),
- paravirt_type(pv_irq_ops.restore_fl),
- paravirt_clobber(CLBR_EAX)
- : "memory", "cc");
+ PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
}
static inline void raw_local_irq_disable(void)
{
- asm volatile(paravirt_alt(PARAVIRT_CALL)
- :
- : paravirt_type(pv_irq_ops.irq_disable),
- paravirt_clobber(CLBR_EAX)
- : "memory", "eax", "cc");
+ PVOP_VCALLEE0(pv_irq_ops.irq_disable);
}
static inline void raw_local_irq_enable(void)
{
- asm volatile(paravirt_alt(PARAVIRT_CALL)
- :
- : paravirt_type(pv_irq_ops.irq_enable),
- paravirt_clobber(CLBR_EAX)
- : "memory", "eax", "cc");
+ PVOP_VCALLEE0(pv_irq_ops.irq_enable);
}
static inline unsigned long __raw_local_irq_save(void)
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index dd0f5b32489d..9357473c8da0 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -494,10 +494,11 @@ int paravirt_disable_iospace(void);
#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else /* CONFIG_X86_64 */
+/* [re]ax isn't an arg, but the return val */
#define PVOP_VCALL_ARGS \
unsigned long __edi = __edi, __esi = __esi, \
- __edx = __edx, __ecx = __ecx
-#define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax
+ __edx = __edx, __ecx = __ecx, __eax = __eax
+#define PVOP_CALL_ARGS PVOP_VCALL_ARGS
#define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x))
@@ -509,6 +510,7 @@ int paravirt_disable_iospace(void);
"=c" (__ecx)
#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax)
+/* void functions are still allowed [re]ax for scratch */
#define PVOP_VCALLEE_CLOBBERS "=a" (__eax)
#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
@@ -583,8 +585,8 @@ int paravirt_disable_iospace(void);
VEXTRA_CLOBBERS, \
pre, post, ##__VA_ARGS__)
-#define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...) \
- ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
+#define __PVOP_VCALLEESAVE(op, pre, post, ...) \
+ ____PVOP_VCALL(op.func, CLBR_RET_REG, \
PVOP_VCALLEE_CLOBBERS, , \
pre, post, ##__VA_ARGS__)
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 391206199515..74656d1d4e30 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -244,7 +244,6 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
__func__, smp_processor_id(), vector, irq);
}
- run_local_timers();
irq_exit();
set_irq_regs(old_regs);
@@ -269,7 +268,6 @@ void smp_generic_interrupt(struct pt_regs *regs)
if (generic_interrupt_extension)
generic_interrupt_extension();
- run_local_timers();
irq_exit();
set_irq_regs(old_regs);
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index d20009b4e6ef..b2a71dca5642 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -311,7 +311,7 @@ void pci_iommu_shutdown(void)
amd_iommu_shutdown();
}
/* Must execute after PCI subsystem */
-fs_initcall(pci_iommu_init);
+rootfs_initcall(pci_iommu_init);
#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index d915d956e66d..ec1de97600e7 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -198,7 +198,6 @@ void smp_reschedule_interrupt(struct pt_regs *regs)
{
ack_APIC_irq();
inc_irq_stat(irq_resched_count);
- run_local_timers();
/*
* KVM uses this interrupt to force a cpu out of guest mode
*/
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index dcb00d278512..be2573448ed9 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -38,7 +38,8 @@ unsigned long profile_pc(struct pt_regs *regs)
#ifdef CONFIG_FRAME_POINTER
return *(unsigned long *)(regs->bp + sizeof(long));
#else
- unsigned long *sp = (unsigned long *)regs->sp;
+ unsigned long *sp =
+ (unsigned long *)kernel_stack_pointer(regs);
/*
* Return address is either directly at stack pointer
* or above a saved flags. Eflags has bits 22-31 zero,
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
index 699f7eeb896a..cd022121cab6 100644
--- a/arch/x86/kernel/trampoline.c
+++ b/arch/x86/kernel/trampoline.c
@@ -3,8 +3,16 @@
#include <asm/trampoline.h>
#include <asm/e820.h>
+#if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP)
+#define __trampinit
+#define __trampinitdata
+#else
+#define __trampinit __cpuinit
+#define __trampinitdata __cpuinitdata
+#endif
+
/* ready for x86_64 and x86 */
-unsigned char *__cpuinitdata trampoline_base = __va(TRAMPOLINE_BASE);
+unsigned char *__trampinitdata trampoline_base = __va(TRAMPOLINE_BASE);
void __init reserve_trampoline_memory(void)
{
@@ -26,7 +34,7 @@ void __init reserve_trampoline_memory(void)
* bootstrap into the page concerned. The caller
* has made sure it's suitably aligned.
*/
-unsigned long __cpuinit setup_trampoline(void)
+unsigned long __trampinit setup_trampoline(void)
{
memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE);
return virt_to_phys(trampoline_base);
diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
index 596d54c660a5..3af2dff58b21 100644
--- a/arch/x86/kernel/trampoline_64.S
+++ b/arch/x86/kernel/trampoline_64.S
@@ -32,8 +32,12 @@
#include <asm/segment.h>
#include <asm/processor-flags.h>
+#ifdef CONFIG_ACPI_SLEEP
+.section .rodata, "a", @progbits
+#else
/* We can free up the trampoline after bootup if cpu hotplug is not supported. */
__CPUINITRODATA
+#endif
.code16
ENTRY(trampoline_data)
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 31e6f6cfe53e..d430e4c30193 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -648,7 +648,7 @@ static inline int __init activate_vmi(void)
pv_info.paravirt_enabled = 1;
pv_info.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;
- pv_info.name = "vmi";
+ pv_info.name = "vmi [deprecated]";
pv_init_ops.patch = vmi_patch;
diff --git a/block/blk-core.c b/block/blk-core.c
index 81f34311659a..ac0fa10f8fa5 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -70,7 +70,7 @@ static void drive_stat_acct(struct request *rq, int new_io)
part_stat_inc(cpu, part, merges[rw]);
else {
part_round_stats(cpu, part);
- part_inc_in_flight(part);
+ part_inc_in_flight(part, rw);
}
part_stat_unlock();
@@ -1030,9 +1030,9 @@ static void part_round_stats_single(int cpu, struct hd_struct *part,
if (now == part->stamp)
return;
- if (part->in_flight) {
+ if (part_in_flight(part)) {
__part_stat_add(cpu, part, time_in_queue,
- part->in_flight * (now - part->stamp));
+ part_in_flight(part) * (now - part->stamp));
__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
}
part->stamp = now;
@@ -1739,7 +1739,7 @@ static void blk_account_io_done(struct request *req)
part_stat_inc(cpu, part, ios[rw]);
part_stat_add(cpu, part, ticks[rw], duration);
part_round_stats(cpu, part);
- part_dec_in_flight(part);
+ part_dec_in_flight(part, rw);
part_stat_unlock();
}
@@ -2492,14 +2492,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
}
EXPORT_SYMBOL(kblockd_schedule_work);
-int kblockd_schedule_delayed_work(struct request_queue *q,
- struct delayed_work *work,
- unsigned long delay)
-{
- return queue_delayed_work(kblockd_workqueue, work, delay);
-}
-EXPORT_SYMBOL(kblockd_schedule_delayed_work);
-
int __init blk_dev_init(void)
{
BUILD_BUG_ON(__REQ_NR_BITS > 8 *
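The blk-core.c hunks above move the per-partition in-flight accounting from a single counter to one counter per request direction (part_inc_in_flight(part, rw), part_dec_in_flight(part, rw), part_in_flight(part)). The include/linux/genhd.h side of the change is not included in this excerpt, so the following is only a minimal sketch of what such helpers could look like; the struct name and field layout are assumptions for illustration, not the kernel's definitions.

/* Sketch only: per-direction in-flight counters, with a summed value for
 * callers that still want a single number (e.g. diskstats_show() above). */
struct part_sketch {
	unsigned int in_flight[2];	/* [0] = read, [1] = write */
};

static inline void part_inc_in_flight(struct part_sketch *part, int rw)
{
	part->in_flight[rw]++;
}

static inline void part_dec_in_flight(struct part_sketch *part, int rw)
{
	part->in_flight[rw]--;
}

static inline unsigned int part_in_flight(struct part_sketch *part)
{
	return part->in_flight[0] + part->in_flight[1];
}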
diff --git a/block/blk-merge.c b/block/blk-merge.c
index b0de8574fdc8..99cb5cf1f447 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -351,7 +351,7 @@ static void blk_account_io_merge(struct request *req)
part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
part_round_stats(cpu, part);
- part_dec_in_flight(part);
+ part_dec_in_flight(part, rq_data_dir(req));
part_stat_unlock();
}
diff --git a/block/blk-settings.c b/block/blk-settings.c
index e0695bca7027..66d4aa8799b7 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -242,7 +242,7 @@ EXPORT_SYMBOL(blk_queue_max_hw_sectors);
/**
* blk_queue_max_discard_sectors - set max sectors for a single discard
* @q: the request queue for the device
- * @max_discard: maximum number of sectors to discard
+ * @max_discard_sectors: maximum number of sectors to discard
**/
void blk_queue_max_discard_sectors(struct request_queue *q,
unsigned int max_discard_sectors)
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 2e5cfeb59333..6b0f52c20964 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -359,7 +359,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
max_depth -= 2;
if (!max_depth)
max_depth = 1;
- if (q->in_flight[0] > max_depth)
+ if (q->in_flight[BLK_RW_ASYNC] > max_depth)
return 1;
}
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 9c4b679908f4..069a61017c02 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -150,7 +150,7 @@ struct cfq_data {
* idle window management
*/
struct timer_list idle_slice_timer;
- struct delayed_work unplug_work;
+ struct work_struct unplug_work;
struct cfq_queue *active_queue;
struct cfq_io_context *active_cic;
@@ -230,7 +230,7 @@ CFQ_CFQQ_FNS(coop);
blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
static void cfq_dispatch_insert(struct request_queue *, struct request *);
-static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
+static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
struct io_context *, gfp_t);
static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
struct io_context *);
@@ -241,40 +241,35 @@ static inline int rq_in_driver(struct cfq_data *cfqd)
}
static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
- int is_sync)
+ bool is_sync)
{
- return cic->cfqq[!!is_sync];
+ return cic->cfqq[is_sync];
}
static inline void cic_set_cfqq(struct cfq_io_context *cic,
- struct cfq_queue *cfqq, int is_sync)
+ struct cfq_queue *cfqq, bool is_sync)
{
- cic->cfqq[!!is_sync] = cfqq;
+ cic->cfqq[is_sync] = cfqq;
}
/*
* We regard a request as SYNC, if it's either a read or has the SYNC bit
* set (in which case it could also be direct WRITE).
*/
-static inline int cfq_bio_sync(struct bio *bio)
+static inline bool cfq_bio_sync(struct bio *bio)
{
- if (bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO))
- return 1;
-
- return 0;
+ return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
}
/*
* scheduler run of queue, if there are requests pending and no one in the
* driver that will restart queueing
*/
-static inline void cfq_schedule_dispatch(struct cfq_data *cfqd,
- unsigned long delay)
+static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
if (cfqd->busy_queues) {
cfq_log(cfqd, "schedule dispatch");
- kblockd_schedule_delayed_work(cfqd->queue, &cfqd->unplug_work,
- delay);
+ kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
}
}
@@ -290,7 +285,7 @@ static int cfq_queue_empty(struct request_queue *q)
* if a queue is marked sync and has sync io queued. A sync queue with async
* io only, should not get full sync slice length.
*/
-static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
+static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
unsigned short prio)
{
const int base_slice = cfqd->cfq_slice[sync];
@@ -318,7 +313,7 @@ cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
* isn't valid until the first request from the dispatch is activated
* and the slice time set.
*/
-static inline int cfq_slice_used(struct cfq_queue *cfqq)
+static inline bool cfq_slice_used(struct cfq_queue *cfqq)
{
if (cfq_cfqq_slice_new(cfqq))
return 0;
@@ -493,7 +488,7 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
* we will service the queues.
*/
static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- int add_front)
+ bool add_front)
{
struct rb_node **p, *parent;
struct cfq_queue *__cfqq;
@@ -509,11 +504,20 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
} else
rb_key += jiffies;
} else if (!add_front) {
+ /*
+ * Get our rb key offset. Subtract any residual slice
+ * value carried from last service. A negative resid
+ * count indicates slice overrun, and this should position
+ * the next service time further away in the tree.
+ */
rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
- rb_key += cfqq->slice_resid;
+ rb_key -= cfqq->slice_resid;
cfqq->slice_resid = 0;
- } else
- rb_key = 0;
+ } else {
+ rb_key = -HZ;
+ __cfqq = cfq_rb_first(&cfqd->service_tree);
+ rb_key += __cfqq ? __cfqq->rb_key : jiffies;
+ }
if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
/*
@@ -547,7 +551,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
n = &(*p)->rb_left;
else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
n = &(*p)->rb_right;
- else if (rb_key < __cfqq->rb_key)
+ else if (time_before(rb_key, __cfqq->rb_key))
n = &(*p)->rb_left;
else
n = &(*p)->rb_right;
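The hunk above replaces a raw '<' comparison of rb_key values with time_before(). These keys are derived from jiffies and therefore wrap around; the sketch below shows the wrap-safe comparison idea (illustrative only, not the kernel's exact time_before() implementation):

/* "a is before b" for jiffies-derived values: the signed difference stays
 * correct across an unsigned wraparound as long as the two values are less
 * than half the counter range apart. */
static inline int sketch_time_before(unsigned long a, unsigned long b)
{
	return (long)(a - b) < 0;
}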
@@ -827,8 +831,10 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
* reposition in fifo if next is older than rq
*/
if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
- time_before(next->start_time, rq->start_time))
+ time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
list_move(&rq->queuelist, &next->queuelist);
+ rq_set_fifo_time(rq, rq_fifo_time(next));
+ }
cfq_remove_request(next);
}
@@ -844,7 +850,7 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
* Disallow merge of a sync bio into an async request.
*/
if (cfq_bio_sync(bio) && !rq_is_sync(rq))
- return 0;
+ return false;
/*
* Lookup the cfqq that this bio will be queued with. Allow
@@ -852,13 +858,10 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
*/
cic = cfq_cic_lookup(cfqd, current->io_context);
if (!cic)
- return 0;
+ return false;
cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
- if (cfqq == RQ_CFQQ(rq))
- return 1;
-
- return 0;
+ return cfqq == RQ_CFQQ(rq);
}
static void __cfq_set_active_queue(struct cfq_data *cfqd,
@@ -886,7 +889,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
*/
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- int timed_out)
+ bool timed_out)
{
cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
@@ -914,7 +917,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
}
}
-static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
+static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
{
struct cfq_queue *cfqq = cfqd->active_queue;
@@ -1026,7 +1029,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
*/
static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
struct cfq_queue *cur_cfqq,
- int probe)
+ bool probe)
{
struct cfq_queue *cfqq;
@@ -1090,6 +1093,15 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
if (!cic || !atomic_read(&cic->ioc->nr_tasks))
return;
+ /*
+ * If our average think time is larger than the remaining time
+ * slice, then don't idle. This avoids overrunning the allotted
+ * time slice.
+ */
+ if (sample_valid(cic->ttime_samples) &&
+ (cfqq->slice_end - jiffies < cic->ttime_mean))
+ return;
+
cfq_mark_cfqq_wait_request(cfqq);
/*
@@ -1129,9 +1141,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
*/
static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
{
- struct cfq_data *cfqd = cfqq->cfqd;
- struct request *rq;
- int fifo;
+ struct request *rq = NULL;
if (cfq_cfqq_fifo_expire(cfqq))
return NULL;
@@ -1141,13 +1151,11 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
if (list_empty(&cfqq->fifo))
return NULL;
- fifo = cfq_cfqq_sync(cfqq);
rq = rq_entry_fifo(cfqq->fifo.next);
-
- if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
+ if (time_before(jiffies, rq_fifo_time(rq)))
rq = NULL;
- cfq_log_cfqq(cfqd, cfqq, "fifo=%p", rq);
+ cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
return rq;
}
@@ -1248,67 +1256,21 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
return dispatched;
}
-/*
- * Dispatch a request from cfqq, moving them to the request queue
- * dispatch list.
- */
-static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
- struct request *rq;
-
- BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
-
- /*
- * follow expired path, else get first next available
- */
- rq = cfq_check_fifo(cfqq);
- if (!rq)
- rq = cfqq->next_rq;
-
- /*
- * insert request into driver dispatch list
- */
- cfq_dispatch_insert(cfqd->queue, rq);
-
- if (!cfqd->active_cic) {
- struct cfq_io_context *cic = RQ_CIC(rq);
-
- atomic_long_inc(&cic->ioc->refcount);
- cfqd->active_cic = cic;
- }
-}
-
-/*
- * Find the cfqq that we need to service and move a request from that to the
- * dispatch list
- */
-static int cfq_dispatch_requests(struct request_queue *q, int force)
-{
- struct cfq_data *cfqd = q->elevator->elevator_data;
- struct cfq_queue *cfqq;
unsigned int max_dispatch;
- if (!cfqd->busy_queues)
- return 0;
-
- if (unlikely(force))
- return cfq_forced_dispatch(cfqd);
-
- cfqq = cfq_select_queue(cfqd);
- if (!cfqq)
- return 0;
-
/*
* Drain async requests before we start sync IO
*/
if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
- return 0;
+ return false;
/*
* If this is an async queue and we have sync IO in flight, let it wait
*/
if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
- return 0;
+ return false;
max_dispatch = cfqd->cfq_quantum;
if (cfq_class_idle(cfqq))
@@ -1322,13 +1284,13 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
* idle queue must always only have a single IO in flight
*/
if (cfq_class_idle(cfqq))
- return 0;
+ return false;
/*
* We have other queues, don't allow more IO from this one
*/
if (cfqd->busy_queues > 1)
- return 0;
+ return false;
/*
* Sole queue user, allow bigger slice
@@ -1352,13 +1314,72 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
max_dispatch = depth;
}
- if (cfqq->dispatched >= max_dispatch)
+ /*
+ * If we're below the current max, allow a dispatch
+ */
+ return cfqq->dispatched < max_dispatch;
+}
+
+/*
+ * Dispatch a request from cfqq, moving them to the request queue
+ * dispatch list.
+ */
+static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ struct request *rq;
+
+ BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
+
+ if (!cfq_may_dispatch(cfqd, cfqq))
+ return false;
+
+ /*
+ * follow expired path, else get first next available
+ */
+ rq = cfq_check_fifo(cfqq);
+ if (!rq)
+ rq = cfqq->next_rq;
+
+ /*
+ * insert request into driver dispatch list
+ */
+ cfq_dispatch_insert(cfqd->queue, rq);
+
+ if (!cfqd->active_cic) {
+ struct cfq_io_context *cic = RQ_CIC(rq);
+
+ atomic_long_inc(&cic->ioc->refcount);
+ cfqd->active_cic = cic;
+ }
+
+ return true;
+}
+
+/*
+ * Find the cfqq that we need to service and move a request from that to the
+ * dispatch list
+ */
+static int cfq_dispatch_requests(struct request_queue *q, int force)
+{
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+ struct cfq_queue *cfqq;
+
+ if (!cfqd->busy_queues)
+ return 0;
+
+ if (unlikely(force))
+ return cfq_forced_dispatch(cfqd);
+
+ cfqq = cfq_select_queue(cfqd);
+ if (!cfqq)
return 0;
/*
- * Dispatch a request from this cfqq
+ * Dispatch a request from this cfqq, if it is allowed
*/
- cfq_dispatch_request(cfqd, cfqq);
+ if (!cfq_dispatch_request(cfqd, cfqq))
+ return 0;
+
cfqq->slice_dispatch++;
cfq_clear_cfqq_must_dispatch(cfqq);
@@ -1399,7 +1420,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
if (unlikely(cfqd->active_queue == cfqq)) {
__cfq_slice_expired(cfqd, cfqq, 0);
- cfq_schedule_dispatch(cfqd, 0);
+ cfq_schedule_dispatch(cfqd);
}
kmem_cache_free(cfq_pool, cfqq);
@@ -1494,7 +1515,7 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
if (unlikely(cfqq == cfqd->active_queue)) {
__cfq_slice_expired(cfqd, cfqq, 0);
- cfq_schedule_dispatch(cfqd, 0);
+ cfq_schedule_dispatch(cfqd);
}
cfq_put_queue(cfqq);
@@ -1658,7 +1679,7 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
}
static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- pid_t pid, int is_sync)
+ pid_t pid, bool is_sync)
{
RB_CLEAR_NODE(&cfqq->rb_node);
RB_CLEAR_NODE(&cfqq->p_node);
@@ -1678,7 +1699,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
}
static struct cfq_queue *
-cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
+cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
struct io_context *ioc, gfp_t gfp_mask)
{
struct cfq_queue *cfqq, *new_cfqq = NULL;
@@ -1742,7 +1763,7 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
}
static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
+cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
gfp_t gfp_mask)
{
const int ioprio = task_ioprio(ioc);
@@ -1977,7 +1998,10 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
(!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic)))
enable_idle = 0;
else if (sample_valid(cic->ttime_samples)) {
- if (cic->ttime_mean > cfqd->cfq_slice_idle)
+ unsigned int slice_idle = cfqd->cfq_slice_idle;
+ if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
+ slice_idle = msecs_to_jiffies(CFQ_MIN_TT);
+ if (cic->ttime_mean > slice_idle)
enable_idle = 0;
else
enable_idle = 1;
@@ -1996,7 +2020,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
* Check if new_cfqq should preempt the currently active queue. Return 0 for
* no or if we aren't sure, a 1 will cause a preempt.
*/
-static int
+static bool
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
struct request *rq)
{
@@ -2004,48 +2028,48 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
cfqq = cfqd->active_queue;
if (!cfqq)
- return 0;
+ return false;
if (cfq_slice_used(cfqq))
- return 1;
+ return true;
if (cfq_class_idle(new_cfqq))
- return 0;
+ return false;
if (cfq_class_idle(cfqq))
- return 1;
+ return true;
/*
* if the new request is sync, but the currently running queue is
* not, let the sync request have priority.
*/
if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
- return 1;
+ return true;
/*
* So both queues are sync. Let the new request get disk time if
* it's a metadata request and the current queue is doing regular IO.
*/
if (rq_is_meta(rq) && !cfqq->meta_pending)
- return 1;
+		return true;
/*
* Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
*/
if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
- return 1;
+ return true;
if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
- return 0;
+ return false;
/*
* if this request is as-good as one we would expect from the
* current cfqq, let it preempt
*/
if (cfq_rq_close(cfqd, rq))
- return 1;
+ return true;
- return 0;
+ return false;
}
/*
@@ -2130,6 +2154,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
cfq_add_rq_rb(rq);
+ rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
list_add_tail(&rq->queuelist, &cfqq->fifo);
cfq_rq_enqueued(cfqd, cfqq, rq);
@@ -2211,7 +2236,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
}
if (!rq_in_driver(cfqd))
- cfq_schedule_dispatch(cfqd, 0);
+ cfq_schedule_dispatch(cfqd);
}
/*
@@ -2309,7 +2334,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_io_context *cic;
const int rw = rq_data_dir(rq);
- const int is_sync = rq_is_sync(rq);
+ const bool is_sync = rq_is_sync(rq);
struct cfq_queue *cfqq;
unsigned long flags;
@@ -2341,7 +2366,7 @@ queue_fail:
if (cic)
put_io_context(cic->ioc);
- cfq_schedule_dispatch(cfqd, 0);
+ cfq_schedule_dispatch(cfqd);
spin_unlock_irqrestore(q->queue_lock, flags);
cfq_log(cfqd, "set_request fail");
return 1;
@@ -2350,7 +2375,7 @@ queue_fail:
static void cfq_kick_queue(struct work_struct *work)
{
struct cfq_data *cfqd =
- container_of(work, struct cfq_data, unplug_work.work);
+ container_of(work, struct cfq_data, unplug_work);
struct request_queue *q = cfqd->queue;
spin_lock_irq(q->queue_lock);
@@ -2404,7 +2429,7 @@ static void cfq_idle_slice_timer(unsigned long data)
expire:
cfq_slice_expired(cfqd, timed_out);
out_kick:
- cfq_schedule_dispatch(cfqd, 0);
+ cfq_schedule_dispatch(cfqd);
out_cont:
spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}
@@ -2412,7 +2437,7 @@ out_cont:
static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
del_timer_sync(&cfqd->idle_slice_timer);
- cancel_delayed_work_sync(&cfqd->unplug_work);
+ cancel_work_sync(&cfqd->unplug_work);
}
static void cfq_put_async_queues(struct cfq_data *cfqd)
@@ -2494,7 +2519,7 @@ static void *cfq_init_queue(struct request_queue *q)
cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
cfqd->idle_slice_timer.data = (unsigned long) cfqd;
- INIT_DELAYED_WORK(&cfqd->unplug_work, cfq_kick_queue);
+ INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
cfqd->cfq_quantum = cfq_quantum;
cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
diff --git a/block/elevator.c b/block/elevator.c
index 1975b619c86d..a847046c6e53 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -1059,9 +1059,7 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
return count;
strlcpy(elevator_name, name, sizeof(elevator_name));
- strstrip(elevator_name);
-
- e = elevator_get(elevator_name);
+ e = elevator_get(strstrip(elevator_name));
if (!e) {
printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
return -EINVAL;
diff --git a/block/genhd.c b/block/genhd.c
index 5a0861da324d..517e4332cb37 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -869,6 +869,7 @@ static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL);
static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
+static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
#ifdef CONFIG_FAIL_MAKE_REQUEST
static struct device_attribute dev_attr_fail =
__ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
@@ -888,6 +889,7 @@ static struct attribute *disk_attrs[] = {
&dev_attr_alignment_offset.attr,
&dev_attr_capability.attr,
&dev_attr_stat.attr,
+ &dev_attr_inflight.attr,
#ifdef CONFIG_FAIL_MAKE_REQUEST
&dev_attr_fail.attr,
#endif
@@ -1053,7 +1055,7 @@ static int diskstats_show(struct seq_file *seqf, void *v)
part_stat_read(hd, merges[1]),
(unsigned long long)part_stat_read(hd, sectors[1]),
jiffies_to_msecs(part_stat_read(hd, ticks[1])),
- hd->in_flight,
+ part_in_flight(hd),
jiffies_to_msecs(part_stat_read(hd, io_ticks)),
jiffies_to_msecs(part_stat_read(hd, time_in_queue))
);
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index fb5be2d95d52..6399e5090df4 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -68,6 +68,12 @@ MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
MODULE_VERSION("3.6.20");
MODULE_LICENSE("GPL");
+static int cciss_allow_hpsa;
+module_param(cciss_allow_hpsa, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(cciss_allow_hpsa,
+ "Prevent cciss driver from accessing hardware known to be "
+	"supported by the hpsa driver");
+
#include "cciss_cmd.h"
#include "cciss.h"
#include <linux/cciss_ioctl.h>
@@ -101,8 +107,6 @@ static const struct pci_device_id cciss_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
- {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
{0,}
};
@@ -123,8 +127,6 @@ static struct board_type products[] = {
{0x409D0E11, "Smart Array 6400 EM", &SA5_access},
{0x40910E11, "Smart Array 6i", &SA5_access},
{0x3225103C, "Smart Array P600", &SA5_access},
- {0x3223103C, "Smart Array P800", &SA5_access},
- {0x3234103C, "Smart Array P400", &SA5_access},
{0x3235103C, "Smart Array P400i", &SA5_access},
{0x3211103C, "Smart Array E200i", &SA5_access},
{0x3212103C, "Smart Array E200", &SA5_access},
@@ -132,6 +134,10 @@ static struct board_type products[] = {
{0x3214103C, "Smart Array E200i", &SA5_access},
{0x3215103C, "Smart Array E200i", &SA5_access},
{0x3237103C, "Smart Array E500", &SA5_access},
+/* controllers below this line are also supported by the hpsa driver. */
+#define HPSA_BOUNDARY 0x3223103C
+ {0x3223103C, "Smart Array P800", &SA5_access},
+ {0x3234103C, "Smart Array P400", &SA5_access},
{0x323D103C, "Smart Array P700m", &SA5_access},
{0x3241103C, "Smart Array P212", &SA5_access},
{0x3243103C, "Smart Array P410", &SA5_access},
@@ -140,7 +146,6 @@ static struct board_type products[] = {
{0x3249103C, "Smart Array P812", &SA5_access},
{0x324A103C, "Smart Array P712m", &SA5_access},
{0x324B103C, "Smart Array P711m", &SA5_access},
- {0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
/* How long to wait (in milliseconds) for board to go into simple mode */
@@ -3754,7 +3759,27 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
__u64 cfg_offset;
__u32 cfg_base_addr;
__u64 cfg_base_addr_index;
- int i, err;
+ int i, prod_index, err;
+
+ subsystem_vendor_id = pdev->subsystem_vendor;
+ subsystem_device_id = pdev->subsystem_device;
+ board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
+ subsystem_vendor_id);
+
+ for (i = 0; i < ARRAY_SIZE(products); i++) {
+ /* Stand aside for hpsa driver on request */
+ if (cciss_allow_hpsa && products[i].board_id == HPSA_BOUNDARY)
+ return -ENODEV;
+ if (board_id == products[i].board_id)
+ break;
+ }
+ prod_index = i;
+ if (prod_index == ARRAY_SIZE(products)) {
+ dev_warn(&pdev->dev,
+ "unrecognized board ID: 0x%08lx, ignoring.\n",
+ (unsigned long) board_id);
+ return -ENODEV;
+ }
/* check to see if controller has been disabled */
/* BEFORE trying to enable it */
@@ -3778,11 +3803,6 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
return err;
}
- subsystem_vendor_id = pdev->subsystem_vendor;
- subsystem_device_id = pdev->subsystem_device;
- board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
- subsystem_vendor_id);
-
#ifdef CCISS_DEBUG
printk("command = %x\n", command);
printk("irq = %x\n", pdev->irq);
@@ -3868,14 +3888,9 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
* leave a little room for ioctl calls.
*/
c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
- for (i = 0; i < ARRAY_SIZE(products); i++) {
- if (board_id == products[i].board_id) {
- c->product_name = products[i].product_name;
- c->access = *(products[i].access);
- c->nr_cmds = c->max_commands - 4;
- break;
- }
- }
+ c->product_name = products[prod_index].product_name;
+ c->access = *(products[prod_index].access);
+ c->nr_cmds = c->max_commands - 4;
if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
(readb(&c->cfgtable->Signature[1]) != 'I') ||
(readb(&c->cfgtable->Signature[2]) != 'S') ||
@@ -3884,27 +3899,6 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
err = -ENODEV;
goto err_out_free_res;
}
- /* We didn't find the controller in our list. We know the
- * signature is valid. If it's an HP device let's try to
- * bind to the device and fire it up. Otherwise we bail.
- */
- if (i == ARRAY_SIZE(products)) {
- if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
- c->product_name = products[i-1].product_name;
- c->access = *(products[i-1].access);
- c->nr_cmds = c->max_commands - 4;
- printk(KERN_WARNING "cciss: This is an unknown "
- "Smart Array controller.\n"
- "cciss: Please update to the latest driver "
- "available from www.hp.com.\n");
- } else {
- printk(KERN_WARNING "cciss: Sorry, I don't know how"
- " to access the Smart Array controller %08lx\n"
- , (unsigned long)board_id);
- err = -ENODEV;
- goto err_out_free_res;
- }
- }
#ifdef CONFIG_X86
{
/* Need to enable prefetch in the SCSI core for 6400 in x86 */
@@ -4254,7 +4248,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
mutex_init(&hba[i]->busy_shutting_down);
if (cciss_pci_init(hba[i], pdev) != 0)
- goto clean0;
+ goto clean_no_release_regions;
sprintf(hba[i]->devname, "cciss%d", i);
hba[i]->ctlr = i;
@@ -4391,13 +4385,14 @@ clean2:
clean1:
cciss_destroy_hba_sysfs_entry(hba[i]);
clean0:
+ pci_release_regions(pdev);
+clean_no_release_regions:
hba[i]->busy_initializing = 0;
/*
* Deliberately omit pci_disable_device(): it does something nasty to
* Smart Array controllers that pci_enable_device does not undo
*/
- pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
free_hba(i);
return -1;
diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
index aac0985a572b..31e7c91c2d9d 100644
--- a/drivers/char/genrtc.c
+++ b/drivers/char/genrtc.c
@@ -43,6 +43,7 @@
#define RTC_VERSION "1.07"
#include <linux/module.h>
+#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/miscdevice.h>
#include <linux/fcntl.h>
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index e0d0f8b2696b..bc4ab3e54550 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -74,6 +74,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
+#include <linux/sched.h>
#include <linux/sysctl.h>
#include <linux/wait.h>
#include <linux/bcd.h>
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index fd3dced97776..8c262aaf7c26 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -36,6 +36,7 @@
*/
#include <linux/module.h>
+#include <linux/sched.h>
#include <linux/input.h>
#include <linux/pci.h>
#include <linux/init.h>
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index be34d32906bd..7d05c4bb201e 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1066,7 +1066,7 @@ EXPORT_SYMBOL_GPL(hid_report_raw_event);
* @type: HID report type (HID_*_REPORT)
* @data: report contents
* @size: size of data parameter
- * @interrupt: called from atomic?
+ * @interrupt: distinguish between interrupt and control transfers
*
* This is data entry for lower layers.
*/
diff --git a/drivers/hid/hid-twinhan.c b/drivers/hid/hid-twinhan.c
index b05f602c051e..c40afc57fc8f 100644
--- a/drivers/hid/hid-twinhan.c
+++ b/drivers/hid/hid-twinhan.c
@@ -132,12 +132,12 @@ static struct hid_driver twinhan_driver = {
.input_mapping = twinhan_input_mapping,
};
-static int twinhan_init(void)
+static int __init twinhan_init(void)
{
return hid_register_driver(&twinhan_driver);
}
-static void twinhan_exit(void)
+static void __exit twinhan_exit(void)
{
hid_unregister_driver(&twinhan_driver);
}
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index ba05275e5104..cdd136942bca 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -48,10 +48,9 @@ static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count,
char *report;
DECLARE_WAITQUEUE(wait, current);
- while (ret == 0) {
-
- mutex_lock(&list->read_mutex);
+ mutex_lock(&list->read_mutex);
+ while (ret == 0) {
if (list->head == list->tail) {
add_wait_queue(&list->hidraw->wait, &wait);
set_current_state(TASK_INTERRUPTIBLE);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 23e76fe0d359..376f1ab48a24 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -130,7 +130,7 @@ struct mapped_device {
/*
* A list of ios that arrived while we were suspended.
*/
- atomic_t pending;
+ atomic_t pending[2];
wait_queue_head_t wait;
struct work_struct work;
struct bio_list deferred;
@@ -453,13 +453,14 @@ static void start_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
int cpu;
+ int rw = bio_data_dir(io->bio);
io->start_time = jiffies;
cpu = part_stat_lock();
part_round_stats(cpu, &dm_disk(md)->part0);
part_stat_unlock();
- dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
+ dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
}
static void end_io_acct(struct dm_io *io)
@@ -479,8 +480,9 @@ static void end_io_acct(struct dm_io *io)
* After this is decremented the bio must not be touched if it is
* a barrier.
*/
- dm_disk(md)->part0.in_flight = pending =
- atomic_dec_return(&md->pending);
+ dm_disk(md)->part0.in_flight[rw] = pending =
+ atomic_dec_return(&md->pending[rw]);
+ pending += atomic_read(&md->pending[rw^0x1]);
/* nudge anyone waiting on suspend queue */
if (!pending)
@@ -1785,7 +1787,8 @@ static struct mapped_device *alloc_dev(int minor)
if (!md->disk)
goto bad_disk;
- atomic_set(&md->pending, 0);
+ atomic_set(&md->pending[0], 0);
+ atomic_set(&md->pending[1], 0);
init_waitqueue_head(&md->wait);
INIT_WORK(&md->work, dm_wq_work);
init_waitqueue_head(&md->eventq);
@@ -2088,7 +2091,8 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
break;
}
spin_unlock_irqrestore(q->queue_lock, flags);
- } else if (!atomic_read(&md->pending))
+ } else if (!atomic_read(&md->pending[0]) &&
+ !atomic_read(&md->pending[1]))
break;
if (interruptible == TASK_INTERRUPTIBLE &&
diff --git a/drivers/mfd/twl4030-core.c b/drivers/mfd/twl4030-core.c
index e424cf6d8e9e..e832e975da60 100644
--- a/drivers/mfd/twl4030-core.c
+++ b/drivers/mfd/twl4030-core.c
@@ -480,7 +480,6 @@ static int
add_children(struct twl4030_platform_data *pdata, unsigned long features)
{
struct device *child;
- struct device *usb_transceiver = NULL;
if (twl_has_bci() && pdata->bci && !(features & TPS_SUBSET)) {
child = add_child(3, "twl4030_bci",
@@ -532,16 +531,61 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
}
if (twl_has_usb() && pdata->usb) {
+
+ static struct regulator_consumer_supply usb1v5 = {
+ .supply = "usb1v5",
+ };
+ static struct regulator_consumer_supply usb1v8 = {
+ .supply = "usb1v8",
+ };
+ static struct regulator_consumer_supply usb3v1 = {
+ .supply = "usb3v1",
+ };
+
+		/* First add the regulators so they can be used by the transceiver */
+ if (twl_has_regulator()) {
+ /* this is a template that gets copied */
+ struct regulator_init_data usb_fixed = {
+ .constraints.valid_modes_mask =
+ REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .constraints.valid_ops_mask =
+ REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ };
+
+ child = add_regulator_linked(TWL4030_REG_VUSB1V5,
+ &usb_fixed, &usb1v5, 1);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ child = add_regulator_linked(TWL4030_REG_VUSB1V8,
+ &usb_fixed, &usb1v8, 1);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ child = add_regulator_linked(TWL4030_REG_VUSB3V1,
+ &usb_fixed, &usb3v1, 1);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ }
+
child = add_child(0, "twl4030_usb",
pdata->usb, sizeof(*pdata->usb),
true,
/* irq0 = USB_PRES, irq1 = USB */
pdata->irq_base + 8 + 2, pdata->irq_base + 4);
+
if (IS_ERR(child))
return PTR_ERR(child);
/* we need to connect regulators to this transceiver */
- usb_transceiver = child;
+ if (twl_has_regulator() && child) {
+ usb1v5.dev = child;
+ usb1v8.dev = child;
+ usb3v1.dev = child;
+ }
}
if (twl_has_watchdog()) {
@@ -580,47 +624,6 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
return PTR_ERR(child);
}
- if (twl_has_regulator() && usb_transceiver) {
- static struct regulator_consumer_supply usb1v5 = {
- .supply = "usb1v5",
- };
- static struct regulator_consumer_supply usb1v8 = {
- .supply = "usb1v8",
- };
- static struct regulator_consumer_supply usb3v1 = {
- .supply = "usb3v1",
- };
-
- /* this is a template that gets copied */
- struct regulator_init_data usb_fixed = {
- .constraints.valid_modes_mask =
- REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .constraints.valid_ops_mask =
- REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- };
-
- usb1v5.dev = usb_transceiver;
- usb1v8.dev = usb_transceiver;
- usb3v1.dev = usb_transceiver;
-
- child = add_regulator_linked(TWL4030_REG_VUSB1V5, &usb_fixed,
- &usb1v5, 1);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator_linked(TWL4030_REG_VUSB1V8, &usb_fixed,
- &usb1v8, 1);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator_linked(TWL4030_REG_VUSB3V1, &usb_fixed,
- &usb3v1, 1);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
/* maybe add LDOs that are omitted on cost-reduced parts */
if (twl_has_regulator() && !(features & TPS_SUBSET)) {
child = add_regulator(TWL4030_REG_VPLL2, pdata->vpll2);
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index 9693b0fd323d..0bd898c94759 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index 83da596e2052..58c66819f39b 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/fcntl.h>
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index a52f29c72c33..f1340faaf022 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/fcntl.h>
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
index 2b7ae366ceb1..5df60a6b6776 100644
--- a/drivers/oprofile/event_buffer.c
+++ b/drivers/oprofile/event_buffer.c
@@ -35,12 +35,23 @@ static size_t buffer_pos;
/* atomic_t because wait_event checks it outside of buffer_mutex */
static atomic_t buffer_ready = ATOMIC_INIT(0);
-/* Add an entry to the event buffer. When we
- * get near to the end we wake up the process
- * sleeping on the read() of the file.
+/*
+ * Add an entry to the event buffer. When we get near to the end we
+ * wake up the process sleeping on the read() of the file. To protect
+ * the event_buffer, this function may only be called while buffer_mutex
+ * is held.
*/
void add_event_entry(unsigned long value)
{
+ /*
+ * This shouldn't happen since all workqueues or handlers are
+ * canceled or flushed before the event buffer is freed.
+ */
+ if (!event_buffer) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+
if (buffer_pos == buffer_size) {
atomic_inc(&oprofile_stats.event_lost_overflow);
return;
@@ -69,7 +80,6 @@ void wake_up_buffer_waiter(void)
int alloc_event_buffer(void)
{
- int err = -ENOMEM;
unsigned long flags;
spin_lock_irqsave(&oprofilefs_lock, flags);
@@ -80,21 +90,22 @@ int alloc_event_buffer(void)
if (buffer_watershed >= buffer_size)
return -EINVAL;
+ buffer_pos = 0;
event_buffer = vmalloc(sizeof(unsigned long) * buffer_size);
if (!event_buffer)
- goto out;
+ return -ENOMEM;
- err = 0;
-out:
- return err;
+ return 0;
}
void free_event_buffer(void)
{
+ mutex_lock(&buffer_mutex);
vfree(event_buffer);
-
+ buffer_pos = 0;
event_buffer = NULL;
+ mutex_unlock(&buffer_mutex);
}
@@ -167,6 +178,12 @@ static ssize_t event_buffer_read(struct file *file, char __user *buf,
mutex_lock(&buffer_mutex);
+ /* May happen if the buffer is freed during pending reads. */
+ if (!event_buffer) {
+ retval = -EINTR;
+ goto out;
+ }
+
atomic_set(&buffer_ready, 0);
retval = -EFAULT;
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 14bbaa17e2ca..22b02c6df854 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -354,6 +354,7 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
struct acpi_dmar_hardware_unit *drhd;
struct acpi_dmar_reserved_memory *rmrr;
struct acpi_dmar_atsr *atsr;
+ struct acpi_dmar_rhsa *rhsa;
switch (header->type) {
case ACPI_DMAR_TYPE_HARDWARE_UNIT:
@@ -375,6 +376,12 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
atsr = container_of(header, struct acpi_dmar_atsr, header);
printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
break;
+ case ACPI_DMAR_HARDWARE_AFFINITY:
+ rhsa = container_of(header, struct acpi_dmar_rhsa, header);
+ printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
+ (unsigned long long)rhsa->base_address,
+ rhsa->proximity_domain);
+ break;
}
}
@@ -459,9 +466,13 @@ parse_dmar_table(void)
ret = dmar_parse_one_atsr(entry_header);
#endif
break;
+ case ACPI_DMAR_HARDWARE_AFFINITY:
+ /* We don't do anything with RHSA (yet?) */
+ break;
default:
printk(KERN_WARNING PREFIX
- "Unknown DMAR structure type\n");
+ "Unknown DMAR structure type %d\n",
+ entry_header->type);
ret = 0; /* for forward compatibility */
break;
}
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index 53836001d511..9c6a9fd26812 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -32,6 +32,7 @@
#include <asm/io.h> /* for read? and write? functions */
#include <linux/delay.h> /* for delays */
#include <linux/mutex.h>
+#include <linux/sched.h> /* for signal_pending() */
#define MY_NAME "cpqphp"
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 855dd7ca47f3..b1e97e682500 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -48,6 +48,7 @@
#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
+#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
#define IOAPIC_RANGE_START (0xfee00000)
#define IOAPIC_RANGE_END (0xfeefffff)
@@ -94,6 +95,7 @@ static inline unsigned long virt_to_dma_pfn(void *p)
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;
+static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;
/*
@@ -1934,6 +1936,9 @@ error:
}
static int iommu_identity_mapping;
+#define IDENTMAP_ALL 1
+#define IDENTMAP_GFX 2
+#define IDENTMAP_AZALIA 4
static int iommu_domain_identity_map(struct dmar_domain *domain,
unsigned long long start,
@@ -2151,8 +2156,14 @@ static int domain_add_dev_info(struct dmar_domain *domain,
static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
{
- if (iommu_identity_mapping == 2)
- return IS_GFX_DEVICE(pdev);
+ if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
+ return 1;
+
+ if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
+ return 1;
+
+ if (!(iommu_identity_mapping & IDENTMAP_ALL))
+ return 0;
/*
* We want to start off with all devices in the 1:1 domain, and
@@ -2332,11 +2343,14 @@ int __init init_dmars(void)
}
if (iommu_pass_through)
- iommu_identity_mapping = 1;
+ iommu_identity_mapping |= IDENTMAP_ALL;
+
#ifdef CONFIG_DMAR_BROKEN_GFX_WA
- else
- iommu_identity_mapping = 2;
+ iommu_identity_mapping |= IDENTMAP_GFX;
#endif
+
+ check_tylersburg_isoch();
+
/*
* If pass through is not set or not enabled, setup context entries for
* identity mappings for rmrr, gfx, and isa and may fall back to static
@@ -3670,3 +3684,61 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
+
+/* On Tylersburg chipsets, some BIOSes have been known to enable the
+ ISOCH DMAR unit for the Azalia sound device, but not give it any
+ TLB entries, which causes it to deadlock. Check for that. We do
+ this in a function called from init_dmars(), instead of in a PCI
+ quirk, because we don't want to print the obnoxious "BIOS broken"
+ message if VT-d is actually disabled.
+*/
+static void __init check_tylersburg_isoch(void)
+{
+ struct pci_dev *pdev;
+ uint32_t vtisochctrl;
+
+ /* If there's no Azalia in the system anyway, forget it. */
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
+ if (!pdev)
+ return;
+ pci_dev_put(pdev);
+
+ /* System Management Registers. Might be hidden, in which case
+ we can't do the sanity check. But that's OK, because the
+ known-broken BIOSes _don't_ actually hide it, so far. */
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
+ if (!pdev)
+ return;
+
+ if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
+ pci_dev_put(pdev);
+ return;
+ }
+
+ pci_dev_put(pdev);
+
+ /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
+ if (vtisochctrl & 1)
+ return;
+
+ /* Drop all bits other than the number of TLB entries */
+ vtisochctrl &= 0x1c;
+
+ /* If we have the recommended number of TLB entries (16), fine. */
+ if (vtisochctrl == 0x10)
+ return;
+
+ /* Zero TLB entries? You get to ride the short bus to school. */
+ if (!vtisochctrl) {
+ WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
+ "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+ dmi_get_system_info(DMI_BIOS_VENDOR),
+ dmi_get_system_info(DMI_BIOS_VERSION),
+ dmi_get_system_info(DMI_PRODUCT_VERSION));
+ iommu_identity_mapping |= IDENTMAP_AZALIA;
+ return;
+ }
+
+ printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
+ vtisochctrl);
+}
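
The register decode in check_tylersburg_isoch() is compact; the following standalone sketch restates just that decode so the three outcomes are easier to see. The 0x188 config offset, the meaning of bit 0 and of the 0x1c field are taken from the quirk above and assumed correct here, not independently verified.

#include <stdint.h>
#include <stdio.h>

/* Mirror of the VTISOCHCTRL checks above: returns 1 only in the
 * "zero TLB entries" case, where the quirk identity-maps Azalia. */
static int azalia_needs_identity_map(uint32_t vtisochctrl)
{
        if (vtisochctrl & 1)            /* Azalia routed to the non-isoch unit: fine */
                return 0;
        vtisochctrl &= 0x1c;            /* keep only the TLB-entry count field */
        if (vtisochctrl == 0x10)        /* recommended 16 entries: fine */
                return 0;
        return vtisochctrl == 0;        /* no TLB entries at all: broken BIOS */
}

int main(void)
{
        printf("%d %d %d\n",
               azalia_needs_identity_map(0x01),   /* 0: non-isoch routing */
               azalia_needs_identity_map(0x10),   /* 0: 16 TLB entries */
               azalia_needs_identity_map(0x00));  /* 1: IDENTMAP_AZALIA case */
        return 0;
}
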
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 3835871f4832..4e4c295a049f 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2723,17 +2723,6 @@ int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
return 1;
}
-static int __devinit pci_init(void)
-{
- struct pci_dev *dev = NULL;
-
- while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
- pci_fixup_device(pci_fixup_final, dev);
- }
-
- return 0;
-}
-
static int __init pci_setup(char *str)
{
while (str) {
@@ -2771,8 +2760,6 @@ static int __init pci_setup(char *str)
}
early_param("pci", pci_setup);
-device_initcall(pci_init);
-
EXPORT_SYMBOL(pci_reenable_device);
EXPORT_SYMBOL(pci_enable_device_io);
EXPORT_SYMBOL(pci_enable_device_mem);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index efa6534a6593..a790b1771f9f 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2591,6 +2591,19 @@ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
}
pci_do_fixups(dev, start, end);
}
+
+static int __init pci_apply_final_quirks(void)
+{
+ struct pci_dev *dev = NULL;
+
+ while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+ pci_fixup_device(pci_fixup_final, dev);
+ }
+
+ return 0;
+}
+
+fs_initcall_sync(pci_apply_final_quirks);
#else
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {}
#endif
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 72743d360509..7a520a862f49 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -2321,7 +2321,18 @@ static int ext3_commit_super(struct super_block *sb,
if (!sbh)
return error;
- es->s_wtime = cpu_to_le32(get_seconds());
+ /*
+ * If the file system is mounted read-only, don't update the
+ * superblock write time. This avoids updating the superblock
+ * write time when we are mounting the root file system
+ * read/only but we need to replay the journal; at that point,
+ * for people who are east of GMT and who make their clock
+ * tick in localtime for Windows bug-for-bug compatibility,
+ * the clock is set in the future, and this will cause e2fsck
+ * to complain and force a full file system check.
+ */
+ if (!(sb->s_flags & MS_RDONLY))
+ es->s_wtime = cpu_to_le32(get_seconds());
es->s_free_blocks_count = cpu_to_le32(ext3_count_free_blocks(sb));
es->s_free_inodes_count = cpu_to_le32(ext3_count_free_inodes(sb));
BUFFER_TRACE(sbh, "marking dirty");
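
The timezone argument in the comment can be made concrete with a little arithmetic; a hedged sketch with illustrative numbers (not from the patch):

#include <stdio.h>
#include <time.h>

int main(void)
{
        time_t real_utc = 1256000000;           /* hypothetical "now", real UTC */
        long tz_east = 3 * 60 * 60;             /* clock kept in localtime, UTC+3 */
        time_t clock_read_as_utc = real_utc + tz_east;

        /* A superblock stamped from this clock looks three hours in the
         * future once the clock is interpreted correctly, which is what
         * makes e2fsck complain and force a full check. */
        printf("apparent s_wtime skew: %ld seconds\n",
               (long)(clock_read_as_utc - real_utc));
        return 0;
}
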
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index f38fee0311a7..7b685e10cbad 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -248,11 +248,19 @@ ssize_t part_stat_show(struct device *dev,
part_stat_read(p, merges[WRITE]),
(unsigned long long)part_stat_read(p, sectors[WRITE]),
jiffies_to_msecs(part_stat_read(p, ticks[WRITE])),
- p->in_flight,
+ part_in_flight(p),
jiffies_to_msecs(part_stat_read(p, io_ticks)),
jiffies_to_msecs(part_stat_read(p, time_in_queue)));
}
+ssize_t part_inflight_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hd_struct *p = dev_to_part(dev);
+
+ return sprintf(buf, "%8u %8u\n", p->in_flight[0], p->in_flight[1]);
+}
+
#ifdef CONFIG_FAIL_MAKE_REQUEST
ssize_t part_fail_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -281,6 +289,7 @@ static DEVICE_ATTR(start, S_IRUGO, part_start_show, NULL);
static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL);
static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
+static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
#ifdef CONFIG_FAIL_MAKE_REQUEST
static struct device_attribute dev_attr_fail =
__ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
@@ -292,6 +301,7 @@ static struct attribute *part_attrs[] = {
&dev_attr_size.attr,
&dev_attr_alignment_offset.attr,
&dev_attr_stat.attr,
+ &dev_attr_inflight.attr,
#ifdef CONFIG_FAIL_MAKE_REQUEST
&dev_attr_fail.attr,
#endif
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 25119041e034..221cecd86bd3 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1172,11 +1172,7 @@ static inline void put_dev_sector(Sector p)
}
struct work_struct;
-struct delayed_work;
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
-int kblockd_schedule_delayed_work(struct request_queue *q,
- struct delayed_work *work,
- unsigned long delay);
#define MODULE_ALIAS_BLOCKDEV(major,minor) \
MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 7beaa21b3880..297df45ffd0a 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -98,7 +98,7 @@ struct hd_struct {
int make_it_fail;
#endif
unsigned long stamp;
- int in_flight;
+ int in_flight[2];
#ifdef CONFIG_SMP
struct disk_stats *dkstats;
#else
@@ -322,18 +322,23 @@ static inline void free_part_stats(struct hd_struct *part)
#define part_stat_sub(cpu, gendiskp, field, subnd) \
part_stat_add(cpu, gendiskp, field, -subnd)
-static inline void part_inc_in_flight(struct hd_struct *part)
+static inline void part_inc_in_flight(struct hd_struct *part, int rw)
{
- part->in_flight++;
+ part->in_flight[rw]++;
if (part->partno)
- part_to_disk(part)->part0.in_flight++;
+ part_to_disk(part)->part0.in_flight[rw]++;
}
-static inline void part_dec_in_flight(struct hd_struct *part)
+static inline void part_dec_in_flight(struct hd_struct *part, int rw)
{
- part->in_flight--;
+ part->in_flight[rw]--;
if (part->partno)
- part_to_disk(part)->part0.in_flight--;
+ part_to_disk(part)->part0.in_flight[rw]--;
+}
+
+static inline int part_in_flight(struct hd_struct *part)
+{
+ return part->in_flight[0] + part->in_flight[1];
}
/* block/blk-core.c */
@@ -546,6 +551,8 @@ extern ssize_t part_size_show(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t part_stat_show(struct device *dev,
struct device_attribute *attr, char *buf);
+extern ssize_t part_inflight_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
#ifdef CONFIG_FAIL_MAKE_REQUEST
extern ssize_t part_fail_show(struct device *dev,
struct device_attribute *attr, char *buf);
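
part_inflight_show() above emits two "%8u" counters per partition; a minimal userspace sketch of reading them follows. The sysfs path and the 0=read/1=write ordering are assumptions based on the in_flight[rw] indexing in the helpers above, not something the patch states.

#include <stdio.h>

int main(void)
{
        unsigned int rd, wr;
        FILE *f = fopen("/sys/block/sda/sda1/inflight", "r"); /* hypothetical partition */

        if (!f) {
                perror("fopen");
                return 1;
        }
        if (fscanf(f, "%u %u", &rd, &wr) != 2) {
                fclose(f);
                fprintf(stderr, "unexpected inflight format\n");
                return 1;
        }
        fclose(f);
        printf("in flight: %u reads, %u writes\n", rd, wr);
        return 0;
}
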
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d3cd23f30039..f4e3184fa054 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -659,6 +659,12 @@ extern int do_sysinfo(struct sysinfo *info);
#endif /* __KERNEL__ */
+#ifndef __EXPORTED_HEADERS__
+#ifndef __KERNEL__
+#warning Attempt to use kernel headers from user space, see http://kernelnewbies.org/KernelHeaders
+#endif /* __KERNEL__ */
+#endif /* __EXPORTED_HEADERS__ */
+
#define SI_LOAD_SHIFT 16
struct sysinfo {
long uptime; /* Seconds since boot */
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 3815ac1d58b2..9af56723c096 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -142,6 +142,11 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
#ifdef CONFIG_LOCK_STAT
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
+static inline u64 lockstat_clock(void)
+{
+ return cpu_clock(smp_processor_id());
+}
+
static int lock_point(unsigned long points[], unsigned long ip)
{
int i;
@@ -158,7 +163,7 @@ static int lock_point(unsigned long points[], unsigned long ip)
return i;
}
-static void lock_time_inc(struct lock_time *lt, s64 time)
+static void lock_time_inc(struct lock_time *lt, u64 time)
{
if (time > lt->max)
lt->max = time;
@@ -234,12 +239,12 @@ static void put_lock_stats(struct lock_class_stats *stats)
static void lock_release_holdtime(struct held_lock *hlock)
{
struct lock_class_stats *stats;
- s64 holdtime;
+ u64 holdtime;
if (!lock_stat)
return;
- holdtime = sched_clock() - hlock->holdtime_stamp;
+ holdtime = lockstat_clock() - hlock->holdtime_stamp;
stats = get_lock_stats(hlock_class(hlock));
if (hlock->read)
@@ -2792,7 +2797,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
hlock->references = references;
#ifdef CONFIG_LOCK_STAT
hlock->waittime_stamp = 0;
- hlock->holdtime_stamp = sched_clock();
+ hlock->holdtime_stamp = lockstat_clock();
#endif
if (check == 2 && !mark_irqflags(curr, hlock))
@@ -3322,7 +3327,7 @@ found_it:
if (hlock->instance != lock)
return;
- hlock->waittime_stamp = sched_clock();
+ hlock->waittime_stamp = lockstat_clock();
contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
contending_point = lock_point(hlock_class(hlock)->contending_point,
@@ -3345,8 +3350,7 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
struct held_lock *hlock, *prev_hlock;
struct lock_class_stats *stats;
unsigned int depth;
- u64 now;
- s64 waittime = 0;
+ u64 now, waittime = 0;
int i, cpu;
depth = curr->lockdep_depth;
@@ -3374,7 +3378,7 @@ found_it:
cpu = smp_processor_id();
if (hlock->waittime_stamp) {
- now = sched_clock();
+ now = lockstat_clock();
waittime = now - hlock->waittime_stamp;
hlock->holdtime_stamp = now;
}
diff --git a/kernel/sched.c b/kernel/sched.c
index 76c0e9691fc0..e88689522e66 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -676,6 +676,7 @@ inline void update_rq_clock(struct rq *rq)
/**
* runqueue_is_locked
+ * @cpu: the processor in question.
*
* Returns true if the current cpu runqueue is locked.
* This interface allows printk to be called with the runqueue lock
@@ -2311,7 +2312,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
{
int cpu, orig_cpu, this_cpu, success = 0;
unsigned long flags;
- struct rq *rq;
+ struct rq *rq, *orig_rq;
if (!sched_feat(SYNC_WAKEUPS))
wake_flags &= ~WF_SYNC;
@@ -2319,7 +2320,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
this_cpu = get_cpu();
smp_wmb();
- rq = task_rq_lock(p, &flags);
+ rq = orig_rq = task_rq_lock(p, &flags);
update_rq_clock(rq);
if (!(p->state & state))
goto out;
@@ -2350,6 +2351,10 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
set_task_cpu(p, cpu);
rq = task_rq_lock(p, &flags);
+
+ if (rq != orig_rq)
+ update_rq_clock(rq);
+
WARN_ON(p->state != TASK_WAKING);
cpu = task_cpu(p);
@@ -3656,6 +3661,7 @@ static void update_group_power(struct sched_domain *sd, int cpu)
/**
* update_sg_lb_stats - Update sched_group's statistics for load balancing.
+ * @sd: The sched_domain whose statistics are to be updated.
* @group: sched_group whose statistics are to be updated.
* @this_cpu: Cpu for which load balance is currently performed.
* @idle: Idle status of this_cpu
@@ -6718,9 +6724,6 @@ EXPORT_SYMBOL(yield);
/*
* This task is about to go to sleep on IO. Increment rq->nr_iowait so
* that process accounting knows that this is a task in IO wait state.
- *
- * But don't do that if it is a deliberate, throttling IO wait (this task
- * has set its backing_dev_info: the queue against which it should throttle)
*/
void __sched io_schedule(void)
{
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 45068269ebb1..c820b0310a12 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1393,7 +1393,7 @@ int trace_array_vprintk(struct trace_array *tr,
int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
- return trace_array_printk(&global_trace, ip, fmt, args);
+ return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 23245785927f..98a6cc5c64ed 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -933,8 +933,9 @@ static void postfix_clear(struct filter_parse_state *ps)
while (!list_empty(&ps->postfix)) {
elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
- kfree(elt->operand);
list_del(&elt->list);
+ kfree(elt->operand);
+ kfree(elt);
}
}
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 3d3accb1f800..5a37e2055717 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -92,7 +92,7 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
"BdiDirtyThresh: %8lu kB\n"
"DirtyThresh: %8lu kB\n"
"BackgroundThresh: %8lu kB\n"
- "WriteBack threads:%8lu\n"
+ "WritebackThreads: %8lu\n"
"b_dirty: %8lu\n"
"b_io: %8lu\n"
"b_more_io: %8lu\n"
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index a3b14090b1fb..2c5d79236ead 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -566,7 +566,8 @@ static void balance_dirty_pages(struct address_space *mapping,
if (pages_written >= write_chunk)
break; /* We've done our duty */
- schedule_timeout_interruptible(pause);
+ __set_current_state(TASK_INTERRUPTIBLE);
+ io_schedule_timeout(pause);
/*
* Increase the delay for each loop, up to our previous
diff --git a/mm/percpu.c b/mm/percpu.c
index 4a048abad043..6af78c1ee704 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1870,13 +1870,14 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
max_distance = 0;
for (group = 0; group < ai->nr_groups; group++) {
ai->groups[group].base_offset = areas[group] - base;
- max_distance = max(max_distance, ai->groups[group].base_offset);
+ max_distance = max_t(size_t, max_distance,
+ ai->groups[group].base_offset);
}
max_distance += ai->unit_size;
/* warn if maximum distance is further than 75% of vmalloc space */
if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
- pr_warning("PERCPU: max_distance=0x%lx too large for vmalloc "
+ pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
"space 0x%lx\n",
max_distance, VMALLOC_END - VMALLOC_START);
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index 4f9c1908593b..c67e73ecd5be 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -100,7 +100,7 @@ as-option = $(call try-run,\
# Usage: cflags-y += $(call as-instr,instr,option1,option2)
as-instr = $(call try-run,\
- echo -e "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -xassembler -o "$$TMP" -,$(2),$(3))
+ /bin/echo -e "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -xassembler -o "$$TMP" -,$(2),$(3))
# cc-option
# Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 7a7778746ea6..ffdafb26f539 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -208,7 +208,7 @@ cmd_gzip = (cat $(filter-out FORCE,$^) | gzip -f -9 > $@) || \
# Bzip2 and LZMA do not include size in file... so we have to fake that;
# append the size as a 32-bit littleendian number as gzip does.
-size_append = echo -ne $(shell \
+size_append = /bin/echo -ne $(shell \
dec_size=0; \
for F in $1; do \
fsize=$$(stat -c "%s" $$F); \
diff --git a/scripts/checkkconfigsymbols.sh b/scripts/checkkconfigsymbols.sh
index 39677c82747a..46be3c5a62b7 100755
--- a/scripts/checkkconfigsymbols.sh
+++ b/scripts/checkkconfigsymbols.sh
@@ -9,7 +9,7 @@ paths="$@"
# Doing this once at the beginning saves a lot of time, on a cache-hot tree.
Kconfigs="`find . -name 'Kconfig' -o -name 'Kconfig*[^~]'`"
-echo -e "File list \tundefined symbol used"
+/bin/echo -e "File list \tundefined symbol used"
find $paths -name '*.[chS]' -o -name 'Makefile' -o -name 'Makefile*[^~]'| while read i
do
# Output the bare Kconfig variable and the filename; the _MODULE part at
@@ -54,6 +54,6 @@ while read symb files; do
# beyond the purpose of this script.
symb_bare=`echo $symb | sed -e 's/_MODULE//'`
if ! grep -q "\<$symb_bare\>" $Kconfigs; then
- echo -e "$files: \t$symb"
+ /bin/echo -e "$files: \t$symb"
fi
done|sort
diff --git a/scripts/headers_install.pl b/scripts/headers_install.pl
index c6ae4052ab43..b89ca2c58fdb 100644
--- a/scripts/headers_install.pl
+++ b/scripts/headers_install.pl
@@ -20,7 +20,7 @@ use strict;
my ($readdir, $installdir, $arch, @files) = @ARGV;
-my $unifdef = "scripts/unifdef -U__KERNEL__";
+my $unifdef = "scripts/unifdef -U__KERNEL__ -D__EXPORTED_HEADERS__";
foreach my $file (@files) {
local *INFILE;
diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h
index 6a12dd9f1181..bce3d0fe6fbd 100755
--- a/scripts/mkcompile_h
+++ b/scripts/mkcompile_h
@@ -1,3 +1,5 @@
+#!/bin/sh
+
TARGET=$1
ARCH=$2
SMP=$3
@@ -50,7 +52,7 @@ UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP"
# Truncate to maximum length
UTS_LEN=64
-UTS_TRUNCATE="sed -e s/\(.\{1,$UTS_LEN\}\).*/\1/"
+UTS_TRUNCATE="cut -b -$UTS_LEN"
# Generate a temporary compile.h
@@ -66,9 +68,13 @@ UTS_TRUNCATE="sed -e s/\(.\{1,$UTS_LEN\}\).*/\1/"
echo \#define LINUX_COMPILE_HOST \"`hostname | $UTS_TRUNCATE`\"
if [ -x /bin/dnsdomainname ]; then
- echo \#define LINUX_COMPILE_DOMAIN \"`dnsdomainname | $UTS_TRUNCATE`\"
+ domain=`dnsdomainname 2> /dev/null`
elif [ -x /bin/domainname ]; then
- echo \#define LINUX_COMPILE_DOMAIN \"`domainname | $UTS_TRUNCATE`\"
+ domain=`domainname 2> /dev/null`
+ fi
+
+ if [ -n "$domain" ]; then
+ echo \#define LINUX_COMPILE_DOMAIN \"`echo $domain | $UTS_TRUNCATE`\"
else
echo \#define LINUX_COMPILE_DOMAIN
fi
diff --git a/scripts/package/Makefile b/scripts/package/Makefile
index fa4a0a17b7e0..f67cc885c807 100644
--- a/scripts/package/Makefile
+++ b/scripts/package/Makefile
@@ -18,6 +18,9 @@
# e) generate the rpm files, based on kernel.spec
# - Use /. to avoid tar packing just the symlink
+# Note that the rpm-pkg target cannot be used with KBUILD_OUTPUT,
+# but the binrpm-pkg target can; for some reason O= gets ignored.
+
# Do we have rpmbuild, otherwise fall back to the older rpm
RPM := $(shell if [ -x "/usr/bin/rpmbuild" ]; then echo rpmbuild; \
else echo rpm; fi)
@@ -33,6 +36,12 @@ $(objtree)/kernel.spec: $(MKSPEC) $(srctree)/Makefile
$(CONFIG_SHELL) $(MKSPEC) > $@
rpm-pkg rpm: $(objtree)/kernel.spec FORCE
+ @if test -n "$(KBUILD_OUTPUT)"; then \
+ echo "Building source + binary RPM is not possible outside the"; \
+ echo "kernel source tree. Don't set KBUILD_OUTPUT, or use the"; \
+ echo "binrpm-pkg target instead."; \
+ false; \
+ fi
$(MAKE) clean
$(PREV) ln -sf $(srctree) $(KERNELPATH)
$(CONFIG_SHELL) $(srctree)/scripts/setlocalversion > $(objtree)/.scmversion
@@ -61,7 +70,7 @@ binrpm-pkg: $(objtree)/binkernel.spec FORCE
set -e; \
mv -f $(objtree)/.tmp_version $(objtree)/.version
- $(RPM) $(RPMOPTS) --define "_builddir $(srctree)" --target \
+ $(RPM) $(RPMOPTS) --define "_builddir $(objtree)" --target \
$(UTS_MACHINE) -bb $<
clean-files += $(objtree)/binkernel.spec
diff --git a/scripts/package/mkspec b/scripts/package/mkspec
index 3d93f8c81252..47bdd2f99b78 100755
--- a/scripts/package/mkspec
+++ b/scripts/package/mkspec
@@ -70,7 +70,7 @@ echo 'mkdir -p $RPM_BUILD_ROOT/boot $RPM_BUILD_ROOT/lib/modules'
echo 'mkdir -p $RPM_BUILD_ROOT/lib/firmware'
echo "%endif"
-echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{_smp_mflags} modules_install'
+echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{_smp_mflags} KBUILD_SRC= modules_install'
echo "%ifarch ia64"
echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/efi/vmlinuz-$KERNELRELEASE"
echo 'ln -s '"efi/vmlinuz-$KERNELRELEASE" '$RPM_BUILD_ROOT'"/boot/"
diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c
index dc78272fc39f..1f0f8213e2d5 100644
--- a/sound/arm/aaci.c
+++ b/sound/arm/aaci.c
@@ -937,6 +937,7 @@ static int __devinit aaci_probe_ac97(struct aaci *aaci)
struct snd_ac97 *ac97;
int ret;
+ writel(0, aaci->base + AC97_POWERDOWN);
/*
* Assert AACIRESET for 2us
*/
diff --git a/sound/pci/bt87x.c b/sound/pci/bt87x.c
index 24585c6c6d01..4e2b925a94cc 100644
--- a/sound/pci/bt87x.c
+++ b/sound/pci/bt87x.c
@@ -808,6 +808,8 @@ static struct pci_device_id snd_bt87x_ids[] = {
BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x1002, 0x0001, GENERIC),
/* Leadtek Winfast tv 2000xp delux */
BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x107d, 0x6606, GENERIC),
+ /* Pinnacle PCTV */
+ BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x11bd, 0x0012, GENERIC),
/* Voodoo TV 200 */
BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x121a, 0x3000, GENERIC),
/* Askey Computer Corp. MagicTView'99 */
diff --git a/sound/pci/hda/patch_nvhdmi.c b/sound/pci/hda/patch_nvhdmi.c
index c8435c9a97f9..9fb60276f5c9 100644
--- a/sound/pci/hda/patch_nvhdmi.c
+++ b/sound/pci/hda/patch_nvhdmi.c
@@ -29,6 +29,9 @@
#include "hda_codec.h"
#include "hda_local.h"
+/* define below to restrict the supported rates and formats */
+/* #define LIMITED_RATE_FMT_SUPPORT */
+
struct nvhdmi_spec {
struct hda_multi_out multiout;
@@ -60,6 +63,22 @@ static struct hda_verb nvhdmi_basic_init[] = {
{} /* terminator */
};
+#ifdef LIMITED_RATE_FMT_SUPPORT
+/* support only the safe format and rate */
+#define SUPPORTED_RATES SNDRV_PCM_RATE_48000
+#define SUPPORTED_MAXBPS 16
+#define SUPPORTED_FORMATS SNDRV_PCM_FMTBIT_S16_LE
+#else
+/* support all rates and formats */
+#define SUPPORTED_RATES \
+ (SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\
+ SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |\
+ SNDRV_PCM_RATE_192000)
+#define SUPPORTED_MAXBPS 24
+#define SUPPORTED_FORMATS \
+ (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE)
+#endif
+
/*
* Controls
*/
@@ -258,9 +277,9 @@ static struct hda_pcm_stream nvhdmi_pcm_digital_playback_8ch = {
.channels_min = 2,
.channels_max = 8,
.nid = Nv_Master_Convert_nid,
- .rates = SNDRV_PCM_RATE_48000,
- .maxbps = 16,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rates = SUPPORTED_RATES,
+ .maxbps = SUPPORTED_MAXBPS,
+ .formats = SUPPORTED_FORMATS,
.ops = {
.open = nvhdmi_dig_playback_pcm_open,
.close = nvhdmi_dig_playback_pcm_close_8ch,
@@ -273,9 +292,9 @@ static struct hda_pcm_stream nvhdmi_pcm_digital_playback_2ch = {
.channels_min = 2,
.channels_max = 2,
.nid = Nv_Master_Convert_nid,
- .rates = SNDRV_PCM_RATE_48000,
- .maxbps = 16,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rates = SUPPORTED_RATES,
+ .maxbps = SUPPORTED_MAXBPS,
+ .formats = SUPPORTED_FORMATS,
.ops = {
.open = nvhdmi_dig_playback_pcm_open,
.close = nvhdmi_dig_playback_pcm_close_2ch,
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 470fd74a0a1a..c08ca660daba 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -275,7 +275,7 @@ struct alc_spec {
struct snd_kcontrol_new *cap_mixer; /* capture mixer */
unsigned int beep_amp; /* beep amp value, set via set_beep_amp() */
- const struct hda_verb *init_verbs[5]; /* initialization verbs
+ const struct hda_verb *init_verbs[10]; /* initialization verbs
* don't forget NULL
* termination!
*/
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index a9b26828a651..66c0876bf734 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -158,6 +158,7 @@ enum {
STAC_D965_5ST_NO_FP,
STAC_DELL_3ST,
STAC_DELL_BIOS,
+ STAC_927X_VOLKNOB,
STAC_927X_MODELS
};
@@ -907,6 +908,16 @@ static struct hda_verb d965_core_init[] = {
{}
};
+static struct hda_verb dell_3st_core_init[] = {
+ /* don't set delta bit */
+ {0x24, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0x7f},
+ /* unmute node 0x1b */
+ {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, 0xb000},
+ /* select node 0x03 as DAC */
+ {0x0b, AC_VERB_SET_CONNECT_SEL, 0x01},
+ {}
+};
+
static struct hda_verb stac927x_core_init[] = {
/* set master volume and direct control */
{ 0x24, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xff},
@@ -915,6 +926,14 @@ static struct hda_verb stac927x_core_init[] = {
{}
};
+static struct hda_verb stac927x_volknob_core_init[] = {
+ /* don't set delta bit */
+ {0x24, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0x7f},
+ /* enable analog pc beep path */
+ {0x01, AC_VERB_SET_DIGI_CONVERT_2, 1 << 5},
+ {}
+};
+
static struct hda_verb stac9205_core_init[] = {
/* set master volume and direct control */
{ 0x24, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xff},
@@ -1999,6 +2018,7 @@ static unsigned int *stac927x_brd_tbl[STAC_927X_MODELS] = {
[STAC_D965_5ST_NO_FP] = d965_5st_no_fp_pin_configs,
[STAC_DELL_3ST] = dell_3st_pin_configs,
[STAC_DELL_BIOS] = NULL,
+ [STAC_927X_VOLKNOB] = NULL,
};
static const char *stac927x_models[STAC_927X_MODELS] = {
@@ -2010,6 +2030,7 @@ static const char *stac927x_models[STAC_927X_MODELS] = {
[STAC_D965_5ST_NO_FP] = "5stack-no-fp",
[STAC_DELL_3ST] = "dell-3stack",
[STAC_DELL_BIOS] = "dell-bios",
+ [STAC_927X_VOLKNOB] = "volknob",
};
static struct snd_pci_quirk stac927x_cfg_tbl[] = {
@@ -2045,6 +2066,8 @@ static struct snd_pci_quirk stac927x_cfg_tbl[] = {
"Intel D965", STAC_D965_5ST),
SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_INTEL, 0xff00, 0x2500,
"Intel D965", STAC_D965_5ST),
+ /* volume-knob fixes */
+ SND_PCI_QUIRK_VENDOR(0x10cf, "FSC", STAC_927X_VOLKNOB),
{} /* terminator */
};
@@ -5612,10 +5635,14 @@ static int patch_stac927x(struct hda_codec *codec)
spec->dmic_nids = stac927x_dmic_nids;
spec->num_dmics = STAC927X_NUM_DMICS;
- spec->init = d965_core_init;
+ spec->init = dell_3st_core_init;
spec->dmux_nids = stac927x_dmux_nids;
spec->num_dmuxes = ARRAY_SIZE(stac927x_dmux_nids);
break;
+ case STAC_927X_VOLKNOB:
+ spec->num_dmics = 0;
+ spec->init = stac927x_volknob_core_init;
+ break;
default:
spec->num_dmics = 0;
spec->init = stac927x_core_init;
diff --git a/sound/pci/ice1712/amp.c b/sound/pci/ice1712/amp.c
index 37564300b50d..6da21a2bcade 100644
--- a/sound/pci/ice1712/amp.c
+++ b/sound/pci/ice1712/amp.c
@@ -52,11 +52,13 @@ static int __devinit snd_vt1724_amp_init(struct snd_ice1712 *ice)
/* only use basic functionality for now */
- ice->num_total_dacs = 2; /* only PSDOUT0 is connected */
+ /* VT1616 6ch codec connected to PSDOUT0 using packed mode */
+ ice->num_total_dacs = 6;
ice->num_total_adcs = 2;
- /* Chaintech AV-710 has another codecs, which need initialization */
- /* initialize WM8728 codec */
+ /* Chaintech AV-710 has another WM8728 codec connected to PSDOUT4
+ (shared with the SPDIF output). Mixer control for this codec
+ is not yet supported. */
if (ice->eeprom.subvendor == VT1724_SUBDEVICE_AV710) {
for (i = 0; i < ARRAY_SIZE(wm_inits); i += 2)
wm_put(ice, wm_inits[i], wm_inits[i+1]);
diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c
index 76b717dae4b6..10fc92c05574 100644
--- a/sound/pci/ice1712/ice1724.c
+++ b/sound/pci/ice1712/ice1724.c
@@ -648,7 +648,7 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate,
(inb(ICEMT1724(ice, DMA_PAUSE)) & DMA_PAUSES)) {
/* running? we cannot change the rate now... */
spin_unlock_irqrestore(&ice->reg_lock, flags);
- return -EBUSY;
+ return ((rate == ice->cur_rate) && !force) ? 0 : -EBUSY;
}
if (!force && is_pro_rate_locked(ice)) {
spin_unlock_irqrestore(&ice->reg_lock, flags);
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 5881943f0c34..742a32eee8fc 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -157,11 +157,18 @@ uname_R := $(shell sh -c 'uname -r 2>/dev/null || echo not')
uname_P := $(shell sh -c 'uname -p 2>/dev/null || echo not')
uname_V := $(shell sh -c 'uname -v 2>/dev/null || echo not')
-# If we're on a 64-bit kernel, use -m64
-ifndef NO_64BIT
- ifneq ($(patsubst %64,%,$(uname_M)),$(uname_M))
- M64 := -m64
- endif
+#
+# Add -m32 for cross-builds:
+#
+ifdef NO_64BIT
+ MBITS := -m32
+else
+ #
+ # If we're on a 64-bit kernel, use -m64:
+ #
+ ifneq ($(patsubst %64,%,$(uname_M)),$(uname_M))
+ MBITS := -m64
+ endif
endif
# CFLAGS and LDFLAGS are for the users to override from the command line.
@@ -194,7 +201,7 @@ EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wold-style-definition
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-prototypes
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wdeclaration-after-statement
-CFLAGS = $(M64) -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -fstack-protector-all -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS)
+CFLAGS = $(MBITS) -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -fstack-protector-all -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS)
LDFLAGS = -lpthread -lrt -lelf -lm
ALL_CFLAGS = $(CFLAGS)
ALL_LDFLAGS = $(LDFLAGS)
@@ -416,7 +423,7 @@ ifeq ($(uname_S),Darwin)
endif
ifneq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y)
- msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel);
+ msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel and glibc-dev[el]);
endif
ifdef NO_DEMANGLE
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index ea9c15c0cdfe..ce2d5be4f30e 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -1287,7 +1287,7 @@ static struct sort_dimension *available_sorts[] = {
static LIST_HEAD(sort_list);
-static int sort_dimension__add(char *tok, struct list_head *list)
+static int sort_dimension__add(const char *tok, struct list_head *list)
{
int i;
@@ -1917,7 +1917,7 @@ static void setup_sorting(void)
free(str);
- sort_dimension__add((char *)"pid", &cmp_pid);
+ sort_dimension__add("pid", &cmp_pid);
}
static const char *record_args[] = {
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 87c424de79ee..8cfb48cbbea0 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -691,7 +691,10 @@ static void store_event_type(const char *orgname)
FILE *file;
int id;
- sprintf(filename, "/sys/kernel/debug/tracing/events/%s/id", orgname);
+ sprintf(filename, "%s/", debugfs_path);
+ strncat(filename, orgname, strlen(orgname));
+ strcat(filename, "/id");
+
c = strchr(filename, ':');
if (c)
*c = '/';
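
The path handling above is easier to follow in isolation; here is a userspace sketch of the same transformation. snprintf() is used for brevity in place of the sprintf()/strncat() sequence, the events directory is an assumed mount point mirroring the path the removed line hard-coded, and the tracepoint name is an example.

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *debugfs_path = "/sys/kernel/debug/tracing/events"; /* assumed mount */
        const char *orgname = "sched:sched_switch";                    /* example tracepoint */
        char filename[256];
        char *c;

        /* Build "<events dir>/<subsys>:<event>/id", then turn the ':' into '/'. */
        snprintf(filename, sizeof(filename), "%s/%s/id", debugfs_path, orgname);
        c = strchr(filename, ':');
        if (c)
                *c = '/';

        printf("%s\n", filename); /* .../events/sched/sched_switch/id */
        return 0;
}
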
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 55b41b9e3834..55c9659a56e2 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -618,7 +618,7 @@ static int test_type(enum event_type type, enum event_type expect)
}
static int test_type_token(enum event_type type, char *token,
- enum event_type expect, char *expect_tok)
+ enum event_type expect, const char *expect_tok)
{
if (type != expect) {
die("Error: expected type %d but read %d",
@@ -650,7 +650,7 @@ static int read_expect_type(enum event_type expect, char **tok)
return __read_expect_type(expect, tok, 1);
}
-static int __read_expected(enum event_type expect, char *str, int newline_ok)
+static int __read_expected(enum event_type expect, const char *str, int newline_ok)
{
enum event_type type;
char *token;
@@ -668,12 +668,12 @@ static int __read_expected(enum event_type expect, char *str, int newline_ok)
return 0;
}
-static int read_expected(enum event_type expect, char *str)
+static int read_expected(enum event_type expect, const char *str)
{
return __read_expected(expect, str, 1);
}
-static int read_expected_item(enum event_type expect, char *str)
+static int read_expected_item(enum event_type expect, const char *str)
{
return __read_expected(expect, str, 0);
}